From 268400a2dcc550a859d6254b2909502373097466 Mon Sep 17 00:00:00 2001 From: zlei9 Date: Sun, 22 Mar 2026 11:41:50 +0800 Subject: [PATCH] Initial commit: claude-api skill --- LICENSE.txt | 202 ++++++++++ SKILL.md | 243 ++++++++++++ csharp/claude-api.md | 70 ++++ curl/examples.md | 164 ++++++++ go/claude-api.md | 146 +++++++ java/claude-api.md | 128 +++++++ php/claude-api.md | 88 +++++ python/agent-sdk/README.md | 269 +++++++++++++ python/agent-sdk/patterns.md | 319 ++++++++++++++++ python/claude-api/README.md | 404 ++++++++++++++++++++ python/claude-api/batches.md | 182 +++++++++ python/claude-api/files-api.md | 162 ++++++++ python/claude-api/streaming.md | 162 ++++++++ python/claude-api/tool-use.md | 587 +++++++++++++++++++++++++++++ ruby/claude-api.md | 87 +++++ shared/error-codes.md | 205 ++++++++++ shared/live-sources.md | 121 ++++++ shared/models.md | 68 ++++ shared/tool-use-concepts.md | 305 +++++++++++++++ typescript/agent-sdk/README.md | 220 +++++++++++ typescript/agent-sdk/patterns.md | 150 ++++++++ typescript/claude-api/README.md | 313 +++++++++++++++ typescript/claude-api/batches.md | 106 ++++++ typescript/claude-api/files-api.md | 98 +++++ typescript/claude-api/streaming.md | 178 +++++++++ typescript/claude-api/tool-use.md | 477 +++++++++++++++++++++++ 26 files changed, 5454 insertions(+) create mode 100644 LICENSE.txt create mode 100644 SKILL.md create mode 100644 csharp/claude-api.md create mode 100644 curl/examples.md create mode 100644 go/claude-api.md create mode 100644 java/claude-api.md create mode 100644 php/claude-api.md create mode 100644 python/agent-sdk/README.md create mode 100644 python/agent-sdk/patterns.md create mode 100644 python/claude-api/README.md create mode 100644 python/claude-api/batches.md create mode 100644 python/claude-api/files-api.md create mode 100644 python/claude-api/streaming.md create mode 100644 python/claude-api/tool-use.md create mode 100644 ruby/claude-api.md create mode 100644 shared/error-codes.md create mode 
100644 shared/live-sources.md create mode 100644 shared/models.md create mode 100644 shared/tool-use-concepts.md create mode 100644 typescript/agent-sdk/README.md create mode 100644 typescript/agent-sdk/patterns.md create mode 100644 typescript/claude-api/README.md create mode 100644 typescript/claude-api/batches.md create mode 100644 typescript/claude-api/files-api.md create mode 100644 typescript/claude-api/streaming.md create mode 100644 typescript/claude-api/tool-use.md diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 0000000..7a4a3ea --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/SKILL.md b/SKILL.md new file mode 100644 index 0000000..0fa8429 --- /dev/null +++ b/SKILL.md @@ -0,0 +1,243 @@ +--- +name: claude-api +description: "Build apps with the Claude API or Anthropic SDK. TRIGGER when: code imports `anthropic`/`@anthropic-ai/sdk`/`claude_agent_sdk`, or user asks to use Claude API, Anthropic SDKs, or Agent SDK. DO NOT TRIGGER when: code imports `openai`/other AI SDK, general programming, or ML/data-science tasks." +license: Complete terms in LICENSE.txt +--- + +# Building LLM-Powered Applications with Claude + +This skill helps you build LLM-powered applications with Claude. Choose the right surface based on your needs, detect the project language, then read the relevant language-specific documentation. + +## Defaults + +Unless the user requests otherwise: + +For the Claude model version, please use Claude Opus 4.6, which you can access via the exact model string `claude-opus-4-6`. Please default to using adaptive thinking (`thinking: {type: "adaptive"}`) for anything remotely complicated. And finally, please default to streaming for any request that may involve long input, long output, or high `max_tokens` — it prevents hitting request timeouts. 
Use the SDK's `.get_final_message()` / `.finalMessage()` helper to get the complete response if you don't need to handle individual stream events.
**If user needs cURL/raw HTTP examples**, read from `curl/`. + +### Language-Specific Feature Support + +| Language | Tool Runner | Agent SDK | Notes | +| ---------- | ----------- | --------- | ------------------------------------- | +| Python | Yes (beta) | Yes | Full support — `@beta_tool` decorator | +| TypeScript | Yes (beta) | Yes | Full support — `betaZodTool` + Zod | +| Java | Yes (beta) | No | Beta tool use with annotated classes | +| Go | Yes (beta) | No | `BetaToolRunner` in `toolrunner` pkg | +| Ruby | Yes (beta) | No | `BaseTool` + `tool_runner` in beta | +| cURL | N/A | N/A | Raw HTTP, no SDK features | +| C# | No | No | Official SDK | +| PHP | No | No | Official SDK | + +--- + +## Which Surface Should I Use? + +> **Start simple.** Default to the simplest tier that meets your needs. Single API calls and workflows handle most use cases — only reach for agents when the task genuinely requires open-ended, model-driven exploration. + +| Use Case | Tier | Recommended Surface | Why | +| ----------------------------------------------- | --------------- | ------------------------- | --------------------------------------- | +| Classification, summarization, extraction, Q&A | Single LLM call | **Claude API** | One request, one response | +| Batch processing or embeddings | Single LLM call | **Claude API** | Specialized endpoints | +| Multi-step pipelines with code-controlled logic | Workflow | **Claude API + tool use** | You orchestrate the loop | +| Custom agent with your own tools | Agent | **Claude API + tool use** | Maximum flexibility | +| AI agent with file/web/terminal access | Agent | **Agent SDK** | Built-in tools, safety, and MCP support | +| Agentic coding assistant | Agent | **Agent SDK** | Designed for this use case | +| Want built-in permissions and guardrails | Agent | **Agent SDK** | Safety features included | + +> **Note:** The Agent SDK is for when you want built-in file/web/terminal tools, permissions, and MCP out of the box. 
If you want to build an agent with your own tools, Claude API is the right choice — use the tool runner for automatic loop handling, or the manual loop for fine-grained control (approval gates, custom logging, conditional execution). + +### Decision Tree + +``` +What does your application need? + +1. Single LLM call (classification, summarization, extraction, Q&A) + └── Claude API — one request, one response + +2. Does Claude need to read/write files, browse the web, or run shell commands + as part of its work? (Not: does your app read a file and hand it to Claude — + does Claude itself need to discover and access files/web/shell?) + └── Yes → Agent SDK — built-in tools, don't reimplement them + Examples: "scan a codebase for bugs", "summarize every file in a directory", + "find bugs using subagents", "research a topic via web search" + +3. Workflow (multi-step, code-orchestrated, with your own tools) + └── Claude API with tool use — you control the loop + +4. Open-ended agent (model decides its own trajectory, your own tools) + └── Claude API agentic loop (maximum flexibility) +``` + +### Should I Build an Agent? + +Before choosing the agent tier, check all four criteria: + +- **Complexity** — Is the task multi-step and hard to fully specify in advance? (e.g., "turn this design doc into a PR" vs. "extract the title from this PDF") +- **Value** — Does the outcome justify higher cost and latency? +- **Viability** — Is Claude capable at this task type? +- **Cost of error** — Can errors be caught and recovered from? (tests, review, rollback) + +If the answer is "no" to any of these, stay at a simpler tier (single call or workflow). + +--- + +## Architecture + +Everything goes through `POST /v1/messages`. Tools and output constraints are features of this single endpoint — not separate APIs. 
+ +**User-defined tools** — You define tools (via decorators, Zod schemas, or raw JSON), and the SDK's tool runner handles calling the API, executing your functions, and looping until Claude is done. For full control, you can write the loop manually. + +**Server-side tools** — Anthropic-hosted tools that run on Anthropic's infrastructure. Code execution is fully server-side (declare it in `tools`, Claude runs code automatically). Computer use can be server-hosted or self-hosted. + +**Structured outputs** — Constrains the Messages API response format (`output_config.format`) and/or tool parameter validation (`strict: true`). The recommended approach is `client.messages.parse()` which validates responses against your schema automatically. Note: the old `output_format` parameter is deprecated; use `output_config: {format: {...}}` on `messages.create()`. + +**Supporting endpoints** — Batches (`POST /v1/messages/batches`), Files (`POST /v1/files`), and Token Counting feed into or support Messages API requests. + +--- + +## Current Models (cached: 2026-02-17) + +| Model | Model ID | Context | Input $/1M | Output $/1M | +| ----------------- | ------------------- | -------------- | ---------- | ----------- | +| Claude Opus 4.6 | `claude-opus-4-6` | 200K (1M beta) | $5.00 | $25.00 | +| Claude Sonnet 4.6 | `claude-sonnet-4-6` | 200K (1M beta) | $3.00 | $15.00 | +| Claude Haiku 4.5 | `claude-haiku-4-5` | 200K | $1.00 | $5.00 | + +**ALWAYS use `claude-opus-4-6` unless the user explicitly names a different model.** This is non-negotiable. Do not use `claude-sonnet-4-6`, `claude-sonnet-4-5`, or any other model unless the user literally says "use sonnet" or "use haiku". Never downgrade for cost — that's the user's decision, not yours. + +**CRITICAL: Use only the exact model ID strings from the table above — they are complete as-is. 
Do not append date suffixes.** For example, use `claude-sonnet-4-6`, never `claude-sonnet-4-6-20250514` or any other date-suffixed variant you might recall from training data.
+ +**Older models (only if explicitly requested):** If the user specifically asks for Sonnet 4.5 or another older model, use `thinking: {type: "enabled", budget_tokens: N}`. `budget_tokens` must be less than `max_tokens` (minimum 1024). Never choose an older model just because the user mentions `budget_tokens` — use Opus 4.6 with adaptive thinking instead. + +--- + +## Compaction (Quick Reference) + +**Beta, Opus 4.6 only.** For long-running conversations that may exceed the 200K context window, enable server-side compaction. The API automatically summarizes earlier context when it approaches the trigger threshold (default: 150K tokens). Requires beta header `compact-2026-01-12`. + +**Critical:** Append `response.content` (not just the text) back to your messages on every turn. Compaction blocks in the response must be preserved — the API uses them to replace the compacted history on the next request. Extracting only the text string and appending that will silently lose the compaction state. + +See `{lang}/claude-api/README.md` (Compaction section) for code examples. Full docs via WebFetch in `shared/live-sources.md`. 
+ +--- + +## Reading Guide + +After detecting the language, read the relevant files based on what the user needs: + +### Quick Task Reference + +**Single text classification/summarization/extraction/Q&A:** +→ Read only `{lang}/claude-api/README.md` + +**Chat UI or real-time response display:** +→ Read `{lang}/claude-api/README.md` + `{lang}/claude-api/streaming.md` + +**Long-running conversations (may exceed context window):** +→ Read `{lang}/claude-api/README.md` — see Compaction section + +**Function calling / tool use / agents:** +→ Read `{lang}/claude-api/README.md` + `shared/tool-use-concepts.md` + `{lang}/claude-api/tool-use.md` + +**Batch processing (non-latency-sensitive):** +→ Read `{lang}/claude-api/README.md` + `{lang}/claude-api/batches.md` + +**File uploads across multiple requests:** +→ Read `{lang}/claude-api/README.md` + `{lang}/claude-api/files-api.md` + +**Agent with built-in tools (file/web/terminal):** +→ Read `{lang}/agent-sdk/README.md` + `{lang}/agent-sdk/patterns.md` + +### Claude API (Full File Reference) + +Read the **language-specific Claude API folder** (`{language}/claude-api/`): + +1. **`{language}/claude-api/README.md`** — **Read this first.** Installation, quick start, common patterns, error handling. +2. **`shared/tool-use-concepts.md`** — Read when the user needs function calling, code execution, memory, or structured outputs. Covers conceptual foundations. +3. **`{language}/claude-api/tool-use.md`** — Read for language-specific tool use code examples (tool runner, manual loop, code execution, memory, structured outputs). +4. **`{language}/claude-api/streaming.md`** — Read when building chat UIs or interfaces that display responses incrementally. +5. **`{language}/claude-api/batches.md`** — Read when processing many requests offline (not latency-sensitive). Runs asynchronously at 50% cost. +6. **`{language}/claude-api/files-api.md`** — Read when sending the same file across multiple requests without re-uploading. +7. 
**`shared/error-codes.md`** — Read when debugging HTTP errors or implementing error handling. +8. **`shared/live-sources.md`** — WebFetch URLs for fetching the latest official documentation. + +> **Note:** For Java, Go, Ruby, C#, PHP, and cURL — these have a single file each covering all basics. Read that file plus `shared/tool-use-concepts.md` and `shared/error-codes.md` as needed. + +### Agent SDK + +Read the **language-specific Agent SDK folder** (`{language}/agent-sdk/`). Agent SDK is available for **Python and TypeScript only**. + +1. **`{language}/agent-sdk/README.md`** — Installation, quick start, built-in tools, permissions, MCP, hooks. +2. **`{language}/agent-sdk/patterns.md`** — Custom tools, hooks, subagents, MCP integration, session resumption. +3. **`shared/live-sources.md`** — WebFetch URLs for current Agent SDK docs. + +--- + +## When to Use WebFetch + +Use WebFetch to get the latest documentation when: + +- User asks for "latest" or "current" information +- Cached data seems incorrect +- User asks about features not covered here + +Live documentation URLs are in `shared/live-sources.md`. + +## Common Pitfalls + +- Don't truncate inputs when passing files or content to the API. If the content is too long to fit in the context window, notify the user and discuss options (chunking, summarization, etc.) rather than silently truncating. +- **Opus 4.6 / Sonnet 4.6 thinking:** Use `thinking: {type: "adaptive"}` — do NOT use `budget_tokens` (deprecated on both Opus 4.6 and Sonnet 4.6). For older models, `budget_tokens` must be less than `max_tokens` (minimum 1024). This will throw an error if you get it wrong. +- **Opus 4.6 prefill removed:** Assistant message prefills (last-assistant-turn prefills) return a 400 error on Opus 4.6. Use structured outputs (`output_config.format`) or system prompt instructions to control response format instead. 
+- **128K output tokens:** Opus 4.6 supports up to 128K `max_tokens`, but the SDKs require streaming for large `max_tokens` to avoid HTTP timeouts. Use `.stream()` with `.get_final_message()` / `.finalMessage()`. +- **Tool call JSON parsing (Opus 4.6):** Opus 4.6 may produce different JSON string escaping in tool call `input` fields (e.g., Unicode or forward-slash escaping). Always parse tool inputs with `json.loads()` / `JSON.parse()` — never do raw string matching on the serialized input. +- **Structured outputs (all models):** Use `output_config: {format: {...}}` instead of the deprecated `output_format` parameter on `messages.create()`. This is a general API change, not 4.6-specific. +- **Don't reimplement SDK functionality:** The SDK provides high-level helpers — use them instead of building from scratch. Specifically: use `stream.finalMessage()` instead of wrapping `.on()` events in `new Promise()`; use typed exception classes (`Anthropic.RateLimitError`, etc.) instead of string-matching error messages; use SDK types (`Anthropic.MessageParam`, `Anthropic.Tool`, `Anthropic.Message`, etc.) instead of redefining equivalent interfaces. +- **Don't define custom types for SDK data structures:** The SDK exports types for all API objects. Use `Anthropic.MessageParam` for messages, `Anthropic.Tool` for tool definitions, `Anthropic.ToolUseBlock` / `Anthropic.ToolResultBlockParam` for tool results, `Anthropic.Message` for responses. Defining your own `interface ChatMessage { role: string; content: unknown }` duplicates what the SDK already provides and loses type safety. +- **Report and document output:** For tasks that produce reports, documents, or visualizations, the code execution sandbox has `python-docx`, `python-pptx`, `matplotlib`, `pillow`, and `pypdf` pre-installed. Claude can generate formatted files (DOCX, PDF, charts) and return them via the Files API — consider this for "report" or "document" type requests instead of plain stdout text. 
diff --git a/csharp/claude-api.md b/csharp/claude-api.md new file mode 100644 index 0000000..cfb938a --- /dev/null +++ b/csharp/claude-api.md @@ -0,0 +1,70 @@ +# Claude API — C# + +> **Note:** The C# SDK is the official Anthropic SDK for C#. Tool use is supported via the Messages API. A class-annotation-based tool runner is not available; use raw tool definitions with JSON schema. The SDK also supports Microsoft.Extensions.AI IChatClient integration with function invocation. + +## Installation + +```bash +dotnet add package Anthropic +``` + +## Client Initialization + +```csharp +using Anthropic; + +// Default (uses ANTHROPIC_API_KEY env var) +AnthropicClient client = new(); + +// Explicit API key (use environment variables — never hardcode keys) +AnthropicClient client = new() { + ApiKey = Environment.GetEnvironmentVariable("ANTHROPIC_API_KEY") +}; +``` + +--- + +## Basic Message Request + +```csharp +using Anthropic.Models.Messages; + +var parameters = new MessageCreateParams +{ + Model = Model.ClaudeOpus4_6, + MaxTokens = 1024, + Messages = [new() { Role = Role.User, Content = "What is the capital of France?" }] +}; +var message = await client.Messages.Create(parameters); +Console.WriteLine(message); +``` + +--- + +## Streaming + +```csharp +using Anthropic.Models.Messages; + +var parameters = new MessageCreateParams +{ + Model = Model.ClaudeOpus4_6, + MaxTokens = 1024, + Messages = [new() { Role = Role.User, Content = "Write a haiku" }] +}; + +await foreach (RawMessageStreamEvent streamEvent in client.Messages.CreateStreaming(parameters)) +{ + if (streamEvent.TryPickContentBlockDelta(out var delta) && + delta.Delta.TryPickText(out var text)) + { + Console.Write(text.Text); + } +} +``` + +--- + +## Tool Use (Manual Loop) + +The C# SDK supports raw tool definitions via JSON schema. See the [shared tool use concepts](../shared/tool-use-concepts.md) for the tool definition format and agentic loop pattern. 
diff --git a/curl/examples.md b/curl/examples.md new file mode 100644 index 0000000..f33e11b --- /dev/null +++ b/curl/examples.md @@ -0,0 +1,164 @@ +# Claude API — cURL / Raw HTTP + +Use these examples when the user needs raw HTTP requests or is working in a language without an official SDK. + +## Setup + +```bash +export ANTHROPIC_API_KEY="your-api-key" +``` + +--- + +## Basic Message Request + +```bash +curl https://api.anthropic.com/v1/messages \ + -H "Content-Type: application/json" \ + -H "x-api-key: $ANTHROPIC_API_KEY" \ + -H "anthropic-version: 2023-06-01" \ + -d '{ + "model": "claude-opus-4-6", + "max_tokens": 1024, + "messages": [ + {"role": "user", "content": "What is the capital of France?"} + ] + }' +``` + +--- + +## Streaming (SSE) + +```bash +curl https://api.anthropic.com/v1/messages \ + -H "Content-Type: application/json" \ + -H "x-api-key: $ANTHROPIC_API_KEY" \ + -H "anthropic-version: 2023-06-01" \ + -d '{ + "model": "claude-opus-4-6", + "max_tokens": 1024, + "stream": true, + "messages": [{"role": "user", "content": "Write a haiku"}] + }' +``` + +The response is a stream of Server-Sent Events: + +``` +event: message_start +data: {"type":"message_start","message":{"id":"msg_...","type":"message",...}} + +event: content_block_start +data: {"type":"content_block_start","index":0,"content_block":{"type":"text","text":""}} + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Hello"}} + +event: content_block_stop +data: {"type":"content_block_stop","index":0} + +event: message_delta +data: {"type":"message_delta","delta":{"stop_reason":"end_turn"},"usage":{"output_tokens":12}} + +event: message_stop +data: {"type":"message_stop"} +``` + +--- + +## Tool Use + +```bash +curl https://api.anthropic.com/v1/messages \ + -H "Content-Type: application/json" \ + -H "x-api-key: $ANTHROPIC_API_KEY" \ + -H "anthropic-version: 2023-06-01" \ + -d '{ + "model": "claude-opus-4-6", + "max_tokens": 1024, + 
"tools": [{ + "name": "get_weather", + "description": "Get current weather for a location", + "input_schema": { + "type": "object", + "properties": { + "location": {"type": "string", "description": "City name"} + }, + "required": ["location"] + } + }], + "messages": [{"role": "user", "content": "What is the weather in Paris?"}] + }' +``` + +When Claude responds with a `tool_use` block, send the result back: + +```bash +curl https://api.anthropic.com/v1/messages \ + -H "Content-Type: application/json" \ + -H "x-api-key: $ANTHROPIC_API_KEY" \ + -H "anthropic-version: 2023-06-01" \ + -d '{ + "model": "claude-opus-4-6", + "max_tokens": 1024, + "tools": [{ + "name": "get_weather", + "description": "Get current weather for a location", + "input_schema": { + "type": "object", + "properties": { + "location": {"type": "string", "description": "City name"} + }, + "required": ["location"] + } + }], + "messages": [ + {"role": "user", "content": "What is the weather in Paris?"}, + {"role": "assistant", "content": [ + {"type": "text", "text": "Let me check the weather."}, + {"type": "tool_use", "id": "toolu_abc123", "name": "get_weather", "input": {"location": "Paris"}} + ]}, + {"role": "user", "content": [ + {"type": "tool_result", "tool_use_id": "toolu_abc123", "content": "72°F and sunny"} + ]} + ] + }' +``` + +--- + +## Extended Thinking + +> **Opus 4.6 and Sonnet 4.6:** Use adaptive thinking. `budget_tokens` is deprecated on both Opus 4.6 and Sonnet 4.6. +> **Older models:** Use `"type": "enabled"` with `"budget_tokens": N` (must be < `max_tokens`, min 1024). 
+ +```bash +# Opus 4.6: adaptive thinking (recommended) +curl https://api.anthropic.com/v1/messages \ + -H "Content-Type: application/json" \ + -H "x-api-key: $ANTHROPIC_API_KEY" \ + -H "anthropic-version: 2023-06-01" \ + -d '{ + "model": "claude-opus-4-6", + "max_tokens": 16000, + "thinking": { + "type": "adaptive" + }, + "output_config": { + "effort": "high" + }, + "messages": [{"role": "user", "content": "Solve this step by step..."}] + }' +``` + +--- + +## Required Headers + +| Header | Value | Description | +| ------------------- | ------------------ | -------------------------- | +| `Content-Type` | `application/json` | Required | +| `x-api-key` | Your API key | Authentication | +| `anthropic-version` | `2023-06-01` | API version | +| `anthropic-beta` | Beta feature IDs | Required for beta features | diff --git a/go/claude-api.md b/go/claude-api.md new file mode 100644 index 0000000..7ba732a --- /dev/null +++ b/go/claude-api.md @@ -0,0 +1,146 @@ +# Claude API — Go + +> **Note:** The Go SDK supports the Claude API and beta tool use with `BetaToolRunner`. Agent SDK is not yet available for Go. 
+ +## Installation + +```bash +go get github.com/anthropics/anthropic-sdk-go +``` + +## Client Initialization + +```go +import ( + "github.com/anthropics/anthropic-sdk-go" + "github.com/anthropics/anthropic-sdk-go/option" +) + +// Default (uses ANTHROPIC_API_KEY env var) +client := anthropic.NewClient() + +// Explicit API key +client := anthropic.NewClient( + option.WithAPIKey("your-api-key"), +) +``` + +--- + +## Basic Message Request + +```go +response, err := client.Messages.New(context.TODO(), anthropic.MessageNewParams{ + Model: anthropic.ModelClaudeOpus4_6, + MaxTokens: 1024, + Messages: []anthropic.MessageParam{ + anthropic.NewUserMessage(anthropic.NewTextBlock("What is the capital of France?")), + }, +}) +if err != nil { + log.Fatal(err) +} +fmt.Println(response.Content[0].Text) +``` + +--- + +## Streaming + +```go +stream := client.Messages.NewStreaming(context.TODO(), anthropic.MessageNewParams{ + Model: anthropic.ModelClaudeOpus4_6, + MaxTokens: 1024, + Messages: []anthropic.MessageParam{ + anthropic.NewUserMessage(anthropic.NewTextBlock("Write a haiku")), + }, +}) + +for stream.Next() { + event := stream.Current() + switch eventVariant := event.AsAny().(type) { + case anthropic.ContentBlockDeltaEvent: + switch deltaVariant := eventVariant.Delta.AsAny().(type) { + case anthropic.TextDelta: + fmt.Print(deltaVariant.Text) + } + } +} +if err := stream.Err(); err != nil { + log.Fatal(err) +} +``` + +--- + +## Tool Use + +### Tool Runner (Beta — Recommended) + +**Beta:** The Go SDK provides `BetaToolRunner` for automatic tool use loops via the `toolrunner` package. 
+ +```go +import ( + "context" + "fmt" + "log" + + "github.com/anthropics/anthropic-sdk-go" + "github.com/anthropics/anthropic-sdk-go/toolrunner" +) + +// Define tool input with jsonschema tags for automatic schema generation +type GetWeatherInput struct { + City string `json:"city" jsonschema:"required,description=The city name"` +} + +// Create a tool with automatic schema generation from struct tags +weatherTool, err := toolrunner.NewBetaToolFromJSONSchema( + "get_weather", + "Get current weather for a city", + func(ctx context.Context, input GetWeatherInput) (anthropic.BetaToolResultBlockParamContentUnion, error) { + return anthropic.BetaToolResultBlockParamContentUnion{ + OfText: &anthropic.BetaTextBlockParam{ + Text: fmt.Sprintf("The weather in %s is sunny, 72°F", input.City), + }, + }, nil + }, +) +if err != nil { + log.Fatal(err) +} + +// Create a tool runner that handles the conversation loop automatically +runner := client.Beta.Messages.NewToolRunner( + []anthropic.BetaTool{weatherTool}, + anthropic.BetaToolRunnerParams{ + BetaMessageNewParams: anthropic.BetaMessageNewParams{ + Model: anthropic.ModelClaudeOpus4_6, + MaxTokens: 1024, + Messages: []anthropic.BetaMessageParam{ + anthropic.NewBetaUserMessage(anthropic.NewBetaTextBlock("What's the weather in Paris?")), + }, + }, + MaxIterations: 5, + }, +) + +// Run until Claude produces a final response +message, err := runner.RunToCompletion(context.Background()) +if err != nil { + log.Fatal(err) +} +fmt.Println(message.Content[0].Text) +``` + +**Key features of the Go tool runner:** + +- Automatic schema generation from Go structs via `jsonschema` tags +- `RunToCompletion()` for simple one-shot usage +- `All()` iterator for processing each message in the conversation +- `NextMessage()` for step-by-step iteration +- Streaming variant via `NewToolRunnerStreaming()` with `AllStreaming()` + +### Manual Loop + +For fine-grained control, use raw tool definitions via JSON schema. 
See the [shared tool use concepts](../shared/tool-use-concepts.md) for the tool definition format and agentic loop pattern. diff --git a/java/claude-api.md b/java/claude-api.md new file mode 100644 index 0000000..d618dfe --- /dev/null +++ b/java/claude-api.md @@ -0,0 +1,128 @@ +# Claude API — Java + +> **Note:** The Java SDK supports the Claude API and beta tool use with annotated classes. Agent SDK is not yet available for Java. + +## Installation + +Maven: + +```xml +<dependency> + <groupId>com.anthropic</groupId> + <artifactId>anthropic-java</artifactId> + <version>2.15.0</version> +</dependency> +``` + +Gradle: + +```groovy +implementation("com.anthropic:anthropic-java:2.15.0") +``` + +## Client Initialization + +```java +import com.anthropic.client.AnthropicClient; +import com.anthropic.client.okhttp.AnthropicOkHttpClient; + +// Default (reads ANTHROPIC_API_KEY from environment) +AnthropicClient client = AnthropicOkHttpClient.fromEnv(); + +// Explicit API key +AnthropicClient client = AnthropicOkHttpClient.builder() + .apiKey("your-api-key") + .build(); +``` + +--- + +## Basic Message Request + +```java +import com.anthropic.models.messages.MessageCreateParams; +import com.anthropic.models.messages.Message; +import com.anthropic.models.messages.Model; + +MessageCreateParams params = MessageCreateParams.builder() + .model(Model.CLAUDE_OPUS_4_6) + .maxTokens(1024L) + .addUserMessage("What is the capital of France?") + .build(); + +Message response = client.messages().create(params); +response.content().stream() + .flatMap(block -> block.text().stream()) + .forEach(textBlock -> System.out.println(textBlock.text())); +``` + +--- + +## Streaming + +```java +import com.anthropic.core.http.StreamResponse; +import com.anthropic.models.messages.RawMessageStreamEvent; + +MessageCreateParams params = MessageCreateParams.builder() + .model(Model.CLAUDE_OPUS_4_6) + .maxTokens(1024L) + .addUserMessage("Write a haiku") + .build(); + +try (StreamResponse<RawMessageStreamEvent> streamResponse = client.messages().createStreaming(params)) { + streamResponse.stream() + .flatMap(event -> 
event.contentBlockDelta().stream()) + .flatMap(deltaEvent -> deltaEvent.delta().text().stream()) + .forEach(textDelta -> System.out.print(textDelta.text())); +} +``` + +--- + +## Tool Use (Beta) + +The Java SDK supports beta tool use with annotated classes. Tool classes implement `Supplier<String>` for automatic execution via `BetaToolRunner`. + +### Tool Runner (automatic loop) + +```java +import com.anthropic.models.beta.messages.MessageCreateParams; +import com.anthropic.models.beta.messages.BetaMessage; +import com.anthropic.helpers.BetaToolRunner; +import com.fasterxml.jackson.annotation.JsonClassDescription; +import com.fasterxml.jackson.annotation.JsonPropertyDescription; +import java.util.function.Supplier; + +@JsonClassDescription("Get the weather in a given location") +static class GetWeather implements Supplier<String> { + @JsonPropertyDescription("The city and state, e.g. San Francisco, CA") + public String location; + + @Override + public String get() { + return "The weather in " + location + " is sunny and 72°F"; + } +} + +BetaToolRunner toolRunner = client.beta().messages().toolRunner( + MessageCreateParams.builder() + .model("claude-opus-4-6") + .maxTokens(1024L) + .putAdditionalHeader("anthropic-beta", "structured-outputs-2025-11-13") + .addTool(GetWeather.class) + .addUserMessage("What's the weather in San Francisco?") + .build()); + +for (BetaMessage message : toolRunner) { + System.out.println(message); +} +``` + +### Non-Beta Tool Use + +Tool use is also available through the non-beta `com.anthropic.models.messages.MessageCreateParams` with `addTool(Tool)` for manually defined JSON schemas, without needing the beta namespace. The beta namespace is only needed for the class-annotation convenience layer (`@JsonClassDescription`, `BetaToolRunner`). + +### Manual Loop + +For manual tool loops, define tools as JSON schema in the request, handle `tool_use` blocks in the response, send `tool_result` back, and loop until `stop_reason` is `"end_turn"`. 
See the [shared tool use concepts](../shared/tool-use-concepts.md) for the agentic loop pattern. diff --git a/php/claude-api.md b/php/claude-api.md new file mode 100644 index 0000000..4aab6b4 --- /dev/null +++ b/php/claude-api.md @@ -0,0 +1,88 @@ +# Claude API — PHP + +> **Note:** The PHP SDK is the official Anthropic SDK for PHP. Tool runner and Agent SDK are not available. Bedrock, Vertex AI, and Foundry clients are supported. + +## Installation + +```bash +composer require "anthropic-ai/sdk" +``` + +## Client Initialization + +```php +use Anthropic\Client; + +// Using API key from environment variable +$client = new Client(apiKey: getenv("ANTHROPIC_API_KEY")); +``` + +### Amazon Bedrock + +```php +use Anthropic\BedrockClient; + +$client = new BedrockClient( + region: 'us-east-1', +); +``` + +### Google Vertex AI + +```php +use Anthropic\VertexClient; + +$client = new VertexClient( + region: 'us-east5', + projectId: 'my-project-id', +); +``` + +### Anthropic Foundry + +```php +use Anthropic\FoundryClient; + +$client = new FoundryClient( + authToken: getenv("ANTHROPIC_AUTH_TOKEN"), +); +``` + +--- + +## Basic Message Request + +```php +$message = $client->messages->create( + model: 'claude-opus-4-6', + maxTokens: 1024, + messages: [ + ['role' => 'user', 'content' => 'What is the capital of France?'], + ], +); +echo $message->content[0]->text; +``` + +--- + +## Streaming + +```php +$stream = $client->messages->createStream( + model: 'claude-opus-4-6', + maxTokens: 1024, + messages: [ + ['role' => 'user', 'content' => 'Write a haiku'], + ], +); + +foreach ($stream as $event) { + echo $event; +} +``` + +--- + +## Tool Use (Manual Loop) + +The PHP SDK supports raw tool definitions via JSON schema. See the [shared tool use concepts](../shared/tool-use-concepts.md) for the tool definition format and agentic loop pattern. 
diff --git a/python/agent-sdk/README.md b/python/agent-sdk/README.md new file mode 100644 index 0000000..c819c67 --- /dev/null +++ b/python/agent-sdk/README.md @@ -0,0 +1,269 @@ +# Agent SDK — Python + +The Claude Agent SDK provides a higher-level interface for building AI agents with built-in tools, safety features, and agentic capabilities. + +## Installation + +```bash +pip install claude-agent-sdk +``` + +--- + +## Quick Start + +```python +import anyio +from claude_agent_sdk import query, ClaudeAgentOptions, ResultMessage + +async def main(): + async for message in query( + prompt="Explain this codebase", + options=ClaudeAgentOptions(allowed_tools=["Read", "Glob", "Grep"]) + ): + if isinstance(message, ResultMessage): + print(message.result) + +anyio.run(main) +``` + +--- + +## Built-in Tools + +| Tool | Description | +| --------- | ------------------------------------ | +| Read | Read files in the workspace | +| Write | Create new files | +| Edit | Make precise edits to existing files | +| Bash | Execute shell commands | +| Glob | Find files by pattern | +| Grep | Search files by content | +| WebSearch | Search the web for information | +| WebFetch | Fetch and analyze web pages | +| AskUserQuestion | Ask user clarifying questions | +| Agent | Spawn subagents | + +--- + +## Primary Interfaces + +### `query()` — Simple One-Shot Usage + +The `query()` function is the simplest way to run an agent. It returns an async iterator of messages. + +```python +from claude_agent_sdk import query, ClaudeAgentOptions, ResultMessage + +async for message in query( + prompt="Explain this codebase", + options=ClaudeAgentOptions(allowed_tools=["Read", "Glob", "Grep"]) +): + if isinstance(message, ResultMessage): + print(message.result) +``` + +### `ClaudeSDKClient` — Full Control + +`ClaudeSDKClient` provides full control over the agent lifecycle. Use it when you need custom tools, hooks, streaming, or the ability to interrupt execution. 
+ +```python +import anyio +from claude_agent_sdk import ClaudeSDKClient, ClaudeAgentOptions, AssistantMessage, TextBlock + +async def main(): + options = ClaudeAgentOptions(allowed_tools=["Read", "Glob", "Grep"]) + async with ClaudeSDKClient(options=options) as client: + await client.query("Explain this codebase") + async for message in client.receive_response(): + if isinstance(message, AssistantMessage): + for block in message.content: + if isinstance(block, TextBlock): + print(block.text) + +anyio.run(main) +``` + +`ClaudeSDKClient` supports: + +- **Context manager** (`async with`) for automatic resource cleanup +- **`client.query(prompt)`** to send a prompt to the agent +- **`receive_response()`** for streaming messages until completion +- **`interrupt()`** to stop agent execution mid-task +- **Required for custom tools** (via SDK MCP servers) + +--- + +## Permission System + +```python +from claude_agent_sdk import query, ClaudeAgentOptions, ResultMessage + +async for message in query( + prompt="Refactor the authentication module", + options=ClaudeAgentOptions( + allowed_tools=["Read", "Edit", "Write"], + permission_mode="acceptEdits" # Auto-accept file edits + ) +): + if isinstance(message, ResultMessage): + print(message.result) +``` + +Permission modes: + +- `"default"`: Prompt for dangerous operations +- `"plan"`: Planning only, no execution +- `"acceptEdits"`: Auto-accept file edits +- `"dontAsk"`: Don't prompt (useful for CI/CD) +- `"bypassPermissions"`: Skip all prompts (requires `allow_dangerously_skip_permissions=True` in options) + +--- + +## MCP (Model Context Protocol) Support + +```python +from claude_agent_sdk import query, ClaudeAgentOptions, ResultMessage + +async for message in query( + prompt="Open example.com and describe what you see", + options=ClaudeAgentOptions( + mcp_servers={ + "playwright": {"command": "npx", "args": ["@playwright/mcp@latest"]} + } + ) +): + if isinstance(message, ResultMessage): + print(message.result) +``` + +--- + 
+## Hooks + +Customize agent behavior with hooks using callback functions: + +```python +from claude_agent_sdk import query, ClaudeAgentOptions, HookMatcher, ResultMessage + +async def log_file_change(input_data, tool_use_id, context): + file_path = input_data.get('tool_input', {}).get('file_path', 'unknown') + print(f"Modified: {file_path}") + return {} + +async for message in query( + prompt="Refactor utils.py", + options=ClaudeAgentOptions( + permission_mode="acceptEdits", + hooks={ + "PostToolUse": [HookMatcher(matcher="Edit|Write", hooks=[log_file_change])] + } + ) +): + if isinstance(message, ResultMessage): + print(message.result) +``` + +Available hook events: `PreToolUse`, `PostToolUse`, `PostToolUseFailure`, `Notification`, `UserPromptSubmit`, `SessionStart`, `SessionEnd`, `Stop`, `SubagentStart`, `SubagentStop`, `PreCompact`, `PermissionRequest`, `Setup`, `TeammateIdle`, `TaskCompleted`, `ConfigChange` + +--- + +## Common Options + +`query()` takes a top-level `prompt` (string) and an `options` object (`ClaudeAgentOptions`): + +```python +async for message in query(prompt="...", options=ClaudeAgentOptions(...)): +``` + +| Option | Type | Description | +| ----------------------------------- | ------ | -------------------------------------------------------------------------- | +| `cwd` | string | Working directory for file operations | +| `allowed_tools` | list | Tools the agent can use (e.g., `["Read", "Edit", "Bash"]`) | +| `tools` | list | Built-in tools to make available (restricts the default set) | +| `disallowed_tools` | list | Tools to explicitly disallow | +| `permission_mode` | string | How to handle permission prompts | +| `allow_dangerously_skip_permissions`| bool | Must be `True` to use `permission_mode="bypassPermissions"` | +| `mcp_servers` | dict | MCP servers to connect to | +| `hooks` | dict | Hooks for customizing behavior | +| `system_prompt` | string | Custom system prompt | +| `max_turns` | int | Maximum agent turns before stopping | 
+| `max_budget_usd` | float | Maximum budget in USD for the query | +| `model` | string | Model ID (default: determined by CLI) | +| `agents` | dict | Subagent definitions (`dict[str, AgentDefinition]`) | +| `output_format` | dict | Structured output schema | +| `thinking` | dict | Thinking/reasoning control | +| `betas` | list | Beta features to enable (e.g., `["context-1m-2025-08-07"]`) | +| `setting_sources` | list | Settings to load (e.g., `["project"]`). Default: none (no CLAUDE.md files) | +| `env` | dict | Environment variables to set for the session | + +--- + +## Message Types + +```python +from claude_agent_sdk import query, ClaudeAgentOptions, ResultMessage, SystemMessage + +async for message in query( + prompt="Find TODO comments", + options=ClaudeAgentOptions(allowed_tools=["Read", "Glob", "Grep"]) +): + if isinstance(message, ResultMessage): + print(message.result) + elif isinstance(message, SystemMessage) and message.subtype == "init": + session_id = message.session_id # Capture for resuming later +``` + +--- + +## Subagents + +```python +from claude_agent_sdk import query, ClaudeAgentOptions, AgentDefinition, ResultMessage + +async for message in query( + prompt="Use the code-reviewer agent to review this codebase", + options=ClaudeAgentOptions( + allowed_tools=["Read", "Glob", "Grep", "Agent"], + agents={ + "code-reviewer": AgentDefinition( + description="Expert code reviewer for quality and security reviews.", + prompt="Analyze code quality and suggest improvements.", + tools=["Read", "Glob", "Grep"] + ) + } + ) +): + if isinstance(message, ResultMessage): + print(message.result) +``` + +--- + +## Error Handling + +```python +from claude_agent_sdk import query, ClaudeAgentOptions, CLINotFoundError, CLIConnectionError, ResultMessage + +try: + async for message in query( + prompt="...", + options=ClaudeAgentOptions(allowed_tools=["Read"]) + ): + if isinstance(message, ResultMessage): + print(message.result) +except CLINotFoundError: + print("Claude 
Code CLI not found. Install with: pip install claude-agent-sdk") +except CLIConnectionError as e: + print(f"Connection error: {e}") +``` + +--- + +## Best Practices + +1. **Always specify allowed_tools** — Explicitly list which tools the agent can use +2. **Set working directory** — Always specify `cwd` for file operations +3. **Use appropriate permission modes** — Start with `"default"` and only escalate when needed +4. **Handle all message types** — Check for `ResultMessage` to get agent output +5. **Limit max_turns** — Prevent runaway agents with reasonable limits diff --git a/python/agent-sdk/patterns.md b/python/agent-sdk/patterns.md new file mode 100644 index 0000000..5e6ea91 --- /dev/null +++ b/python/agent-sdk/patterns.md @@ -0,0 +1,319 @@ +# Agent SDK Patterns — Python + +## Basic Agent + +```python +import anyio +from claude_agent_sdk import query, ClaudeAgentOptions, ResultMessage + +async def main(): + async for message in query( + prompt="Explain what this repository does", + options=ClaudeAgentOptions( + cwd="/path/to/project", + allowed_tools=["Read", "Glob", "Grep"] + ) + ): + if isinstance(message, ResultMessage): + print(message.result) + +anyio.run(main) +``` + +--- + +## Custom Tools + +Custom tools require an MCP server. Use `ClaudeSDKClient` for full control, or pass the server to `query()` via `mcp_servers`. 
+ +```python +import anyio +from claude_agent_sdk import ( + tool, + create_sdk_mcp_server, + ClaudeSDKClient, + ClaudeAgentOptions, + AssistantMessage, + TextBlock, +) + +@tool("get_weather", "Get the current weather for a location", {"location": str}) +async def get_weather(args): + location = args["location"] + return {"content": [{"type": "text", "text": f"The weather in {location} is sunny and 72°F."}]} + +server = create_sdk_mcp_server("weather-tools", tools=[get_weather]) + +async def main(): + options = ClaudeAgentOptions(mcp_servers={"weather": server}) + async with ClaudeSDKClient(options=options) as client: + await client.query("What's the weather in Paris?") + async for message in client.receive_response(): + if isinstance(message, AssistantMessage): + for block in message.content: + if isinstance(block, TextBlock): + print(block.text) + +anyio.run(main) +``` + +--- + +## Hooks + +### After Tool Use Hook + +Log file changes after any edit: + +```python +import anyio +from datetime import datetime +from claude_agent_sdk import query, ClaudeAgentOptions, HookMatcher, ResultMessage + +async def log_file_change(input_data, tool_use_id, context): + file_path = input_data.get('tool_input', {}).get('file_path', 'unknown') + with open('./audit.log', 'a') as f: + f.write(f"{datetime.now()}: modified {file_path}\n") + return {} + +async def main(): + async for message in query( + prompt="Refactor utils.py to improve readability", + options=ClaudeAgentOptions( + allowed_tools=["Read", "Edit", "Write"], + permission_mode="acceptEdits", + hooks={ + "PostToolUse": [HookMatcher(matcher="Edit|Write", hooks=[log_file_change])] + } + ) + ): + if isinstance(message, ResultMessage): + print(message.result) + +anyio.run(main) +``` + +--- + +## Subagents + +```python +import anyio +from claude_agent_sdk import query, ClaudeAgentOptions, AgentDefinition, ResultMessage + +async def main(): + async for message in query( + prompt="Use the code-reviewer agent to review this 
codebase", + options=ClaudeAgentOptions( + allowed_tools=["Read", "Glob", "Grep", "Agent"], + agents={ + "code-reviewer": AgentDefinition( + description="Expert code reviewer for quality and security reviews.", + prompt="Analyze code quality and suggest improvements.", + tools=["Read", "Glob", "Grep"] + ) + } + ) + ): + if isinstance(message, ResultMessage): + print(message.result) + +anyio.run(main) +``` + +--- + +## MCP Server Integration + +### Browser Automation (Playwright) + +```python +import anyio +from claude_agent_sdk import query, ClaudeAgentOptions, ResultMessage + +async def main(): + async for message in query( + prompt="Open example.com and describe what you see", + options=ClaudeAgentOptions( + mcp_servers={ + "playwright": {"command": "npx", "args": ["@playwright/mcp@latest"]} + } + ) + ): + if isinstance(message, ResultMessage): + print(message.result) + +anyio.run(main) +``` + +### Database Access (PostgreSQL) + +```python +import os +import anyio +from claude_agent_sdk import query, ClaudeAgentOptions, ResultMessage + +async def main(): + async for message in query( + prompt="Show me the top 10 users by order count", + options=ClaudeAgentOptions( + mcp_servers={ + "postgres": { + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-postgres"], + "env": {"DATABASE_URL": os.environ["DATABASE_URL"]} + } + } + ) + ): + if isinstance(message, ResultMessage): + print(message.result) + +anyio.run(main) +``` + +--- + +## Permission Modes + +```python +import anyio +from claude_agent_sdk import query, ClaudeAgentOptions + +async def main(): + # Default: prompt for dangerous operations + async for message in query( + prompt="Delete all test files", + options=ClaudeAgentOptions( + allowed_tools=["Bash"], + permission_mode="default" # Will prompt before deleting + ) + ): + pass + + # Plan: agent creates a plan before making changes + async for message in query( + prompt="Refactor the auth system", + options=ClaudeAgentOptions( + 
allowed_tools=["Read", "Edit"], + permission_mode="plan" + ) + ): + pass + + # Accept edits: auto-accept file edits + async for message in query( + prompt="Refactor this module", + options=ClaudeAgentOptions( + allowed_tools=["Read", "Edit"], + permission_mode="acceptEdits" + ) + ): + pass + + # Bypass: skip all prompts (use with caution) + async for message in query( + prompt="Set up the development environment", + options=ClaudeAgentOptions( + allowed_tools=["Bash", "Write"], + permission_mode="bypassPermissions", + allow_dangerously_skip_permissions=True + ) + ): + pass + +anyio.run(main) +``` + +--- + +## Error Recovery + +```python +import anyio +from claude_agent_sdk import ( + query, + ClaudeAgentOptions, + CLINotFoundError, + CLIConnectionError, + ProcessError, + ResultMessage, +) + +async def run_with_recovery(): + try: + async for message in query( + prompt="Fix the failing tests", + options=ClaudeAgentOptions( + allowed_tools=["Read", "Edit", "Bash"], + max_turns=10 + ) + ): + if isinstance(message, ResultMessage): + print(message.result) + except CLINotFoundError: + print("Claude Code CLI not found. 
Install with: pip install claude-agent-sdk") + except CLIConnectionError as e: + print(f"Connection error: {e}") + except ProcessError as e: + print(f"Process error: {e}") + +anyio.run(run_with_recovery) +``` + +--- + +## Session Resumption + +```python +import anyio +from claude_agent_sdk import query, ClaudeAgentOptions, ResultMessage, SystemMessage + +async def main(): + session_id = None + + # First query: capture the session ID + async for message in query( + prompt="Read the authentication module", + options=ClaudeAgentOptions(allowed_tools=["Read", "Glob"]) + ): + if isinstance(message, SystemMessage) and message.subtype == "init": + session_id = message.session_id + + # Resume with full context from the first query + async for message in query( + prompt="Now find all places that call it", # "it" = auth module + options=ClaudeAgentOptions(resume=session_id) + ): + if isinstance(message, ResultMessage): + print(message.result) + +anyio.run(main) +``` + +--- + +## Custom System Prompt + +```python +import anyio +from claude_agent_sdk import query, ClaudeAgentOptions, ResultMessage + +async def main(): + async for message in query( + prompt="Review this code", + options=ClaudeAgentOptions( + allowed_tools=["Read", "Glob", "Grep"], + system_prompt="""You are a senior code reviewer focused on: +1. Security vulnerabilities +2. Performance issues +3. 
Code maintainability + +Always provide specific line numbers and suggestions for improvement.""" + ) + ): + if isinstance(message, ResultMessage): + print(message.result) + +anyio.run(main) +``` diff --git a/python/claude-api/README.md b/python/claude-api/README.md new file mode 100644 index 0000000..ae8801b --- /dev/null +++ b/python/claude-api/README.md @@ -0,0 +1,404 @@ +# Claude API — Python + +## Installation + +```bash +pip install anthropic +``` + +## Client Initialization + +```python +import anthropic + +# Default (uses ANTHROPIC_API_KEY env var) +client = anthropic.Anthropic() + +# Explicit API key +client = anthropic.Anthropic(api_key="your-api-key") + +# Async client +async_client = anthropic.AsyncAnthropic() +``` + +--- + +## Basic Message Request + +```python +response = client.messages.create( + model="claude-opus-4-6", + max_tokens=1024, + messages=[ + {"role": "user", "content": "What is the capital of France?"} + ] +) +print(response.content[0].text) +``` + +--- + +## System Prompts + +```python +response = client.messages.create( + model="claude-opus-4-6", + max_tokens=1024, + system="You are a helpful coding assistant. 
Always provide examples in Python.", + messages=[{"role": "user", "content": "How do I read a JSON file?"}] +) +``` + +--- + +## Vision (Images) + +### Base64 + +```python +import base64 + +with open("image.png", "rb") as f: + image_data = base64.standard_b64encode(f.read()).decode("utf-8") + +response = client.messages.create( + model="claude-opus-4-6", + max_tokens=1024, + messages=[{ + "role": "user", + "content": [ + { + "type": "image", + "source": { + "type": "base64", + "media_type": "image/png", + "data": image_data + } + }, + {"type": "text", "text": "What's in this image?"} + ] + }] +) +``` + +### URL + +```python +response = client.messages.create( + model="claude-opus-4-6", + max_tokens=1024, + messages=[{ + "role": "user", + "content": [ + { + "type": "image", + "source": { + "type": "url", + "url": "https://example.com/image.png" + } + }, + {"type": "text", "text": "Describe this image"} + ] + }] +) +``` + +--- + +## Prompt Caching + +Cache large context to reduce costs (up to 90% savings). 
+ +### Automatic Caching (Recommended) + +Use top-level `cache_control` to automatically cache the last cacheable block in the request — no need to annotate individual content blocks: + +```python +response = client.messages.create( + model="claude-opus-4-6", + max_tokens=1024, + cache_control={"type": "ephemeral"}, # auto-caches the last cacheable block + system="You are an expert on this large document...", + messages=[{"role": "user", "content": "Summarize the key points"}] +) +``` + +### Manual Cache Control + +For fine-grained control, add `cache_control` to specific content blocks: + +```python +response = client.messages.create( + model="claude-opus-4-6", + max_tokens=1024, + system=[{ + "type": "text", + "text": "You are an expert on this large document...", + "cache_control": {"type": "ephemeral"} # default TTL is 5 minutes + }], + messages=[{"role": "user", "content": "Summarize the key points"}] +) + +# With explicit TTL (time-to-live) +response = client.messages.create( + model="claude-opus-4-6", + max_tokens=1024, + system=[{ + "type": "text", + "text": "You are an expert on this large document...", + "cache_control": {"type": "ephemeral", "ttl": "1h"} # 1 hour TTL + }], + messages=[{"role": "user", "content": "Summarize the key points"}] +) +``` + +--- + +## Extended Thinking + +> **Opus 4.6 and Sonnet 4.6:** Use adaptive thinking. `budget_tokens` is deprecated on both Opus 4.6 and Sonnet 4.6. +> **Older models:** Use `thinking: {type: "enabled", budget_tokens: N}` (must be < `max_tokens`, min 1024). 
+ +```python +# Opus 4.6: adaptive thinking (recommended) +response = client.messages.create( + model="claude-opus-4-6", + max_tokens=16000, + thinking={"type": "adaptive"}, + output_config={"effort": "high"}, # low | medium | high | max + messages=[{"role": "user", "content": "Solve this step by step..."}] +) + +# Access thinking and response +for block in response.content: + if block.type == "thinking": + print(f"Thinking: {block.thinking}") + elif block.type == "text": + print(f"Response: {block.text}") +``` + +--- + +## Error Handling + +```python +import anthropic + +try: + response = client.messages.create(...) +except anthropic.BadRequestError as e: + print(f"Bad request: {e.message}") +except anthropic.AuthenticationError: + print("Invalid API key") +except anthropic.PermissionDeniedError: + print("API key lacks required permissions") +except anthropic.NotFoundError: + print("Invalid model or endpoint") +except anthropic.RateLimitError as e: + retry_after = int(e.response.headers.get("retry-after", "60")) + print(f"Rate limited. Retry after {retry_after}s.") +except anthropic.APIStatusError as e: + if e.status_code >= 500: + print(f"Server error ({e.status_code}). Retry later.") + else: + print(f"API error: {e.message}") +except anthropic.APIConnectionError: + print("Network error. Check internet connection.") +``` + +--- + +## Multi-Turn Conversations + +The API is stateless — send the full conversation history each time. 
+ +```python +class ConversationManager: + """Manage multi-turn conversations with the Claude API.""" + + def __init__(self, client: anthropic.Anthropic, model: str, system: str = None): + self.client = client + self.model = model + self.system = system + self.messages = [] + + def send(self, user_message: str, **kwargs) -> str: + """Send a message and get a response.""" + self.messages.append({"role": "user", "content": user_message}) + + response = self.client.messages.create( + model=self.model, + max_tokens=kwargs.get("max_tokens", 1024), + system=self.system, + messages=self.messages, + **kwargs + ) + + assistant_message = response.content[0].text + self.messages.append({"role": "assistant", "content": assistant_message}) + + return assistant_message + +# Usage +conversation = ConversationManager( + client=anthropic.Anthropic(), + model="claude-opus-4-6", + system="You are a helpful assistant." +) + +response1 = conversation.send("My name is Alice.") +response2 = conversation.send("What's my name?") # Claude remembers "Alice" +``` + +**Rules:** + +- Messages must alternate between `user` and `assistant` +- First message must be `user` + +--- + +### Compaction (long conversations) + +> **Beta, Opus 4.6 only.** When conversations approach the 200K context window, compaction automatically summarizes earlier context server-side. The API returns a `compaction` block; you must pass it back on subsequent requests — append `response.content`, not just the text. 
+ +```python +import anthropic + +client = anthropic.Anthropic() +messages = [] + +def chat(user_message: str) -> str: + messages.append({"role": "user", "content": user_message}) + + response = client.beta.messages.create( + betas=["compact-2026-01-12"], + model="claude-opus-4-6", + max_tokens=4096, + messages=messages, + context_management={ + "edits": [{"type": "compact_20260112"}] + } + ) + + # Append full content — compaction blocks must be preserved + messages.append({"role": "assistant", "content": response.content}) + + return next(block.text for block in response.content if block.type == "text") + +# Compaction triggers automatically when context grows large +print(chat("Help me build a Python web scraper")) +print(chat("Add support for JavaScript-rendered pages")) +print(chat("Now add rate limiting and error handling")) +``` + +--- + +## Stop Reasons + +The `stop_reason` field in the response indicates why the model stopped generating: + +| Value | Meaning | +|-------|---------| +| `end_turn` | Claude finished its response naturally | +| `max_tokens` | Hit the `max_tokens` limit — increase it or use streaming | +| `stop_sequence` | Hit a custom stop sequence | +| `tool_use` | Claude wants to call a tool — execute it and continue | +| `pause_turn` | Model paused and can be resumed (agentic flows) | +| `refusal` | Claude refused for safety reasons — output may not match your schema | + +--- + +## Cost Optimization Strategies + +### 1. Use Prompt Caching for Repeated Context + +```python +# Automatic caching (simplest — caches the last cacheable block) +response = client.messages.create( + model="claude-opus-4-6", + max_tokens=1024, + cache_control={"type": "ephemeral"}, + system=large_document_text, # e.g., 50KB of context + messages=[{"role": "user", "content": "Summarize the key points"}] +) + +# First request: full cost +# Subsequent requests: ~90% cheaper for cached portion +``` + +### 2. 
Choose the Right Model + +```python +# Default to Opus for most tasks +response = client.messages.create( + model="claude-opus-4-6", # $5.00/$25.00 per 1M tokens + max_tokens=1024, + messages=[{"role": "user", "content": "Explain quantum computing"}] +) + +# Use Sonnet for high-volume production workloads +standard_response = client.messages.create( + model="claude-sonnet-4-6", # $3.00/$15.00 per 1M tokens + max_tokens=1024, + messages=[{"role": "user", "content": "Summarize this document"}] +) + +# Use Haiku only for simple, speed-critical tasks +simple_response = client.messages.create( + model="claude-haiku-4-5", # $1.00/$5.00 per 1M tokens + max_tokens=256, + messages=[{"role": "user", "content": "Classify this as positive or negative"}] +) +``` + +### 3. Use Token Counting Before Requests + +```python +count_response = client.messages.count_tokens( + model="claude-opus-4-6", + messages=messages, + system=system +) + +estimated_input_cost = count_response.input_tokens * 0.000005 # $5/1M tokens +print(f"Estimated input cost: ${estimated_input_cost:.4f}") +``` + +--- + +## Retry with Exponential Backoff + +> **Note:** The Anthropic SDK automatically retries rate limit (429) and server errors (5xx) with exponential backoff. You can configure this with `max_retries` (default: 2). Only implement custom retry logic if you need behavior beyond what the SDK provides. 
+ +```python +import time +import random +import anthropic + +def call_with_retry( + client: anthropic.Anthropic, + max_retries: int = 5, + base_delay: float = 1.0, + max_delay: float = 60.0, + **kwargs +): + """Call the API with exponential backoff retry.""" + last_exception = None + + for attempt in range(max_retries): + try: + return client.messages.create(**kwargs) + except anthropic.RateLimitError as e: + last_exception = e + except anthropic.APIStatusError as e: + if e.status_code >= 500: + last_exception = e + else: + raise # Client errors (4xx except 429) should not be retried + + delay = min(base_delay * (2 ** attempt) + random.uniform(0, 1), max_delay) + print(f"Retry {attempt + 1}/{max_retries} after {delay:.1f}s") + time.sleep(delay) + + raise last_exception +``` diff --git a/python/claude-api/batches.md b/python/claude-api/batches.md new file mode 100644 index 0000000..c902768 --- /dev/null +++ b/python/claude-api/batches.md @@ -0,0 +1,182 @@ +# Message Batches API — Python + +The Batches API (`POST /v1/messages/batches`) processes Messages API requests asynchronously at 50% of standard prices. + +## Key Facts + +- Up to 100,000 requests or 256 MB per batch +- Most batches complete within 1 hour; maximum 24 hours +- Results available for 29 days after creation +- 50% cost reduction on all token usage +- All Messages API features supported (vision, tools, caching, etc.) 
+ +--- + +## Create a Batch + +```python +import anthropic +from anthropic.types.message_create_params import MessageCreateParamsNonStreaming +from anthropic.types.messages.batch_create_params import Request + +client = anthropic.Anthropic() + +message_batch = client.messages.batches.create( + requests=[ + Request( + custom_id="request-1", + params=MessageCreateParamsNonStreaming( + model="claude-opus-4-6", + max_tokens=1024, + messages=[{"role": "user", "content": "Summarize climate change impacts"}] + ) + ), + Request( + custom_id="request-2", + params=MessageCreateParamsNonStreaming( + model="claude-opus-4-6", + max_tokens=1024, + messages=[{"role": "user", "content": "Explain quantum computing basics"}] + ) + ), + ] +) + +print(f"Batch ID: {message_batch.id}") +print(f"Status: {message_batch.processing_status}") +``` + +--- + +## Poll for Completion + +```python +import time + +while True: + batch = client.messages.batches.retrieve(message_batch.id) + if batch.processing_status == "ended": + break + print(f"Status: {batch.processing_status}, processing: {batch.request_counts.processing}") + time.sleep(60) + +print("Batch complete!") +print(f"Succeeded: {batch.request_counts.succeeded}") +print(f"Errored: {batch.request_counts.errored}") +``` + +--- + +## Retrieve Results + +> **Note:** Examples below use `match/case` syntax, requiring Python 3.10+. For earlier versions, use `if/elif` chains instead. 
+ +```python +for result in client.messages.batches.results(message_batch.id): + match result.result.type: + case "succeeded": + print(f"[{result.custom_id}] {result.result.message.content[0].text[:100]}") + case "errored": + if result.result.error.type == "invalid_request": + print(f"[{result.custom_id}] Validation error - fix request and retry") + else: + print(f"[{result.custom_id}] Server error - safe to retry") + case "canceled": + print(f"[{result.custom_id}] Canceled") + case "expired": + print(f"[{result.custom_id}] Expired - resubmit") +``` + +--- + +## Cancel a Batch + +```python +cancelled = client.messages.batches.cancel(message_batch.id) +print(f"Status: {cancelled.processing_status}") # "canceling" +``` + +--- + +## Batch with Prompt Caching + +```python +shared_system = [ + {"type": "text", "text": "You are a literary analyst."}, + { + "type": "text", + "text": large_document_text, # Shared across all requests + "cache_control": {"type": "ephemeral"} + } +] + +message_batch = client.messages.batches.create( + requests=[ + Request( + custom_id=f"analysis-{i}", + params=MessageCreateParamsNonStreaming( + model="claude-opus-4-6", + max_tokens=1024, + system=shared_system, + messages=[{"role": "user", "content": question}] + ) + ) + for i, question in enumerate(questions) + ] +) +``` + +--- + +## Full End-to-End Example + +```python +import anthropic +import time +from anthropic.types.message_create_params import MessageCreateParamsNonStreaming +from anthropic.types.messages.batch_create_params import Request + +client = anthropic.Anthropic() + +# 1. 
Prepare requests +items_to_classify = [ + "The product quality is excellent!", + "Terrible customer service, never again.", + "It's okay, nothing special.", +] + +requests = [ + Request( + custom_id=f"classify-{i}", + params=MessageCreateParamsNonStreaming( + model="claude-haiku-4-5", + max_tokens=50, + messages=[{ + "role": "user", + "content": f"Classify as positive/negative/neutral (one word): {text}" + }] + ) + ) + for i, text in enumerate(items_to_classify) +] + +# 2. Create batch +batch = client.messages.batches.create(requests=requests) +print(f"Created batch: {batch.id}") + +# 3. Wait for completion +while True: + batch = client.messages.batches.retrieve(batch.id) + if batch.processing_status == "ended": + break + time.sleep(10) + +# 4. Collect results +results = {} +for result in client.messages.batches.results(batch.id): + if result.result.type == "succeeded": + results[result.custom_id] = result.result.message.content[0].text + +for custom_id, classification in sorted(results.items()): + print(f"{custom_id}: {classification}") +``` diff --git a/python/claude-api/files-api.md b/python/claude-api/files-api.md new file mode 100644 index 0000000..efcdd9c --- /dev/null +++ b/python/claude-api/files-api.md @@ -0,0 +1,162 @@ +# Files API — Python + +The Files API uploads files for use in Messages API requests. Reference files via `file_id` in content blocks, avoiding re-uploads across multiple API calls. + +**Beta:** Pass `betas=["files-api-2025-04-14"]` in your API calls (the SDK sets the required header automatically). 
+ +## Key Facts + +- Maximum file size: 500 MB +- Total storage: 100 GB per organization +- Files persist until deleted +- File operations (upload, list, delete) are free; content used in messages is billed as input tokens +- Not available on Amazon Bedrock or Google Vertex AI + +--- + +## Upload a File + +```python +import anthropic + +client = anthropic.Anthropic() + +uploaded = client.beta.files.upload( + file=("report.pdf", open("report.pdf", "rb"), "application/pdf"), +) +print(f"File ID: {uploaded.id}") +print(f"Size: {uploaded.size_bytes} bytes") +``` + +--- + +## Use a File in Messages + +### PDF / Text Document + +```python +response = client.beta.messages.create( + model="claude-opus-4-6", + max_tokens=1024, + messages=[{ + "role": "user", + "content": [ + {"type": "text", "text": "Summarize the key findings in this report."}, + { + "type": "document", + "source": {"type": "file", "file_id": uploaded.id}, + "title": "Q4 Report", # optional + "citations": {"enabled": True} # optional, enables citations + } + ] + }], + betas=["files-api-2025-04-14"], +) +print(response.content[0].text) +``` + +### Image + +```python +image_file = client.beta.files.upload( + file=("photo.png", open("photo.png", "rb"), "image/png"), +) + +response = client.beta.messages.create( + model="claude-opus-4-6", + max_tokens=1024, + messages=[{ + "role": "user", + "content": [ + {"type": "text", "text": "What's in this image?"}, + { + "type": "image", + "source": {"type": "file", "file_id": image_file.id} + } + ] + }], + betas=["files-api-2025-04-14"], +) +``` + +--- + +## Manage Files + +### List Files + +```python +files = client.beta.files.list() +for f in files.data: + print(f"{f.id}: {f.filename} ({f.size_bytes} bytes)") +``` + +### Get File Metadata + +```python +file_info = client.beta.files.retrieve_metadata("file_011CNha8iCJcU1wXNR6q4V8w") +print(f"Filename: {file_info.filename}") +print(f"MIME type: {file_info.mime_type}") +``` + +### Delete a File + +```python 
+client.beta.files.delete("file_011CNha8iCJcU1wXNR6q4V8w") +``` + +### Download a File + +Only files created by the code execution tool or skills can be downloaded (not user-uploaded files). + +```python +file_content = client.beta.files.download("file_011CNha8iCJcU1wXNR6q4V8w") +file_content.write_to_file("output.txt") +``` + +--- + +## Full End-to-End Example + +Upload a document once, ask multiple questions about it: + +```python +import anthropic + +client = anthropic.Anthropic() + +# 1. Upload once +uploaded = client.beta.files.upload( + file=("contract.pdf", open("contract.pdf", "rb"), "application/pdf"), +) +print(f"Uploaded: {uploaded.id}") + +# 2. Ask multiple questions using the same file_id +questions = [ + "What are the key terms and conditions?", + "What is the termination clause?", + "Summarize the payment schedule.", +] + +for question in questions: + response = client.beta.messages.create( + model="claude-opus-4-6", + max_tokens=1024, + messages=[{ + "role": "user", + "content": [ + {"type": "text", "text": question}, + { + "type": "document", + "source": {"type": "file", "file_id": uploaded.id} + } + ] + }], + betas=["files-api-2025-04-14"], + ) + print(f"\nQ: {question}") + print(f"A: {response.content[0].text[:200]}") + +# 3. 
Clean up when done +client.beta.files.delete(uploaded.id) +``` diff --git a/python/claude-api/streaming.md b/python/claude-api/streaming.md new file mode 100644 index 0000000..bb2a84e --- /dev/null +++ b/python/claude-api/streaming.md @@ -0,0 +1,162 @@ +# Streaming — Python + +## Quick Start + +```python +with client.messages.stream( + model="claude-opus-4-6", + max_tokens=1024, + messages=[{"role": "user", "content": "Write a story"}] +) as stream: + for text in stream.text_stream: + print(text, end="", flush=True) +``` + +### Async + +```python +async with async_client.messages.stream( + model="claude-opus-4-6", + max_tokens=1024, + messages=[{"role": "user", "content": "Write a story"}] +) as stream: + async for text in stream.text_stream: + print(text, end="", flush=True) +``` + +--- + +## Handling Different Content Types + +Claude may return text, thinking blocks, or tool use. Handle each appropriately: + +> **Opus 4.6:** Use `thinking: {type: "adaptive"}`. On older models, use `thinking: {type: "enabled", budget_tokens: N}` instead. + +```python +with client.messages.stream( + model="claude-opus-4-6", + max_tokens=16000, + thinking={"type": "adaptive"}, + messages=[{"role": "user", "content": "Analyze this problem"}] +) as stream: + for event in stream: + if event.type == "content_block_start": + if event.content_block.type == "thinking": + print("\n[Thinking...]") + elif event.content_block.type == "text": + print("\n[Response:]") + + elif event.type == "content_block_delta": + if event.delta.type == "thinking_delta": + print(event.delta.thinking, end="", flush=True) + elif event.delta.type == "text_delta": + print(event.delta.text, end="", flush=True) +``` + +--- + +## Streaming with Tool Use + +The Python tool runner currently returns complete messages. 
Use streaming for individual API calls within a manual loop if you need per-token streaming with tools: + +```python +with client.messages.stream( + model="claude-opus-4-6", + max_tokens=4096, + tools=tools, + messages=messages +) as stream: + for text in stream.text_stream: + print(text, end="", flush=True) + + response = stream.get_final_message() + # Continue with tool execution if response.stop_reason == "tool_use" +``` + +--- + +## Getting the Final Message + +```python +with client.messages.stream( + model="claude-opus-4-6", + max_tokens=1024, + messages=[{"role": "user", "content": "Hello"}] +) as stream: + for text in stream.text_stream: + print(text, end="", flush=True) + + # Get full message after streaming + final_message = stream.get_final_message() + print(f"\n\nTokens used: {final_message.usage.output_tokens}") +``` + +--- + +## Streaming with Progress Updates + +```python +def stream_with_progress(client, **kwargs): + """Stream a response with progress updates.""" + total_tokens = 0 + content_parts = [] + + with client.messages.stream(**kwargs) as stream: + for event in stream: + if event.type == "content_block_delta": + if event.delta.type == "text_delta": + text = event.delta.text + content_parts.append(text) + print(text, end="", flush=True) + + elif event.type == "message_delta": + if event.usage and event.usage.output_tokens is not None: + total_tokens = event.usage.output_tokens + + final_message = stream.get_final_message() + + print(f"\n\n[Tokens used: {total_tokens}]") + return "".join(content_parts) +``` + +--- + +## Error Handling in Streams + +```python +try: + with client.messages.stream( + model="claude-opus-4-6", + max_tokens=1024, + messages=[{"role": "user", "content": "Write a story"}] + ) as stream: + for text in stream.text_stream: + print(text, end="", flush=True) +except anthropic.APIConnectionError: + print("\nConnection lost. Please retry.") +except anthropic.RateLimitError: + print("\nRate limited. 
Please wait and retry.") +except anthropic.APIStatusError as e: + print(f"\nAPI error: {e.status_code}") +``` + +--- + +## Stream Event Types + +| Event Type | Description | When it fires | +| --------------------- | --------------------------- | --------------------------------- | +| `message_start` | Contains message metadata | Once at the beginning | +| `content_block_start` | New content block beginning | When a text/tool_use block starts | +| `content_block_delta` | Incremental content update | For each token/chunk | +| `content_block_stop` | Content block complete | When a block finishes | +| `message_delta` | Message-level updates | Contains `stop_reason`, usage | +| `message_stop` | Message complete | Once at the end | + +## Best Practices + +1. **Always flush output** — Use `flush=True` to show tokens immediately +2. **Handle partial responses** — If the stream is interrupted, you may have incomplete content +3. **Track token usage** — The `message_delta` event contains usage information +4. **Use timeouts** — Set appropriate timeouts for your application +5. **Default to streaming** — Use `.get_final_message()` to get the complete response even when streaming, giving you timeout protection without needing to handle individual events diff --git a/python/claude-api/tool-use.md b/python/claude-api/tool-use.md new file mode 100644 index 0000000..f347de5 --- /dev/null +++ b/python/claude-api/tool-use.md @@ -0,0 +1,587 @@ +# Tool Use — Python + +For conceptual overview (tool definitions, tool choice, tips), see [shared/tool-use-concepts.md](../../shared/tool-use-concepts.md). + +## Tool Runner (Recommended) + +**Beta:** The tool runner is in beta in the Python SDK. 
+ +Use the `@beta_tool` decorator to define tools as typed functions, then pass them to `client.beta.messages.tool_runner()`: + +```python +import anthropic +from anthropic import beta_tool + +client = anthropic.Anthropic() + +@beta_tool +def get_weather(location: str, unit: str = "celsius") -> str: + """Get current weather for a location. + + Args: + location: City and state, e.g., San Francisco, CA. + unit: Temperature unit, either "celsius" or "fahrenheit". + """ + # Your implementation here + return f"72°F and sunny in {location}" + +# The tool runner handles the agentic loop automatically +runner = client.beta.messages.tool_runner( + model="claude-opus-4-6", + max_tokens=4096, + tools=[get_weather], + messages=[{"role": "user", "content": "What's the weather in Paris?"}], +) + +# Each iteration yields a BetaMessage; iteration stops when Claude is done +for message in runner: + print(message) +``` + +For async usage, use `@beta_async_tool` with `async def` functions. + +**Key benefits of the tool runner:** + +- No manual loop — the SDK handles calling tools and feeding results back +- Type-safe tool inputs via decorators +- Tool schemas are generated automatically from function signatures +- Iteration stops automatically when Claude has no more tool calls + +--- + +## MCP Tool Conversion Helpers + +**Beta.** Convert [MCP (Model Context Protocol)](https://modelcontextprotocol.io/) tools, prompts, and resources to Anthropic API types for use with the tool runner. Requires `pip install anthropic[mcp]` (Python 3.10+). + +> **Note:** The Claude API also supports an `mcp_servers` parameter that lets Claude connect directly to remote MCP servers. Use these helpers instead when you need local MCP servers, prompts, resources, or more control over the MCP connection. 
+ +### MCP Tools with Tool Runner + +```python +from anthropic import AsyncAnthropic +from anthropic.lib.tools.mcp import async_mcp_tool +from mcp import ClientSession +from mcp.client.stdio import stdio_client, StdioServerParameters + +client = AsyncAnthropic() + +async with stdio_client(StdioServerParameters(command="mcp-server")) as (read, write): + async with ClientSession(read, write) as mcp_client: + await mcp_client.initialize() + + tools_result = await mcp_client.list_tools() + runner = await client.beta.messages.tool_runner( + model="claude-opus-4-6", + max_tokens=1024, + messages=[{"role": "user", "content": "Use the available tools"}], + tools=[async_mcp_tool(t, mcp_client) for t in tools_result.tools], + ) + async for message in runner: + print(message) +``` + +For sync usage, use `mcp_tool` instead of `async_mcp_tool`. + +### MCP Prompts + +```python +from anthropic.lib.tools.mcp import mcp_message + +prompt = await mcp_client.get_prompt(name="my-prompt") +response = await client.beta.messages.create( + model="claude-opus-4-6", + max_tokens=1024, + messages=[mcp_message(m) for m in prompt.messages], +) +``` + +### MCP Resources as Content + +```python +from anthropic.lib.tools.mcp import mcp_resource_to_content + +resource = await mcp_client.read_resource(uri="file:///path/to/doc.txt") +response = await client.beta.messages.create( + model="claude-opus-4-6", + max_tokens=1024, + messages=[{ + "role": "user", + "content": [ + mcp_resource_to_content(resource), + {"type": "text", "text": "Summarize this document"}, + ], + }], +) +``` + +### Upload MCP Resources as Files + +```python +from anthropic.lib.tools.mcp import mcp_resource_to_file + +resource = await mcp_client.read_resource(uri="file:///path/to/data.json") +uploaded = await client.beta.files.upload(file=mcp_resource_to_file(resource)) +``` + +Conversion functions raise `UnsupportedMCPValueError` if an MCP value cannot be converted (e.g., unsupported content types like audio, unsupported MIME 
types). + +--- + +## Manual Agentic Loop + +Use this when you need fine-grained control over the loop (e.g., custom logging, conditional tool execution, human-in-the-loop approval): + +```python +import anthropic + +client = anthropic.Anthropic() +tools = [...] # Your tool definitions +messages = [{"role": "user", "content": user_input}] + +# Agentic loop: keep going until Claude stops calling tools +while True: + response = client.messages.create( + model="claude-opus-4-6", + max_tokens=4096, + tools=tools, + messages=messages + ) + + # If Claude is done (no more tool calls), break + if response.stop_reason == "end_turn": + break + + # Server-side tool hit iteration limit; re-send to continue + if response.stop_reason == "pause_turn": + messages = [ + {"role": "user", "content": user_input}, + {"role": "assistant", "content": response.content}, + ] + continue + + # Extract tool use blocks from the response + tool_use_blocks = [b for b in response.content if b.type == "tool_use"] + + # Append assistant's response (including tool_use blocks) + messages.append({"role": "assistant", "content": response.content}) + + # Execute each tool and collect results + tool_results = [] + for tool in tool_use_blocks: + result = execute_tool(tool.name, tool.input) # Your implementation + tool_results.append({ + "type": "tool_result", + "tool_use_id": tool.id, # Must match the tool_use block's id + "content": result + }) + + # Append tool results as a user message + messages.append({"role": "user", "content": tool_results}) + +# Final response text +final_text = next(b.text for b in response.content if b.type == "text") +``` + +--- + +## Handling Tool Results + +```python +response = client.messages.create( + model="claude-opus-4-6", + max_tokens=1024, + tools=tools, + messages=[{"role": "user", "content": "What's the weather in Paris?"}] +) + +for block in response.content: + if block.type == "tool_use": + tool_name = block.name + tool_input = block.input + tool_use_id = block.id 
+ + result = execute_tool(tool_name, tool_input) + + followup = client.messages.create( + model="claude-opus-4-6", + max_tokens=1024, + tools=tools, + messages=[ + {"role": "user", "content": "What's the weather in Paris?"}, + {"role": "assistant", "content": response.content}, + { + "role": "user", + "content": [{ + "type": "tool_result", + "tool_use_id": tool_use_id, + "content": result + }] + } + ] + ) +``` + +--- + +## Multiple Tool Calls + +```python +tool_results = [] + +for block in response.content: + if block.type == "tool_use": + result = execute_tool(block.name, block.input) + tool_results.append({ + "type": "tool_result", + "tool_use_id": block.id, + "content": result + }) + +# Send all results back at once +if tool_results: + followup = client.messages.create( + model="claude-opus-4-6", + max_tokens=1024, + tools=tools, + messages=[ + *previous_messages, + {"role": "assistant", "content": response.content}, + {"role": "user", "content": tool_results} + ] + ) +``` + +--- + +## Error Handling in Tool Results + +```python +tool_result = { + "type": "tool_result", + "tool_use_id": tool_use_id, + "content": "Error: Location 'xyz' not found. 
Please provide a valid city name.", + "is_error": True +} +``` + +--- + +## Tool Choice + +```python +response = client.messages.create( + model="claude-opus-4-6", + max_tokens=1024, + tools=tools, + tool_choice={"type": "tool", "name": "get_weather"}, # Force specific tool + messages=[{"role": "user", "content": "What's the weather in Paris?"}] +) +``` + +--- + +## Code Execution + +### Basic Usage + +```python +import anthropic + +client = anthropic.Anthropic() + +response = client.messages.create( + model="claude-opus-4-6", + max_tokens=4096, + messages=[{ + "role": "user", + "content": "Calculate the mean and standard deviation of [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]" + }], + tools=[{ + "type": "code_execution_20260120", + "name": "code_execution" + }] +) + +for block in response.content: + if block.type == "text": + print(block.text) + elif block.type == "bash_code_execution_tool_result": + print(f"stdout: {block.content.stdout}") +``` + +### Upload Files for Analysis + +```python +# 1. Upload a file +uploaded = client.beta.files.upload(file=open("sales_data.csv", "rb")) + +# 2. Pass to code execution via container_upload block +# Code execution is GA; Files API is still beta (pass via extra_headers) +response = client.messages.create( + model="claude-opus-4-6", + max_tokens=4096, + extra_headers={"anthropic-beta": "files-api-2025-04-14"}, + messages=[{ + "role": "user", + "content": [ + {"type": "text", "text": "Analyze this sales data. 
Show trends and create a visualization."}, + {"type": "container_upload", "file_id": uploaded.id} + ] + }], + tools=[{"type": "code_execution_20260120", "name": "code_execution"}] +) +``` + +### Retrieve Generated Files + +```python +import os + +OUTPUT_DIR = "./claude_outputs" +os.makedirs(OUTPUT_DIR, exist_ok=True) + +for block in response.content: + if block.type == "bash_code_execution_tool_result": + result = block.content + if result.type == "bash_code_execution_result" and result.content: + for file_ref in result.content: + if file_ref.type == "bash_code_execution_output": + metadata = client.beta.files.retrieve_metadata(file_ref.file_id) + file_content = client.beta.files.download(file_ref.file_id) + # Use basename to prevent path traversal; validate result + safe_name = os.path.basename(metadata.filename) + if not safe_name or safe_name in (".", ".."): + print(f"Skipping invalid filename: {metadata.filename}") + continue + output_path = os.path.join(OUTPUT_DIR, safe_name) + file_content.write_to_file(output_path) + print(f"Saved: {output_path}") +``` + +### Container Reuse + +```python +# First request: set up environment +response1 = client.messages.create( + model="claude-opus-4-6", + max_tokens=4096, + messages=[{"role": "user", "content": "Install tabulate and create data.json with sample data"}], + tools=[{"type": "code_execution_20260120", "name": "code_execution"}] +) + +# Get container ID from response +container_id = response1.container.id + +# Second request: reuse the same container +response2 = client.messages.create( + container=container_id, + model="claude-opus-4-6", + max_tokens=4096, + messages=[{"role": "user", "content": "Read data.json and display as a formatted table"}], + tools=[{"type": "code_execution_20260120", "name": "code_execution"}] +) +``` + +### Response Structure + +```python +for block in response.content: + if block.type == "text": + print(block.text) # Claude's explanation + elif block.type == "server_tool_use": + 
print(f"Running: {block.name} - {block.input}") # What Claude is doing + elif block.type == "bash_code_execution_tool_result": + result = block.content + if result.type == "bash_code_execution_result": + if result.return_code == 0: + print(f"Output: {result.stdout}") + else: + print(f"Error: {result.stderr}") + else: + print(f"Tool error: {result.error_code}") + elif block.type == "text_editor_code_execution_tool_result": + print(f"File operation: {block.content}") +``` + +--- + +## Memory Tool + +### Basic Usage + +```python +import anthropic + +client = anthropic.Anthropic() + +response = client.messages.create( + model="claude-opus-4-6", + max_tokens=2048, + messages=[{"role": "user", "content": "Remember that my preferred language is Python."}], + tools=[{"type": "memory_20250818", "name": "memory"}], +) +``` + +### SDK Memory Helper + +Subclass `BetaAbstractMemoryTool`: + +```python +from anthropic.lib.tools import BetaAbstractMemoryTool + +class MyMemoryTool(BetaAbstractMemoryTool): + def view(self, command): ... + def create(self, command): ... + def str_replace(self, command): ... + def insert(self, command): ... + def delete(self, command): ... + def rename(self, command): ... 
+ +memory = MyMemoryTool() + +# Use with tool runner +runner = client.beta.messages.tool_runner( + model="claude-opus-4-6", + max_tokens=2048, + tools=[memory], + messages=[{"role": "user", "content": "Remember my preferences"}], +) + +for message in runner: + print(message) +``` + +For full implementation examples, use WebFetch: + +- `https://github.com/anthropics/anthropic-sdk-python/blob/main/examples/memory/basic.py` + +--- + +## Structured Outputs + +### JSON Outputs (Pydantic — Recommended) + +```python +from pydantic import BaseModel +from typing import List +import anthropic + +class ContactInfo(BaseModel): + name: str + email: str + plan: str + interests: List[str] + demo_requested: bool + +client = anthropic.Anthropic() + +response = client.messages.parse( + model="claude-opus-4-6", + max_tokens=1024, + messages=[{ + "role": "user", + "content": "Extract: Jane Doe (jane@co.com) wants Enterprise, interested in API and SDKs, wants a demo." + }], + output_format=ContactInfo, +) + +# response.parsed_output is a validated ContactInfo instance +contact = response.parsed_output +print(contact.name) # "Jane Doe" +print(contact.interests) # ["API", "SDKs"] +``` + +### Raw Schema + +```python +response = client.messages.create( + model="claude-opus-4-6", + max_tokens=1024, + messages=[{ + "role": "user", + "content": "Extract info: John Smith (john@example.com) wants the Enterprise plan." 
+ }], + output_config={ + "format": { + "type": "json_schema", + "schema": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "email": {"type": "string"}, + "plan": {"type": "string"}, + "demo_requested": {"type": "boolean"} + }, + "required": ["name", "email", "plan", "demo_requested"], + "additionalProperties": False + } + } + } +) + +import json +data = json.loads(response.content[0].text) +``` + +### Strict Tool Use + +```python +response = client.messages.create( + model="claude-opus-4-6", + max_tokens=1024, + messages=[{"role": "user", "content": "Book a flight to Tokyo for 2 passengers on March 15"}], + tools=[{ + "name": "book_flight", + "description": "Book a flight to a destination", + "strict": True, + "input_schema": { + "type": "object", + "properties": { + "destination": {"type": "string"}, + "date": {"type": "string", "format": "date"}, + "passengers": {"type": "integer", "enum": [1, 2, 3, 4, 5, 6, 7, 8]} + }, + "required": ["destination", "date", "passengers"], + "additionalProperties": False + } + }] +) +``` + +### Using Both Together + +```python +response = client.messages.create( + model="claude-opus-4-6", + max_tokens=1024, + messages=[{"role": "user", "content": "Plan a trip to Paris next month"}], + output_config={ + "format": { + "type": "json_schema", + "schema": { + "type": "object", + "properties": { + "summary": {"type": "string"}, + "next_steps": {"type": "array", "items": {"type": "string"}} + }, + "required": ["summary", "next_steps"], + "additionalProperties": False + } + } + }, + tools=[{ + "name": "search_flights", + "description": "Search for available flights", + "strict": True, + "input_schema": { + "type": "object", + "properties": { + "destination": {"type": "string"}, + "date": {"type": "string", "format": "date"} + }, + "required": ["destination", "date"], + "additionalProperties": False + } + }] +) +``` diff --git a/ruby/claude-api.md b/ruby/claude-api.md new file mode 100644 index 0000000..912bc18 --- 
/dev/null +++ b/ruby/claude-api.md @@ -0,0 +1,87 @@ +# Claude API — Ruby + +> **Note:** The Ruby SDK supports the Claude API. A tool runner is available in beta via `client.beta.messages.tool_runner()`. Agent SDK is not yet available for Ruby. + +## Installation + +```bash +gem install anthropic +``` + +## Client Initialization + +```ruby +require "anthropic" + +# Default (uses ANTHROPIC_API_KEY env var) +client = Anthropic::Client.new + +# Explicit API key +client = Anthropic::Client.new(api_key: "your-api-key") +``` + +--- + +## Basic Message Request + +```ruby +message = client.messages.create( + model: :"claude-opus-4-6", + max_tokens: 1024, + messages: [ + { role: "user", content: "What is the capital of France?" } + ] +) +puts message.content.first.text +``` + +--- + +## Streaming + +```ruby +stream = client.messages.stream( + model: :"claude-opus-4-6", + max_tokens: 1024, + messages: [{ role: "user", content: "Write a haiku" }] +) + +stream.text.each { |text| print(text) } +``` + +--- + +## Tool Use + +The Ruby SDK supports tool use via raw JSON schema definitions and also provides a beta tool runner for automatic tool execution. + +### Tool Runner (Beta) + +```ruby +class GetWeatherInput < Anthropic::BaseModel + required :location, String, doc: "City and state, e.g. San Francisco, CA" +end + +class GetWeather < Anthropic::BaseTool + doc "Get the current weather for a location" + + input_schema GetWeatherInput + + def call(input) + "The weather in #{input.location} is sunny and 72°F." + end +end + +client.beta.messages.tool_runner( + model: :"claude-opus-4-6", + max_tokens: 1024, + tools: [GetWeather.new], + messages: [{ role: "user", content: "What's the weather in San Francisco?" }] +).each_message do |message| + puts message.content +end +``` + +### Manual Loop + +See the [shared tool use concepts](../shared/tool-use-concepts.md) for the tool definition format and agentic loop pattern. 
diff --git a/shared/error-codes.md b/shared/error-codes.md new file mode 100644 index 0000000..9f207ba --- /dev/null +++ b/shared/error-codes.md @@ -0,0 +1,205 @@ +# HTTP Error Codes Reference + +This file documents HTTP error codes returned by the Claude API, their common causes, and how to handle them. For language-specific error handling examples, see the `python/` or `typescript/` folders. + +## Error Code Summary + +| Code | Error Type | Retryable | Common Cause | +| ---- | ----------------------- | --------- | ------------------------------------ | +| 400 | `invalid_request_error` | No | Invalid request format or parameters | +| 401 | `authentication_error` | No | Invalid or missing API key | +| 403 | `permission_error` | No | API key lacks permission | +| 404 | `not_found_error` | No | Invalid endpoint or model ID | +| 413 | `request_too_large` | No | Request exceeds size limits | +| 429 | `rate_limit_error` | Yes | Too many requests | +| 500 | `api_error` | Yes | Anthropic service issue | +| 529 | `overloaded_error` | Yes | API is temporarily overloaded | + +## Detailed Error Information + +### 400 Bad Request + +**Causes:** + +- Malformed JSON in request body +- Missing required parameters (`model`, `max_tokens`, `messages`) +- Invalid parameter types (e.g., string where integer expected) +- Empty messages array +- Messages not alternating user/assistant + +**Example error:** + +```json +{ + "type": "error", + "error": { + "type": "invalid_request_error", + "message": "messages: roles must alternate between \"user\" and \"assistant\"" + } +} +``` + +**Fix:** Validate request structure before sending. 
Check that: + +- `model` is a valid model ID +- `max_tokens` is a positive integer +- `messages` array is non-empty and alternates correctly + +--- + +### 401 Unauthorized + +**Causes:** + +- Missing `x-api-key` header or `Authorization` header +- Invalid API key format +- Revoked or deleted API key + +**Fix:** Ensure `ANTHROPIC_API_KEY` environment variable is set correctly. + +--- + +### 403 Forbidden + +**Causes:** + +- API key doesn't have access to the requested model +- Organization-level restrictions +- Attempting to access beta features without beta access + +**Fix:** Check your API key permissions in the Console. You may need a different API key or to request access to specific features. + +--- + +### 404 Not Found + +**Causes:** + +- Typo in model ID (e.g., `claude-sonnet-4.6` instead of `claude-sonnet-4-6`) +- Using a retired model ID (deprecated models continue to work until their retirement date) +- Invalid API endpoint + +**Fix:** Use exact model IDs from the models documentation. You can use aliases (e.g., `claude-opus-4-6`). + +--- + +### 413 Request Too Large + +**Causes:** + +- Request body exceeds maximum size +- Too many tokens in input +- Image data too large + +**Fix:** Reduce input size — truncate conversation history, compress/resize images, or split large documents into chunks. + +--- + +### 400 Validation Errors + +Some 400 errors are specifically related to parameter validation: + +- `max_tokens` exceeds model's limit +- Invalid `temperature` value (must be 0.0-1.0) +- `budget_tokens` >= `max_tokens` in extended thinking +- Invalid tool definition schema + +**Common mistake with extended thinking:** + +``` +# Wrong: budget_tokens must be < max_tokens +thinking: budget_tokens=10000, max_tokens=1000 → Error!
+ +# Correct +thinking: budget_tokens=10000, max_tokens=16000 +``` + +--- + +### 429 Rate Limited + +**Causes:** + +- Exceeded requests per minute (RPM) +- Exceeded tokens per minute (TPM) +- Exceeded tokens per day (TPD) + +**Headers to check:** + +- `retry-after`: Seconds to wait before retrying +- `x-ratelimit-limit-*`: Your limits +- `x-ratelimit-remaining-*`: Remaining quota + +**Fix:** The Anthropic SDKs automatically retry 429 and 5xx errors with exponential backoff (default: `max_retries=2`). For custom retry behavior, see the language-specific error handling examples. + +--- + +### 500 Internal Server Error + +**Causes:** + +- Temporary Anthropic service issue +- Bug in API processing + +**Fix:** Retry with exponential backoff. If persistent, check [status.anthropic.com](https://status.anthropic.com). + +--- + +### 529 Overloaded + +**Causes:** + +- High API demand +- Service capacity reached + +**Fix:** Retry with exponential backoff. Consider using a different model (Haiku is often less loaded), spreading requests over time, or implementing request queuing. + +--- + +## Common Mistakes and Fixes + +| Mistake | Error | Fix | +| ------------------------------- | ---------------- | ------------------------------------------------------- | +| `budget_tokens` >= `max_tokens` | 400 | Ensure `budget_tokens` < `max_tokens` | +| Typo in model ID | 404 | Use valid model ID like `claude-opus-4-6` | +| First message is `assistant` | 400 | First message must be `user` | +| Consecutive same-role messages | 400 | Alternate `user` and `assistant` | +| API key in code | 401 (leaked key) | Use environment variable | +| Custom retry needs | 429/5xx | SDK retries automatically; customize with `max_retries` | + +## Typed Exceptions in SDKs + +**Always use the SDK's typed exception classes** instead of checking error messages with string matching. 
Each HTTP error code maps to a specific exception class: + +| HTTP Code | TypeScript Class | Python Class | +| --------- | --------------------------------- | --------------------------------- | +| 400 | `Anthropic.BadRequestError` | `anthropic.BadRequestError` | +| 401 | `Anthropic.AuthenticationError` | `anthropic.AuthenticationError` | +| 403 | `Anthropic.PermissionDeniedError` | `anthropic.PermissionDeniedError` | +| 404 | `Anthropic.NotFoundError` | `anthropic.NotFoundError` | +| 429 | `Anthropic.RateLimitError` | `anthropic.RateLimitError` | +| 500+ | `Anthropic.InternalServerError` | `anthropic.InternalServerError` | +| Any | `Anthropic.APIError` | `anthropic.APIError` | + +```typescript +// ✅ Correct: use typed exceptions +try { + const response = await client.messages.create({...}); +} catch (error) { + if (error instanceof Anthropic.RateLimitError) { + // Handle rate limiting + } else if (error instanceof Anthropic.APIError) { + console.error(`API error ${error.status}:`, error.message); + } +} + +// ❌ Wrong: don't check error messages with string matching +try { + const response = await client.messages.create({...}); +} catch (error) { + const msg = error instanceof Error ? error.message : String(error); + if (msg.includes("429") || msg.includes("rate_limit")) { ... } +} +``` + +All exception classes extend `Anthropic.APIError`, which has a `status` property. Use `instanceof` checks from most specific to least specific (e.g., check `RateLimitError` before `APIError`). diff --git a/shared/live-sources.md b/shared/live-sources.md new file mode 100644 index 0000000..0b9ebed --- /dev/null +++ b/shared/live-sources.md @@ -0,0 +1,121 @@ +# Live Documentation Sources + +This file contains WebFetch URLs for fetching current information from platform.claude.com and Agent SDK repositories. Use these when users need the latest data that may have changed since the cached content was last updated. 
+ +## When to Use WebFetch + +- User explicitly asks for "latest" or "current" information +- Cached data seems incorrect +- User asks about features not covered in cached content +- User needs specific API details or examples + +## Claude API Documentation URLs + +### Models & Pricing + +| Topic | URL | Extraction Prompt | +| --------------- | --------------------------------------------------------------------- | ------------------------------------------------------------------------------- | +| Models Overview | `https://platform.claude.com/docs/en/about-claude/models/overview.md` | "Extract current model IDs, context windows, and pricing for all Claude models" | +| Pricing | `https://platform.claude.com/docs/en/pricing.md` | "Extract current pricing per million tokens for input and output" | + +### Core Features + +| Topic | URL | Extraction Prompt | +| ----------------- | ---------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| Extended Thinking | `https://platform.claude.com/docs/en/build-with-claude/extended-thinking.md` | "Extract extended thinking parameters, budget_tokens requirements, and usage examples" | +| Adaptive Thinking | `https://platform.claude.com/docs/en/build-with-claude/adaptive-thinking.md` | "Extract adaptive thinking setup, effort levels, and Claude Opus 4.6 usage examples" | +| Effort Parameter | `https://platform.claude.com/docs/en/build-with-claude/effort.md` | "Extract effort levels, cost-quality tradeoffs, and interaction with thinking" | +| Tool Use | `https://platform.claude.com/docs/en/agents-and-tools/tool-use/overview.md` | "Extract tool definition schema, tool_choice options, and handling tool results" | +| Streaming | `https://platform.claude.com/docs/en/build-with-claude/streaming.md` | "Extract streaming event types, SDK examples, and best practices" | +| Prompt Caching | 
`https://platform.claude.com/docs/en/build-with-claude/prompt-caching.md` | "Extract cache_control usage, pricing benefits, and implementation examples" | + +### Media & Files + +| Topic | URL | Extraction Prompt | +| ----------- | ---------------------------------------------------------------------- | ----------------------------------------------------------------- | +| Vision | `https://platform.claude.com/docs/en/build-with-claude/vision.md` | "Extract supported image formats, size limits, and code examples" | +| PDF Support | `https://platform.claude.com/docs/en/build-with-claude/pdf-support.md` | "Extract PDF handling capabilities, limits, and examples" | + +### API Operations + +| Topic | URL | Extraction Prompt | +| ---------------- | --------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------- | +| Batch Processing | `https://platform.claude.com/docs/en/build-with-claude/batch-processing.md` | "Extract batch API endpoints, request format, and polling for results" | +| Files API | `https://platform.claude.com/docs/en/build-with-claude/files.md` | "Extract file upload, download, and referencing in messages, including supported types and beta header" | +| Token Counting | `https://platform.claude.com/docs/en/build-with-claude/token-counting.md` | "Extract token counting API usage and examples" | +| Rate Limits | `https://platform.claude.com/docs/en/api/rate-limits.md` | "Extract current rate limits by tier and model" | +| Errors | `https://platform.claude.com/docs/en/api/errors.md` | "Extract HTTP error codes, meanings, and retry guidance" | + +### Tools + +| Topic | URL | Extraction Prompt | +| -------------- | -------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| Code Execution | 
`https://platform.claude.com/docs/en/agents-and-tools/tool-use/code-execution-tool.md` | "Extract code execution tool setup, file upload, container reuse, and response handling" | +| Computer Use | `https://platform.claude.com/docs/en/agents-and-tools/tool-use/computer-use.md` | "Extract computer use tool setup, capabilities, and implementation examples" | + +### Advanced Features + +| Topic | URL | Extraction Prompt | +| ------------------ | ----------------------------------------------------------------------------- | --------------------------------------------------- | +| Structured Outputs | `https://platform.claude.com/docs/en/build-with-claude/structured-outputs.md` | "Extract output_config.format usage and schema enforcement" | +| Compaction | `https://platform.claude.com/docs/en/build-with-claude/compaction.md` | "Extract compaction setup, trigger config, and streaming with compaction" | +| Citations | `https://platform.claude.com/docs/en/build-with-claude/citations.md` | "Extract citation format and implementation" | +| Context Windows | `https://platform.claude.com/docs/en/build-with-claude/context-windows.md` | "Extract context window sizes and token management" | + +--- + +## Claude API SDK Repositories + +| SDK | URL | Description | +| ---------- | --------------------------------------------------------- | ------------------------------ | +| Python | `https://github.com/anthropics/anthropic-sdk-python` | `anthropic` pip package source | +| TypeScript | `https://github.com/anthropics/anthropic-sdk-typescript` | `@anthropic-ai/sdk` npm source | +| Java | `https://github.com/anthropics/anthropic-sdk-java` | `anthropic-java` Maven source | +| Go | `https://github.com/anthropics/anthropic-sdk-go` | Go module source | +| Ruby | `https://github.com/anthropics/anthropic-sdk-ruby` | `anthropic` gem source | +| C# | `https://github.com/anthropics/anthropic-sdk-csharp` | NuGet package source | +| PHP | `https://github.com/anthropics/anthropic-sdk-php` | 
Composer package source | + +--- + +## Agent SDK Documentation URLs + +### Core Documentation + +| Topic | URL | Extraction Prompt | +| -------------------- | ----------------------------------------------------------- | --------------------------------------------------------------- | +| Agent SDK Overview | `https://platform.claude.com/docs/en/agent-sdk.md` | "Extract the Agent SDK overview, key features, and use cases" | +| Agent SDK Python | `https://github.com/anthropics/claude-agent-sdk-python` | "Extract Python SDK installation, imports, and basic usage" | +| Agent SDK TypeScript | `https://github.com/anthropics/claude-agent-sdk-typescript` | "Extract TypeScript SDK installation, imports, and basic usage" | + +### SDK Reference (GitHub READMEs) + +| Topic | URL | Extraction Prompt | +| -------------- | ----------------------------------------------------------------------------------------- | ------------------------------------------------------------ | +| Python SDK | `https://raw.githubusercontent.com/anthropics/claude-agent-sdk-python/main/README.md` | "Extract Python SDK API reference, classes, and methods" | +| TypeScript SDK | `https://raw.githubusercontent.com/anthropics/claude-agent-sdk-typescript/main/README.md` | "Extract TypeScript SDK API reference, types, and functions" | + +### npm/PyPI Packages + +| Package | URL | Description | +| ----------------------------------- | -------------------------------------------------------------- | ------------------------- | +| claude-agent-sdk (Python) | `https://pypi.org/project/claude-agent-sdk/` | Python package on PyPI | +| @anthropic-ai/claude-agent-sdk (TS) | `https://www.npmjs.com/package/@anthropic-ai/claude-agent-sdk` | TypeScript package on npm | + +### GitHub Repositories + +| Resource | URL | Description | +| -------------- | ----------------------------------------------------------- | ----------------------------------- | +| Python SDK | `https://github.com/anthropics/claude-agent-sdk-python` 
| Python package source | +| TypeScript SDK | `https://github.com/anthropics/claude-agent-sdk-typescript` | TypeScript/Node.js package source | +| MCP Servers | `https://github.com/modelcontextprotocol` | Official MCP server implementations | + +--- + +## Fallback Strategy + +If WebFetch fails (network issues, URL changed): + +1. Use cached content from the language-specific files (note the cache date) +2. Inform user the data may be outdated +3. Suggest they check platform.claude.com or the GitHub repos directly diff --git a/shared/models.md b/shared/models.md new file mode 100644 index 0000000..24d0085 --- /dev/null +++ b/shared/models.md @@ -0,0 +1,68 @@ +# Claude Model Catalog + +**Only use exact model IDs listed in this file.** Never guess or construct model IDs — incorrect IDs will cause API errors. Use aliases wherever available. For the latest information, WebFetch the Models Overview URL in `shared/live-sources.md`. + +## Current Models (recommended) + +| Friendly Name | Alias (use this) | Full ID | Context | Max Output | Status | +|-------------------|---------------------|-------------------------------|----------------|------------|--------| +| Claude Opus 4.6 | `claude-opus-4-6` | — | 200K (1M beta) | 128K | Active | +| Claude Sonnet 4.6 | `claude-sonnet-4-6` | - | 200K (1M beta) | 64K | Active | +| Claude Haiku 4.5 | `claude-haiku-4-5` | `claude-haiku-4-5-20251001` | 200K | 64K | Active | + +### Model Descriptions + +- **Claude Opus 4.6** — Our most intelligent model for building agents and coding. Supports adaptive thinking (recommended), 128K max output tokens (requires streaming for large outputs). 1M context window available in beta via `context-1m-2025-08-07` header. +- **Claude Sonnet 4.6** — Our best combination of speed and intelligence. Supports adaptive thinking (recommended). 1M context window available in beta via `context-1m-2025-08-07` header. 64K max output tokens. 
+- **Claude Haiku 4.5** — Fastest and most cost-effective model for simple tasks. + +## Legacy Models (still active) + +| Friendly Name | Alias (use this) | Full ID | Status | +|-------------------|---------------------|-------------------------------|--------| +| Claude Opus 4.5 | `claude-opus-4-5` | `claude-opus-4-5-20251101` | Active | +| Claude Opus 4.1 | `claude-opus-4-1` | `claude-opus-4-1-20250805` | Active | +| Claude Sonnet 4.5 | `claude-sonnet-4-5` | `claude-sonnet-4-5-20250929` | Active | +| Claude Sonnet 4 | `claude-sonnet-4-0` | `claude-sonnet-4-20250514` | Active | +| Claude Opus 4 | `claude-opus-4-0` | `claude-opus-4-20250514` | Active | + +## Deprecated Models (retiring soon) + +| Friendly Name | Alias (use this) | Full ID | Status | +|-------------------|---------------------|-------------------------------|------------| +| Claude Haiku 3 | — | `claude-3-haiku-20240307` | Deprecated | + +## Retired Models (no longer available) + +| Friendly Name | Full ID | Retired | +|-------------------|-------------------------------|-------------| +| Claude Sonnet 3.7 | `claude-3-7-sonnet-20250219` | Feb 19, 2026 | +| Claude Haiku 3.5 | `claude-3-5-haiku-20241022` | Feb 19, 2026 | +| Claude Opus 3 | `claude-3-opus-20240229` | Jan 5, 2026 | +| Claude Sonnet 3.5 | `claude-3-5-sonnet-20241022` | Oct 28, 2025 | +| Claude Sonnet 3.5 | `claude-3-5-sonnet-20240620` | Oct 28, 2025 | +| Claude Sonnet 3 | `claude-3-sonnet-20240229` | Jul 21, 2025 | +| Claude 2.1 | `claude-2.1` | Jul 21, 2025 | +| Claude 2.0 | `claude-2.0` | Jul 21, 2025 | + +## Resolving User Requests + +When a user asks for a model by name, use this table to find the correct model ID: + +| User says... 
| Use this model ID | +|-------------------------------------------|--------------------------------| +| "opus", "most powerful" | `claude-opus-4-6` | +| "opus 4.6" | `claude-opus-4-6` | +| "opus 4.5" | `claude-opus-4-5` | +| "opus 4.1" | `claude-opus-4-1` | +| "opus 4", "opus 4.0" | `claude-opus-4-0` | +| "sonnet", "balanced" | `claude-sonnet-4-6` | +| "sonnet 4.6" | `claude-sonnet-4-6` | +| "sonnet 4.5" | `claude-sonnet-4-5` | +| "sonnet 4", "sonnet 4.0" | `claude-sonnet-4-0` | +| "sonnet 3.7" | Retired — suggest `claude-sonnet-4-6` | +| "sonnet 3.5" | Retired — suggest `claude-sonnet-4-6` | +| "haiku", "fast", "cheap" | `claude-haiku-4-5` | +| "haiku 4.5" | `claude-haiku-4-5` | +| "haiku 3.5" | Retired — suggest `claude-haiku-4-5` | +| "haiku 3" | Deprecated — suggest `claude-haiku-4-5` | diff --git a/shared/tool-use-concepts.md b/shared/tool-use-concepts.md new file mode 100644 index 0000000..2a1b84e --- /dev/null +++ b/shared/tool-use-concepts.md @@ -0,0 +1,305 @@ +# Tool Use Concepts + +This file covers the conceptual foundations of tool use with the Claude API. For language-specific code examples, see the `python/`, `typescript/`, or other language folders. + +## User-Defined Tools + +### Tool Definition Structure + +> **Note:** When using the Tool Runner (beta), tool schemas are generated automatically from your function signatures (Python), Zod schemas (TypeScript), annotated classes (Java), `jsonschema` struct tags (Go), or `BaseTool` subclasses (Ruby). The raw JSON schema format below is for the manual approach or SDKs without tool runner support.
+ +Each tool requires a name, description, and JSON Schema for its inputs: + +```json +{ + "name": "get_weather", + "description": "Get current weather for a location", + "input_schema": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "City and state, e.g., San Francisco, CA" + }, + "unit": { + "type": "string", + "enum": ["celsius", "fahrenheit"], + "description": "Temperature unit" + } + }, + "required": ["location"] + } +} +``` + +**Best practices for tool definitions:** + +- Use clear, descriptive names (e.g., `get_weather`, `search_database`, `send_email`) +- Write detailed descriptions — Claude uses these to decide when to use the tool +- Include descriptions for each property +- Use `enum` for parameters with a fixed set of values +- Mark truly required parameters in `required`; make others optional with defaults + +--- + +### Tool Choice Options + +Control when Claude uses tools: + +| Value | Behavior | +| --------------------------------- | --------------------------------------------- | +| `{"type": "auto"}` | Claude decides whether to use tools (default) | +| `{"type": "any"}` | Claude must use at least one tool | +| `{"type": "tool", "name": "..."}` | Claude must use the specified tool | +| `{"type": "none"}` | Claude cannot use tools | + +Any `tool_choice` value can also include `"disable_parallel_tool_use": true` to force Claude to use at most one tool per response. By default, Claude may request multiple tool calls in a single response. + +--- + +### Tool Runner vs Manual Loop + +**Tool Runner (Recommended):** The SDK's tool runner handles the agentic loop automatically — it calls the API, detects tool use requests, executes your tool functions, feeds results back to Claude, and repeats until Claude stops calling tools. Available in Python, TypeScript, Java, Go, and Ruby SDKs (beta). 
The Python SDK also provides MCP conversion helpers (`anthropic.lib.tools.mcp`) to convert MCP tools, prompts, and resources for use with the tool runner — see `python/claude-api/tool-use.md` for details. + +**Manual Agentic Loop:** Use when you need fine-grained control over the loop (e.g., custom logging, conditional tool execution, human-in-the-loop approval). Loop until `stop_reason == "end_turn"`, always append the full `response.content` to preserve tool_use blocks, and ensure each `tool_result` includes the matching `tool_use_id`. + +**Stop reasons for server-side tools:** When using server-side tools (code execution, web search, etc.), the API runs a server-side sampling loop. If this loop reaches its default limit of 10 iterations, the response will have `stop_reason: "pause_turn"`. To continue, re-send the user message and assistant response and make another API request — the server will resume where it left off. Do NOT add an extra user message like "Continue." — the API detects the trailing `server_tool_use` block and knows to resume automatically. + +```python +# Handle pause_turn in your agentic loop +if response.stop_reason == "pause_turn": + messages = [ + {"role": "user", "content": user_query}, + {"role": "assistant", "content": response.content}, + ] + # Make another API request — server resumes automatically + response = client.messages.create( + model="claude-opus-4-6", messages=messages, tools=tools + ) +``` + +Set a `max_continuations` limit (e.g., 5) to prevent infinite loops. For the full guide, see: `https://platform.claude.com/docs/en/build-with-claude/handling-stop-reasons` + +> **Security:** The tool runner executes your tool functions automatically whenever Claude requests them. For tools with side effects (sending emails, modifying databases, financial transactions), validate inputs within your tool functions and consider requiring confirmation for destructive operations. 
Use the manual agentic loop if you need human-in-the-loop approval before each tool execution. + +--- + +### Handling Tool Results + +When Claude uses a tool, the response contains a `tool_use` block. You must: + +1. Execute the tool with the provided input +2. Send the result back in a `tool_result` message +3. Continue the conversation + +**Error handling in tool results:** When a tool execution fails, set `"is_error": true` and provide an informative error message. Claude will typically acknowledge the error and either try a different approach or ask for clarification. + +**Multiple tool calls:** Claude can request multiple tools in a single response. Handle them all before continuing — send all results back in a single `user` message. + +--- + +## Server-Side Tools: Code Execution + +The code execution tool lets Claude run code in a secure, sandboxed container. Unlike user-defined tools, server-side tools run on Anthropic's infrastructure — you don't execute anything client-side. Just include the tool definition and Claude handles the rest. + +### Key Facts + +- Runs in an isolated container (1 CPU, 5 GiB RAM, 5 GiB disk) +- No internet access (fully sandboxed) +- Python 3.11 with data science libraries pre-installed +- Containers persist for 30 days and can be reused across requests +- Free when used with web search/web fetch tools; otherwise $0.05/hour after 1,550 free hours/month per organization + +### Tool Definition + +The tool requires no schema — just declare it in the `tools` array: + +```json +{ + "type": "code_execution_20260120", + "name": "code_execution" +} +``` + +Claude automatically gains access to `bash_code_execution` (run shell commands) and `text_editor_code_execution` (create/view/edit files). 
+ +### Pre-installed Python Libraries + +- **Data science**: pandas, numpy, scipy, scikit-learn, statsmodels +- **Visualization**: matplotlib, seaborn +- **File processing**: openpyxl, xlsxwriter, pillow, pypdf, pdfplumber, python-docx, python-pptx +- **Math**: sympy, mpmath +- **Utilities**: tqdm, python-dateutil, pytz, sqlite3 + +Additional packages can be installed at runtime via `pip install`. + +### Supported File Types for Upload + +| Type | Extensions | +| ------ | ---------------------------------- | +| Data | CSV, Excel (.xlsx/.xls), JSON, XML | +| Images | JPEG, PNG, GIF, WebP | +| Text | .txt, .md, .py, .js, etc. | + +### Container Reuse + +Reuse containers across requests to maintain state (files, installed packages, variables). Extract the `container_id` from the first response and pass it to subsequent requests. + +### Response Structure + +The response contains interleaved text and tool result blocks: + +- `text` — Claude's explanation +- `server_tool_use` — What Claude is doing +- `bash_code_execution_tool_result` — Code execution output (check `return_code` for success/failure) +- `text_editor_code_execution_tool_result` — File operation results + +> **Security:** Always sanitize filenames with `os.path.basename()` / `path.basename()` before writing downloaded files to disk to prevent path traversal attacks. Write files to a dedicated output directory. + +--- + +## Server-Side Tools: Web Search and Web Fetch + +Web search and web fetch let Claude search the web and retrieve page content. They run server-side — just include the tool definitions and Claude handles queries, fetching, and result processing automatically. 
+ +### Tool Definitions + +```json +[ + { "type": "web_search_20260209", "name": "web_search" }, + { "type": "web_fetch_20260209", "name": "web_fetch" } +] +``` + +### Dynamic Filtering (Opus 4.6 / Sonnet 4.6) + +The `web_search_20260209` and `web_fetch_20260209` versions support **dynamic filtering** — Claude writes and executes code to filter search results before they reach the context window, improving accuracy and token efficiency. Dynamic filtering is built into these tool versions and activates automatically; you do not need to separately declare the `code_execution` tool or pass any beta header. + +```json +{ + "tools": [ + { "type": "web_search_20260209", "name": "web_search" }, + { "type": "web_fetch_20260209", "name": "web_fetch" } + ] +} +``` + +Without dynamic filtering, the previous `web_search_20250305` version is also available. + +> **Note:** Only include the standalone `code_execution` tool when your application needs code execution for its own purposes (data analysis, file processing, visualization) independent of web search. Including it alongside `_20260209` web tools creates a second execution environment that can confuse the model. + +--- + +## Server-Side Tools: Programmatic Tool Calling + +Programmatic tool calling lets Claude execute complex multi-tool workflows in code, keeping intermediate results out of the context window. Claude writes code that calls your tools directly, reducing token usage for multi-step operations. + +For full documentation, use WebFetch: + +- URL: `https://platform.claude.com/docs/en/agents-and-tools/tool-use/programmatic-tool-calling` + +--- + +## Server-Side Tools: Tool Search + +The tool search tool lets Claude dynamically discover tools from large libraries without loading all definitions into the context window. Useful when you have many tools but only a few are relevant to any given query. 
+ +For full documentation, use WebFetch: + +- URL: `https://platform.claude.com/docs/en/agents-and-tools/tool-use/tool-search-tool` + +--- + +## Tool Use Examples + +You can provide sample tool calls directly in your tool definitions to demonstrate usage patterns and reduce parameter errors. This helps Claude understand how to correctly format tool inputs, especially for tools with complex schemas. + +For full documentation, use WebFetch: + +- URL: `https://platform.claude.com/docs/en/agents-and-tools/tool-use/implement-tool-use` + +--- + +## Server-Side Tools: Computer Use + +Computer use lets Claude interact with a desktop environment (screenshots, mouse, keyboard). It can be Anthropic-hosted (server-side, like code execution) or self-hosted (you provide the environment and execute actions client-side). + +For full documentation, use WebFetch: + +- URL: `https://platform.claude.com/docs/en/agents-and-tools/computer-use/overview` + +--- + +## Client-Side Tools: Memory + +The memory tool enables Claude to store and retrieve information across conversations through a memory file directory. Claude can create, read, update, and delete files that persist between sessions. + +### Key Facts + +- Client-side tool — you control storage via your implementation +- Supports commands: `view`, `create`, `str_replace`, `insert`, `delete`, `rename` +- Operates on files in a `/memories` directory +- The SDKs provide helper classes/functions for implementing the memory backend + +> **Security:** Never store API keys, passwords, tokens, or other secrets in memory files. Be cautious with personally identifiable information (PII) — check data privacy regulations (GDPR, CCPA) before persisting user data. The reference implementations have no built-in access control; in multi-user systems, implement per-user memory directories and authentication in your tool handlers. 
+ +For full implementation examples, use WebFetch: + +- Docs: `https://platform.claude.com/docs/en/agents-and-tools/tool-use/memory-tool.md` + +--- + +## Structured Outputs + +Structured outputs constrain Claude's responses to follow a specific JSON schema, guaranteeing valid, parseable output. This is not a separate tool — it enhances the Messages API response format and/or tool parameter validation. + +Two features are available: + +- **JSON outputs** (`output_config.format`): Control Claude's response format +- **Strict tool use** (`strict: true`): Guarantee valid tool parameter schemas + +**Supported models:** Claude Opus 4.6, Claude Sonnet 4.6, and Claude Haiku 4.5. Legacy models (Claude Opus 4.5, Claude Opus 4.1) also support structured outputs. + +> **Recommended:** Use `client.messages.parse()` which automatically validates responses against your schema. When using `messages.create()` directly, use `output_config: {format: {...}}`. The `output_format` convenience parameter is also accepted by some SDK methods (e.g., `.parse()`), but `output_config.format` is the canonical API-level parameter. + +### JSON Schema Limitations + +**Supported:** + +- Basic types: object, array, string, integer, number, boolean, null +- `enum`, `const`, `anyOf`, `allOf`, `$ref`/`$defs` +- String formats: `date-time`, `time`, `date`, `duration`, `email`, `hostname`, `uri`, `ipv4`, `ipv6`, `uuid` +- `additionalProperties: false` (required for all objects) + +**Not supported:** + +- Recursive schemas +- Numerical constraints (`minimum`, `maximum`, `multipleOf`) +- String constraints (`minLength`, `maxLength`) +- Complex array constraints +- `additionalProperties` set to anything other than `false` + +The Python and TypeScript SDKs automatically handle unsupported constraints by removing them from the schema sent to the API and validating them client-side. + +### Important Notes + +- **First request latency**: New schemas incur a one-time compilation cost. 
Subsequent requests with the same schema use a 24-hour cache. +- **Refusals**: If Claude refuses for safety reasons (`stop_reason: "refusal"`), the output may not match your schema. +- **Token limits**: If `stop_reason: "max_tokens"`, output may be incomplete. Increase `max_tokens`. +- **Incompatible with**: Citations (returns 400 error), message prefilling. +- **Works with**: Batches API, streaming, token counting, extended thinking. + +--- + +## Tips for Effective Tool Use + +1. **Provide detailed descriptions**: Claude relies heavily on descriptions to understand when and how to use tools +2. **Use specific tool names**: `get_current_weather` is better than `weather` +3. **Validate inputs**: Always validate tool inputs before execution +4. **Handle errors gracefully**: Return informative error messages so Claude can adapt +5. **Limit tool count**: Too many tools can confuse the model — keep the set focused +6. **Test tool interactions**: Verify Claude uses tools correctly in various scenarios + +For detailed tool use documentation, use WebFetch: + +- URL: `https://platform.claude.com/docs/en/agents-and-tools/tool-use/overview` diff --git a/typescript/agent-sdk/README.md b/typescript/agent-sdk/README.md new file mode 100644 index 0000000..997ca17 --- /dev/null +++ b/typescript/agent-sdk/README.md @@ -0,0 +1,220 @@ +# Agent SDK — TypeScript + +The Claude Agent SDK provides a higher-level interface for building AI agents with built-in tools, safety features, and agentic capabilities. 
+ +## Installation + +```bash +npm install @anthropic-ai/claude-agent-sdk +``` + +--- + +## Quick Start + +```typescript +import { query } from "@anthropic-ai/claude-agent-sdk"; + +for await (const message of query({ + prompt: "Explain this codebase", + options: { allowedTools: ["Read", "Glob", "Grep"] }, +})) { + if ("result" in message) { + console.log(message.result); + } +} +``` + +--- + +## Built-in Tools + +| Tool | Description | +| --------- | ------------------------------------ | +| Read | Read files in the workspace | +| Write | Create new files | +| Edit | Make precise edits to existing files | +| Bash | Execute shell commands | +| Glob | Find files by pattern | +| Grep | Search files by content | +| WebSearch | Search the web for information | +| WebFetch | Fetch and analyze web pages | +| AskUserQuestion | Ask user clarifying questions | +| Agent | Spawn subagents | + +--- + +## Permission System + +```typescript +for await (const message of query({ + prompt: "Refactor the authentication module", + options: { + allowedTools: ["Read", "Edit", "Write"], + permissionMode: "acceptEdits", + }, +})) { + if ("result" in message) console.log(message.result); +} +``` + +Permission modes: + +- `"default"`: Prompt for dangerous operations +- `"plan"`: Planning only, no execution +- `"acceptEdits"`: Auto-accept file edits +- `"dontAsk"`: Don't prompt (useful for CI/CD) +- `"bypassPermissions"`: Skip all prompts (requires `allowDangerouslySkipPermissions: true` in options) + +--- + +## MCP (Model Context Protocol) Support + +```typescript +for await (const message of query({ + prompt: "Open example.com and describe what you see", + options: { + mcpServers: { + playwright: { command: "npx", args: ["@playwright/mcp@latest"] }, + }, + }, +})) { + if ("result" in message) console.log(message.result); +} +``` + +### In-Process MCP Tools + +You can define custom tools that run in-process using `tool()` and `createSdkMcpServer`: + +```typescript +import { query, tool, 
createSdkMcpServer } from "@anthropic-ai/claude-agent-sdk"; +import { z } from "zod"; + +const myTool = tool("my-tool", "Description", { input: z.string() }, async (args) => { + return { content: [{ type: "text", text: "result" }] }; +}); + +const server = createSdkMcpServer({ name: "my-server", tools: [myTool] }); + +// Pass to query +for await (const message of query({ + prompt: "Use my-tool to do something", + options: { mcpServers: { myServer: server } }, +})) { + if ("result" in message) console.log(message.result); +} +``` + +--- + +## Hooks + +```typescript +import { query, HookCallback } from "@anthropic-ai/claude-agent-sdk"; +import { appendFileSync } from "fs"; + +const logFileChange: HookCallback = async (input) => { + const filePath = (input as any).tool_input?.file_path ?? "unknown"; + appendFileSync( + "./audit.log", + `${new Date().toISOString()}: modified ${filePath}\n`, + ); + return {}; +}; + +for await (const message of query({ + prompt: "Refactor utils.py to improve readability", + options: { + allowedTools: ["Read", "Edit", "Write"], + permissionMode: "acceptEdits", + hooks: { + PostToolUse: [{ matcher: "Edit|Write", hooks: [logFileChange] }], + }, + }, +})) { + if ("result" in message) console.log(message.result); +} +``` + +Available hook events: `PreToolUse`, `PostToolUse`, `PostToolUseFailure`, `Notification`, `UserPromptSubmit`, `SessionStart`, `SessionEnd`, `Stop`, `SubagentStart`, `SubagentStop`, `PreCompact`, `PermissionRequest`, `Setup`, `TeammateIdle`, `TaskCompleted`, `ConfigChange` + +--- + +## Common Options + +`query()` takes a top-level `prompt` (string) and an `options` object: + +```typescript +query({ prompt: "...", options: { ... 
} }) +``` + +| Option | Type | Description | +| ----------------------------------- | ------ | -------------------------------------------------------------------------- | +| `cwd` | string | Working directory for file operations | +| `allowedTools` | array | Tools the agent can use (e.g., `["Read", "Edit", "Bash"]`) | +| `tools` | array | Built-in tools to make available (restricts the default set) | +| `disallowedTools` | array | Tools to explicitly disallow | +| `permissionMode` | string | How to handle permission prompts | +| `allowDangerouslySkipPermissions` | bool | Must be `true` to use `permissionMode: "bypassPermissions"` | +| `mcpServers` | object | MCP servers to connect to | +| `hooks` | object | Hooks for customizing behavior | +| `systemPrompt` | string | Custom system prompt | +| `maxTurns` | number | Maximum agent turns before stopping | +| `maxBudgetUsd` | number | Maximum budget in USD for the query | +| `model` | string | Model ID (default: determined by CLI) | +| `agents` | object | Subagent definitions (`Record<string, AgentDefinition>`) | +| `outputFormat` | object | Structured output schema | +| `thinking` | object | Thinking/reasoning control | +| `betas` | array | Beta features to enable (e.g., `["context-1m-2025-08-07"]`) | +| `settingSources` | array | Settings to load (e.g., `["project"]`). 
Default: none (no CLAUDE.md files) | +| `env` | object | Environment variables to set for the session | + +--- + +## Subagents + +```typescript +for await (const message of query({ + prompt: "Use the code-reviewer agent to review this codebase", + options: { + allowedTools: ["Read", "Glob", "Grep", "Agent"], + agents: { + "code-reviewer": { + description: "Expert code reviewer for quality and security reviews.", + prompt: "Analyze code quality and suggest improvements.", + tools: ["Read", "Glob", "Grep"], + }, + }, + }, +})) { + if ("result" in message) console.log(message.result); +} +``` + +--- + +## Message Types + +```typescript +for await (const message of query({ + prompt: "Find TODO comments", + options: { allowedTools: ["Read", "Glob", "Grep"] }, +})) { + if ("result" in message) { + console.log(message.result); + } else if (message.type === "system" && message.subtype === "init") { + const sessionId = message.session_id; // Capture for resuming later + } +} +``` + +--- + +## Best Practices + +1. **Always specify allowedTools** — Explicitly list which tools the agent can use +2. **Set working directory** — Always specify `cwd` for file operations +3. **Use appropriate permission modes** — Start with `"default"` and only escalate when needed +4. **Handle all message types** — Check for `result` property to get agent output +5. 
**Limit maxTurns** — Prevent runaway agents with reasonable limits diff --git a/typescript/agent-sdk/patterns.md b/typescript/agent-sdk/patterns.md new file mode 100644 index 0000000..62b7fee --- /dev/null +++ b/typescript/agent-sdk/patterns.md @@ -0,0 +1,150 @@ +# Agent SDK Patterns — TypeScript + +## Basic Agent + +```typescript +import { query } from "@anthropic-ai/claude-agent-sdk"; + +async function main() { + for await (const message of query({ + prompt: "Explain what this repository does", + options: { + cwd: "/path/to/project", + allowedTools: ["Read", "Glob", "Grep"], + }, + })) { + if ("result" in message) { + console.log(message.result); + } + } +} + +main(); +``` + +--- + +## Hooks + +### After Tool Use Hook + +```typescript +import { query, HookCallback } from "@anthropic-ai/claude-agent-sdk"; +import { appendFileSync } from "fs"; + +const logFileChange: HookCallback = async (input) => { + const filePath = (input as any).tool_input?.file_path ?? "unknown"; + appendFileSync( + "./audit.log", + `${new Date().toISOString()}: modified ${filePath}\n`, + ); + return {}; +}; + +for await (const message of query({ + prompt: "Refactor utils.py to improve readability", + options: { + allowedTools: ["Read", "Edit", "Write"], + permissionMode: "acceptEdits", + hooks: { + PostToolUse: [{ matcher: "Edit|Write", hooks: [logFileChange] }], + }, + }, +})) { + if ("result" in message) console.log(message.result); +} +``` + +--- + +## Subagents + +```typescript +import { query } from "@anthropic-ai/claude-agent-sdk"; + +for await (const message of query({ + prompt: "Use the code-reviewer agent to review this codebase", + options: { + allowedTools: ["Read", "Glob", "Grep", "Agent"], + agents: { + "code-reviewer": { + description: "Expert code reviewer for quality and security reviews.", + prompt: "Analyze code quality and suggest improvements.", + tools: ["Read", "Glob", "Grep"], + }, + }, + }, +})) { + if ("result" in message) console.log(message.result); +} +``` + +--- 
+ +## MCP Server Integration + +### Browser Automation (Playwright) + +```typescript +for await (const message of query({ + prompt: "Open example.com and describe what you see", + options: { + mcpServers: { + playwright: { command: "npx", args: ["@playwright/mcp@latest"] }, + }, + }, +})) { + if ("result" in message) console.log(message.result); +} +``` + +--- + +## Session Resumption + +```typescript +import { query } from "@anthropic-ai/claude-agent-sdk"; + +let sessionId: string | undefined; + +// First query: capture the session ID +for await (const message of query({ + prompt: "Read the authentication module", + options: { allowedTools: ["Read", "Glob"] }, +})) { + if (message.type === "system" && message.subtype === "init") { + sessionId = message.session_id; + } +} + +// Resume with full context from the first query +for await (const message of query({ + prompt: "Now find all places that call it", + options: { resume: sessionId }, +})) { + if ("result" in message) console.log(message.result); +} +``` + +--- + +## Custom System Prompt + +```typescript +import { query } from "@anthropic-ai/claude-agent-sdk"; + +for await (const message of query({ + prompt: "Review this code", + options: { + allowedTools: ["Read", "Glob", "Grep"], + systemPrompt: `You are a senior code reviewer focused on: +1. Security vulnerabilities +2. Performance issues +3. 
Code maintainability + +Always provide specific line numbers and suggestions for improvement.`, + }, +})) { + if ("result" in message) console.log(message.result); +} +``` diff --git a/typescript/claude-api/README.md b/typescript/claude-api/README.md new file mode 100644 index 0000000..9da778b --- /dev/null +++ b/typescript/claude-api/README.md @@ -0,0 +1,313 @@ +# Claude API — TypeScript + +## Installation + +```bash +npm install @anthropic-ai/sdk +``` + +## Client Initialization + +```typescript +import Anthropic from "@anthropic-ai/sdk"; + +// Default (uses ANTHROPIC_API_KEY env var) +const client = new Anthropic(); + +// Explicit API key +const client = new Anthropic({ apiKey: "your-api-key" }); +``` + +--- + +## Basic Message Request + +```typescript +const response = await client.messages.create({ + model: "claude-opus-4-6", + max_tokens: 1024, + messages: [{ role: "user", content: "What is the capital of France?" }], +}); +console.log(response.content[0].text); +``` + +--- + +## System Prompts + +```typescript +const response = await client.messages.create({ + model: "claude-opus-4-6", + max_tokens: 1024, + system: + "You are a helpful coding assistant. Always provide examples in Python.", + messages: [{ role: "user", content: "How do I read a JSON file?" 
}], +}); +``` + +--- + +## Vision (Images) + +### URL + +```typescript +const response = await client.messages.create({ + model: "claude-opus-4-6", + max_tokens: 1024, + messages: [ + { + role: "user", + content: [ + { + type: "image", + source: { type: "url", url: "https://example.com/image.png" }, + }, + { type: "text", text: "Describe this image" }, + ], + }, + ], +}); +``` + +### Base64 + +```typescript +import fs from "fs"; + +const imageData = fs.readFileSync("image.png").toString("base64"); + +const response = await client.messages.create({ + model: "claude-opus-4-6", + max_tokens: 1024, + messages: [ + { + role: "user", + content: [ + { + type: "image", + source: { type: "base64", media_type: "image/png", data: imageData }, + }, + { type: "text", text: "What's in this image?" }, + ], + }, + ], +}); +``` + +--- + +## Prompt Caching + +### Automatic Caching (Recommended) + +Use top-level `cache_control` to automatically cache the last cacheable block in the request: + +```typescript +const response = await client.messages.create({ + model: "claude-opus-4-6", + max_tokens: 1024, + cache_control: { type: "ephemeral" }, // auto-caches the last cacheable block + system: "You are an expert on this large document...", + messages: [{ role: "user", content: "Summarize the key points" }], +}); +``` + +### Manual Cache Control + +For fine-grained control, add `cache_control` to specific content blocks: + +```typescript +const response = await client.messages.create({ + model: "claude-opus-4-6", + max_tokens: 1024, + system: [ + { + type: "text", + text: "You are an expert on this large document...", + cache_control: { type: "ephemeral" }, // default TTL is 5 minutes + }, + ], + messages: [{ role: "user", content: "Summarize the key points" }], +}); + +// With explicit TTL (time-to-live) +const response2 = await client.messages.create({ + model: "claude-opus-4-6", + max_tokens: 1024, + system: [ + { + type: "text", + text: "You are an expert on this large document...", 
+ cache_control: { type: "ephemeral", ttl: "1h" }, // 1 hour TTL + }, + ], + messages: [{ role: "user", content: "Summarize the key points" }], +}); +``` + +--- + +## Extended Thinking + +> **Opus 4.6 and Sonnet 4.6:** Use adaptive thinking. `budget_tokens` is deprecated on both Opus 4.6 and Sonnet 4.6. +> **Older models:** Use `thinking: {type: "enabled", budget_tokens: N}` (must be < `max_tokens`, min 1024). + +```typescript +// Opus 4.6: adaptive thinking (recommended) +const response = await client.messages.create({ + model: "claude-opus-4-6", + max_tokens: 16000, + thinking: { type: "adaptive" }, + output_config: { effort: "high" }, // low | medium | high | max + messages: [ + { role: "user", content: "Solve this math problem step by step..." }, + ], +}); + +for (const block of response.content) { + if (block.type === "thinking") { + console.log("Thinking:", block.thinking); + } else if (block.type === "text") { + console.log("Response:", block.text); + } +} +``` + +--- + +## Error Handling + +Use the SDK's typed exception classes — never check error messages with string matching: + +```typescript +import Anthropic from "@anthropic-ai/sdk"; + +try { + const response = await client.messages.create({...}); +} catch (error) { + if (error instanceof Anthropic.BadRequestError) { + console.error("Bad request:", error.message); + } else if (error instanceof Anthropic.AuthenticationError) { + console.error("Invalid API key"); + } else if (error instanceof Anthropic.RateLimitError) { + console.error("Rate limited - retry later"); + } else if (error instanceof Anthropic.APIError) { + console.error(`API error ${error.status}:`, error.message); + } +} +``` + +All classes extend `Anthropic.APIError` with a typed `status` field. Check from most specific to least specific. See [shared/error-codes.md](../../shared/error-codes.md) for the full error code reference. + +--- + +## Multi-Turn Conversations + +The API is stateless — send the full conversation history each time. 
Use `Anthropic.MessageParam[]` to type the messages array: + +```typescript +const messages: Anthropic.MessageParam[] = [ + { role: "user", content: "My name is Alice." }, + { role: "assistant", content: "Hello Alice! Nice to meet you." }, + { role: "user", content: "What's my name?" }, +]; + +const response = await client.messages.create({ + model: "claude-opus-4-6", + max_tokens: 1024, + messages: messages, +}); +``` + +**Rules:** + +- Messages must alternate between `user` and `assistant` +- First message must be `user` +- Use SDK types (`Anthropic.MessageParam`, `Anthropic.Message`, `Anthropic.Tool`, etc.) for all API data structures — don't redefine equivalent interfaces + +--- + +### Compaction (long conversations) + +> **Beta, Opus 4.6 only.** When conversations approach the 200K context window, compaction automatically summarizes earlier context server-side. The API returns a `compaction` block; you must pass it back on subsequent requests — append `response.content`, not just the text. + +```typescript +import Anthropic from "@anthropic-ai/sdk"; + +const client = new Anthropic(); +const messages: Anthropic.Beta.BetaMessageParam[] = []; + +async function chat(userMessage: string): Promise<string> { + messages.push({ role: "user", content: userMessage }); + + const response = await client.beta.messages.create({ + betas: ["compact-2026-01-12"], + model: "claude-opus-4-6", + max_tokens: 4096, + messages, + context_management: { + edits: [{ type: "compact_20260112" }], + }, + }); + + // Append full content — compaction blocks must be preserved + messages.push({ role: "assistant", content: response.content }); + + const textBlock = response.content.find((block) => block.type === "text"); + return textBlock?.text ??
""; +} + +// Compaction triggers automatically when context grows large +console.log(await chat("Help me build a Python web scraper")); +console.log(await chat("Add support for JavaScript-rendered pages")); +console.log(await chat("Now add rate limiting and error handling")); +``` + +--- + +## Stop Reasons + +The `stop_reason` field in the response indicates why the model stopped generating: + +| Value | Meaning | +| --------------- | --------------------------------------------------------------- | +| `end_turn` | Claude finished its response naturally | +| `max_tokens` | Hit the `max_tokens` limit — increase it or use streaming | +| `stop_sequence` | Hit a custom stop sequence | +| `tool_use` | Claude wants to call a tool — execute it and continue | +| `pause_turn` | Model paused and can be resumed (agentic flows) | +| `refusal` | Claude refused for safety reasons — output may not match schema | + +--- + +## Cost Optimization Strategies + +### 1. Use Prompt Caching for Repeated Context + +```typescript +// Automatic caching (simplest — caches the last cacheable block) +const response = await client.messages.create({ + model: "claude-opus-4-6", + max_tokens: 1024, + cache_control: { type: "ephemeral" }, + system: largeDocumentText, // e.g., 50KB of context + messages: [{ role: "user", content: "Summarize the key points" }], +}); + +// First request: full cost +// Subsequent requests: ~90% cheaper for cached portion +``` + +### 2. 
Use Token Counting Before Requests + +```typescript +const countResponse = await client.messages.countTokens({ + model: "claude-opus-4-6", + messages: messages, + system: system, +}); + +const estimatedInputCost = countResponse.input_tokens * 0.000005; // $5/1M tokens +console.log(`Estimated input cost: $${estimatedInputCost.toFixed(4)}`); +``` diff --git a/typescript/claude-api/batches.md b/typescript/claude-api/batches.md new file mode 100644 index 0000000..4f6f4f3 --- /dev/null +++ b/typescript/claude-api/batches.md @@ -0,0 +1,106 @@ +# Message Batches API — TypeScript + +The Batches API (`POST /v1/messages/batches`) processes Messages API requests asynchronously at 50% of standard prices. + +## Key Facts + +- Up to 100,000 requests or 256 MB per batch +- Most batches complete within 1 hour; maximum 24 hours +- Results available for 29 days after creation +- 50% cost reduction on all token usage +- All Messages API features supported (vision, tools, caching, etc.) + +--- + +## Create a Batch + +```typescript +import Anthropic from "@anthropic-ai/sdk"; + +const client = new Anthropic(); + +const messageBatch = await client.messages.batches.create({ + requests: [ + { + custom_id: "request-1", + params: { + model: "claude-opus-4-6", + max_tokens: 1024, + messages: [ + { role: "user", content: "Summarize climate change impacts" }, + ], + }, + }, + { + custom_id: "request-2", + params: { + model: "claude-opus-4-6", + max_tokens: 1024, + messages: [ + { role: "user", content: "Explain quantum computing basics" }, + ], + }, + }, + ], +}); + +console.log(`Batch ID: ${messageBatch.id}`); +console.log(`Status: ${messageBatch.processing_status}`); +``` + +--- + +## Poll for Completion + +```typescript +let batch; +while (true) { + batch = await client.messages.batches.retrieve(messageBatch.id); + if (batch.processing_status === "ended") break; + console.log( + `Status: ${batch.processing_status}, processing: ${batch.request_counts.processing}`, + ); + await new 
Promise((resolve) => setTimeout(resolve, 60_000)); +} + +console.log("Batch complete!"); +console.log(`Succeeded: ${batch.request_counts.succeeded}`); +console.log(`Errored: ${batch.request_counts.errored}`); +``` + +--- + +## Retrieve Results + +```typescript +for await (const result of await client.messages.batches.results( + messageBatch.id, +)) { + switch (result.result.type) { + case "succeeded": + console.log( + `[${result.custom_id}] ${result.result.message.content[0].text.slice(0, 100)}`, + ); + break; + case "errored": + if (result.result.error.type === "invalid_request") { + console.log(`[${result.custom_id}] Validation error - fix and retry`); + } else { + console.log(`[${result.custom_id}] Server error - safe to retry`); + } + break; + case "expired": + console.log(`[${result.custom_id}] Expired - resubmit`); + break; + } +} +``` + +--- + +## Cancel a Batch + +```typescript +const cancelled = await client.messages.batches.cancel(messageBatch.id); +console.log(`Status: ${cancelled.processing_status}`); // "canceling" +``` diff --git a/typescript/claude-api/files-api.md b/typescript/claude-api/files-api.md new file mode 100644 index 0000000..8224b52 --- /dev/null +++ b/typescript/claude-api/files-api.md @@ -0,0 +1,98 @@ +# Files API — TypeScript + +The Files API uploads files for use in Messages API requests. Reference files via `file_id` in content blocks, avoiding re-uploads across multiple API calls. + +**Beta:** Pass `betas: ["files-api-2025-04-14"]` in your API calls (the SDK sets the required header automatically). 
+ +## Key Facts + +- Maximum file size: 500 MB +- Total storage: 100 GB per organization +- Files persist until deleted +- File operations (upload, list, delete) are free; content used in messages is billed as input tokens +- Not available on Amazon Bedrock or Google Vertex AI + +--- + +## Upload a File + +```typescript +import Anthropic, { toFile } from "@anthropic-ai/sdk"; +import fs from "fs"; + +const client = new Anthropic(); + +const uploaded = await client.beta.files.upload({ + file: await toFile(fs.createReadStream("report.pdf"), undefined, { + type: "application/pdf", + }), + betas: ["files-api-2025-04-14"], +}); + +console.log(`File ID: ${uploaded.id}`); +console.log(`Size: ${uploaded.size_bytes} bytes`); +``` + +--- + +## Use a File in Messages + +### PDF / Text Document + +```typescript +const response = await client.beta.messages.create({ + model: "claude-opus-4-6", + max_tokens: 1024, + messages: [ + { + role: "user", + content: [ + { type: "text", text: "Summarize the key findings in this report." 
}, + { + type: "document", + source: { type: "file", file_id: uploaded.id }, + title: "Q4 Report", + citations: { enabled: true }, + }, + ], + }, + ], + betas: ["files-api-2025-04-14"], +}); + +console.log(response.content[0].text); +``` + +--- + +## Manage Files + +### List Files + +```typescript +const files = await client.beta.files.list({ + betas: ["files-api-2025-04-14"], +}); +for (const f of files.data) { + console.log(`${f.id}: ${f.filename} (${f.size_bytes} bytes)`); +} +``` + +### Delete a File + +```typescript +await client.beta.files.delete("file_011CNha8iCJcU1wXNR6q4V8w", { + betas: ["files-api-2025-04-14"], +}); +``` + +### Download a File + +```typescript +const response = await client.beta.files.download( + "file_011CNha8iCJcU1wXNR6q4V8w", + { betas: ["files-api-2025-04-14"] }, +); +const content = Buffer.from(await response.arrayBuffer()); +await fs.promises.writeFile("output.txt", content); +``` diff --git a/typescript/claude-api/streaming.md b/typescript/claude-api/streaming.md new file mode 100644 index 0000000..a950571 --- /dev/null +++ b/typescript/claude-api/streaming.md @@ -0,0 +1,178 @@ +# Streaming — TypeScript + +## Quick Start + +```typescript +const stream = client.messages.stream({ + model: "claude-opus-4-6", + max_tokens: 1024, + messages: [{ role: "user", content: "Write a story" }], +}); + +for await (const event of stream) { + if ( + event.type === "content_block_delta" && + event.delta.type === "text_delta" + ) { + process.stdout.write(event.delta.text); + } +} +``` + +--- + +## Handling Different Content Types + +> **Opus 4.6:** Use `thinking: {type: "adaptive"}`. On older models, use `thinking: {type: "enabled", budget_tokens: N}` instead. 
+ +```typescript +const stream = client.messages.stream({ + model: "claude-opus-4-6", + max_tokens: 16000, + thinking: { type: "adaptive" }, + messages: [{ role: "user", content: "Analyze this problem" }], +}); + +for await (const event of stream) { + switch (event.type) { + case "content_block_start": + switch (event.content_block.type) { + case "thinking": + console.log("\n[Thinking...]"); + break; + case "text": + console.log("\n[Response:]"); + break; + } + break; + case "content_block_delta": + switch (event.delta.type) { + case "thinking_delta": + process.stdout.write(event.delta.thinking); + break; + case "text_delta": + process.stdout.write(event.delta.text); + break; + } + break; + } +} +``` + +--- + +## Streaming with Tool Use (Tool Runner) + +Use the tool runner with `stream: true`. The outer loop iterates over tool runner iterations (messages), the inner loop processes stream events: + +```typescript +import Anthropic from "@anthropic-ai/sdk"; +import { betaZodTool } from "@anthropic-ai/sdk/helpers/beta/zod"; +import { z } from "zod"; + +const client = new Anthropic(); + +const getWeather = betaZodTool({ + name: "get_weather", + description: "Get current weather for a location", + inputSchema: z.object({ + location: z.string().describe("City and state, e.g., San Francisco, CA"), + }), + run: async ({ location }) => `72°F and sunny in ${location}`, +}); + +const runner = client.beta.messages.toolRunner({ + model: "claude-opus-4-6", + max_tokens: 4096, + tools: [getWeather], + messages: [ + { role: "user", content: "What's the weather in Paris and London?" 
}, + ], + stream: true, +}); + +// Outer loop: each tool runner iteration +for await (const messageStream of runner) { + // Inner loop: stream events for this iteration + for await (const event of messageStream) { + switch (event.type) { + case "content_block_delta": + switch (event.delta.type) { + case "text_delta": + process.stdout.write(event.delta.text); + break; + case "input_json_delta": + // Tool input being streamed + break; + } + break; + } + } +} +``` + +--- + +## Getting the Final Message + +```typescript +const stream = client.messages.stream({ + model: "claude-opus-4-6", + max_tokens: 1024, + messages: [{ role: "user", content: "Hello" }], +}); + +for await (const event of stream) { + // Process events... +} + +const finalMessage = await stream.finalMessage(); +console.log(`Tokens used: ${finalMessage.usage.output_tokens}`); +``` + +--- + +## Stream Event Types + +| Event Type | Description | When it fires | +| --------------------- | --------------------------- | --------------------------------- | +| `message_start` | Contains message metadata | Once at the beginning | +| `content_block_start` | New content block beginning | When a text/tool_use block starts | +| `content_block_delta` | Incremental content update | For each token/chunk | +| `content_block_stop` | Content block complete | When a block finishes | +| `message_delta` | Message-level updates | Contains `stop_reason`, usage | +| `message_stop` | Message complete | Once at the end | + +## Best Practices + +1. **Always flush output** — Use `process.stdout.write()` for immediate display +2. **Handle partial responses** — If the stream is interrupted, you may have incomplete content +3. **Track token usage** — The `message_delta` event contains usage information +4. **Use `finalMessage()`** — Get the complete `Anthropic.Message` object even when streaming. Don't wrap `.on()` events in `new Promise()` — `finalMessage()` handles all completion/error/abort states internally +5. 
**Buffer for web UIs** — Consider buffering a few tokens before rendering to avoid excessive DOM updates +6. **Use `stream.on("text", ...)` for deltas** — The `text` event provides just the delta string, simpler than manually filtering `content_block_delta` events +7. **For agentic loops with streaming** — See the [Streaming Manual Loop](./tool-use.md#streaming-manual-loop) section in tool-use.md for combining `stream()` + `finalMessage()` with a tool-use loop + +## Raw SSE Format + +If using raw HTTP (not SDKs), the stream returns Server-Sent Events: + +``` +event: message_start +data: {"type":"message_start","message":{"id":"msg_...","type":"message",...}} + +event: content_block_start +data: {"type":"content_block_start","index":0,"content_block":{"type":"text","text":""}} + +event: content_block_delta +data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Hello"}} + +event: content_block_stop +data: {"type":"content_block_stop","index":0} + +event: message_delta +data: {"type":"message_delta","delta":{"stop_reason":"end_turn"},"usage":{"output_tokens":12}} + +event: message_stop +data: {"type":"message_stop"} +``` diff --git a/typescript/claude-api/tool-use.md b/typescript/claude-api/tool-use.md new file mode 100644 index 0000000..98fe131 --- /dev/null +++ b/typescript/claude-api/tool-use.md @@ -0,0 +1,477 @@ +# Tool Use — TypeScript + +For conceptual overview (tool definitions, tool choice, tips), see [shared/tool-use-concepts.md](../../shared/tool-use-concepts.md). + +## Tool Runner (Recommended) + +**Beta:** The tool runner is in beta in the TypeScript SDK. 
+ +Use `betaZodTool` with Zod schemas to define tools with a `run` function, then pass them to `client.beta.messages.toolRunner()`: + +```typescript +import Anthropic from "@anthropic-ai/sdk"; +import { betaZodTool } from "@anthropic-ai/sdk/helpers/beta/zod"; +import { z } from "zod"; + +const client = new Anthropic(); + +const getWeather = betaZodTool({ + name: "get_weather", + description: "Get current weather for a location", + inputSchema: z.object({ + location: z.string().describe("City and state, e.g., San Francisco, CA"), + unit: z.enum(["celsius", "fahrenheit"]).optional(), + }), + run: async (input) => { + // Your implementation here + return `72°F and sunny in ${input.location}`; + }, +}); + +// The tool runner handles the agentic loop and returns the final message +const finalMessage = await client.beta.messages.toolRunner({ + model: "claude-opus-4-6", + max_tokens: 4096, + tools: [getWeather], + messages: [{ role: "user", content: "What's the weather in Paris?" }], +}); + +console.log(finalMessage.content); +``` + +**Key benefits of the tool runner:** + +- No manual loop — the SDK handles calling tools and feeding results back +- Type-safe tool inputs via Zod schemas +- Tool schemas are generated automatically from Zod definitions +- Iteration stops automatically when Claude has no more tool calls + +--- + +## Manual Agentic Loop + +Use this when you need fine-grained control (custom logging, conditional tool execution, streaming individual iterations, human-in-the-loop approval): + +```typescript +import Anthropic from "@anthropic-ai/sdk"; + +const client = new Anthropic(); +const tools: Anthropic.Tool[] = [...]; // Your tool definitions +let messages: Anthropic.MessageParam[] = [{ role: "user", content: userInput }]; + +while (true) { + const response = await client.messages.create({ + model: "claude-opus-4-6", + max_tokens: 4096, + tools: tools, + messages: messages, + }); + + if (response.stop_reason === "end_turn") break; + + // Server-side tool 
hit iteration limit; re-send to continue + if (response.stop_reason === "pause_turn") { + messages = [ + { role: "user", content: userInput }, + { role: "assistant", content: response.content }, + ]; + continue; + } + + const toolUseBlocks = response.content.filter( + (b): b is Anthropic.ToolUseBlock => b.type === "tool_use", + ); + + messages.push({ role: "assistant", content: response.content }); + + const toolResults: Anthropic.ToolResultBlockParam[] = []; + for (const tool of toolUseBlocks) { + const result = await executeTool(tool.name, tool.input); + toolResults.push({ + type: "tool_result", + tool_use_id: tool.id, + content: result, + }); + } + + messages.push({ role: "user", content: toolResults }); +} +``` + +### Streaming Manual Loop + +Use `client.messages.stream()` + `finalMessage()` instead of `.create()` when you need streaming within a manual loop. Text deltas are streamed on each iteration; `finalMessage()` collects the complete `Message` so you can inspect `stop_reason` and extract tool-use blocks: + +```typescript +import Anthropic from "@anthropic-ai/sdk"; + +const client = new Anthropic(); +const tools: Anthropic.Tool[] = [...]; +let messages: Anthropic.MessageParam[] = [{ role: "user", content: userInput }]; + +while (true) { + const stream = client.messages.stream({ + model: "claude-opus-4-6", + max_tokens: 4096, + tools, + messages, + }); + + // Stream text deltas on each iteration + stream.on("text", (delta) => { + process.stdout.write(delta); + }); + + // finalMessage() resolves with the complete Message — no need to + // manually wire up .on("message") / .on("error") / .on("abort") + const message = await stream.finalMessage(); + + if (message.stop_reason === "end_turn") break; + + // Server-side tool hit iteration limit; re-send to continue + if (message.stop_reason === "pause_turn") { + messages = [ + { role: "user", content: userInput }, + { role: "assistant", content: message.content }, + ]; + continue; + } + + const toolUseBlocks = 
message.content.filter( + (b): b is Anthropic.ToolUseBlock => b.type === "tool_use", + ); + + messages.push({ role: "assistant", content: message.content }); + + const toolResults: Anthropic.ToolResultBlockParam[] = []; + for (const tool of toolUseBlocks) { + const result = await executeTool(tool.name, tool.input); + toolResults.push({ + type: "tool_result", + tool_use_id: tool.id, + content: result, + }); + } + + messages.push({ role: "user", content: toolResults }); +} +``` + +> **Important:** Don't wrap `.on()` events in `new Promise()` to collect the final message — use `stream.finalMessage()` instead. The SDK handles all error/abort/completion states internally. + +> **Error handling in the loop:** Use the SDK's typed exceptions (e.g., `Anthropic.RateLimitError`, `Anthropic.APIError`) — see [Error Handling](./README.md#error-handling) for examples. Don't check error messages with string matching. + +> **SDK types:** Use `Anthropic.MessageParam`, `Anthropic.Tool`, `Anthropic.ToolUseBlock`, `Anthropic.ToolResultBlockParam`, `Anthropic.Message`, etc. for all API-related data structures. Don't redefine equivalent interfaces. + +--- + +## Handling Tool Results + +```typescript +const response = await client.messages.create({ + model: "claude-opus-4-6", + max_tokens: 1024, + tools: tools, + messages: [{ role: "user", content: "What's the weather in Paris?" }], +}); + +for (const block of response.content) { + if (block.type === "tool_use") { + const result = await executeTool(block.name, block.input); + + const followup = await client.messages.create({ + model: "claude-opus-4-6", + max_tokens: 1024, + tools: tools, + messages: [ + { role: "user", content: "What's the weather in Paris?" 
}, + { role: "assistant", content: response.content }, + { + role: "user", + content: [ + { type: "tool_result", tool_use_id: block.id, content: result }, + ], + }, + ], + }); + } +} +``` + +--- + +## Tool Choice + +```typescript +const response = await client.messages.create({ + model: "claude-opus-4-6", + max_tokens: 1024, + tools: tools, + tool_choice: { type: "tool", name: "get_weather" }, + messages: [{ role: "user", content: "What's the weather in Paris?" }], +}); +``` + +--- + +## Code Execution + +### Basic Usage + +```typescript +import Anthropic from "@anthropic-ai/sdk"; + +const client = new Anthropic(); + +const response = await client.messages.create({ + model: "claude-opus-4-6", + max_tokens: 4096, + messages: [ + { + role: "user", + content: + "Calculate the mean and standard deviation of [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]", + }, + ], + tools: [{ type: "code_execution_20260120", name: "code_execution" }], +}); +``` + +### Upload Files for Analysis + +```typescript +import Anthropic, { toFile } from "@anthropic-ai/sdk"; +import { createReadStream } from "fs"; + +const client = new Anthropic(); + +// 1. Upload a file +const uploaded = await client.beta.files.upload({ + file: await toFile(createReadStream("sales_data.csv"), undefined, { + type: "text/csv", + }), + betas: ["files-api-2025-04-14"], +}); + +// 2. Pass to code execution +// Code execution is GA; Files API is still beta (pass via RequestOptions) +const response = await client.messages.create( + { + model: "claude-opus-4-6", + max_tokens: 4096, + messages: [ + { + role: "user", + content: [ + { + type: "text", + text: "Analyze this sales data. 
Show trends and create a visualization.", + }, + { type: "container_upload", file_id: uploaded.id }, + ], + }, + ], + tools: [{ type: "code_execution_20260120", name: "code_execution" }], + }, + { headers: { "anthropic-beta": "files-api-2025-04-14" } }, +); +``` + +### Retrieve Generated Files + +```typescript +import path from "path"; +import fs from "fs"; + +const OUTPUT_DIR = "./claude_outputs"; +await fs.promises.mkdir(OUTPUT_DIR, { recursive: true }); + +for (const block of response.content) { + if (block.type === "bash_code_execution_tool_result") { + const result = block.content; + if (result.type === "bash_code_execution_result" && result.content) { + for (const fileRef of result.content) { + if (fileRef.type === "bash_code_execution_output") { + const metadata = await client.beta.files.retrieveMetadata( + fileRef.file_id, + ); + const response = await client.beta.files.download(fileRef.file_id); + const fileBytes = Buffer.from(await response.arrayBuffer()); + const safeName = path.basename(metadata.filename); + if (!safeName || safeName === "." 
|| safeName === "..") { + console.warn(`Skipping invalid filename: ${metadata.filename}`); + continue; + } + const outputPath = path.join(OUTPUT_DIR, safeName); + await fs.promises.writeFile(outputPath, fileBytes); + console.log(`Saved: ${outputPath}`); + } + } + } + } +} +``` + +### Container Reuse + +```typescript +// First request: set up environment +const response1 = await client.messages.create({ + model: "claude-opus-4-6", + max_tokens: 4096, + messages: [ + { + role: "user", + content: "Install tabulate and create data.json with sample user data", + }, + ], + tools: [{ type: "code_execution_20260120", name: "code_execution" }], +}); + +// Reuse container +const containerId = response1.container.id; + +const response2 = await client.messages.create({ + container: containerId, + model: "claude-opus-4-6", + max_tokens: 4096, + messages: [ + { + role: "user", + content: "Read data.json and display as a formatted table", + }, + ], + tools: [{ type: "code_execution_20260120", name: "code_execution" }], +}); +``` + +--- + +## Memory Tool + +### Basic Usage + +```typescript +const response = await client.messages.create({ + model: "claude-opus-4-6", + max_tokens: 2048, + messages: [ + { + role: "user", + content: "Remember that my preferred language is TypeScript.", + }, + ], + tools: [{ type: "memory_20250818", name: "memory" }], +}); +``` + +### SDK Memory Helper + +Use `betaMemoryTool` with a `MemoryToolHandlers` implementation: + +```typescript +import { + betaMemoryTool, + type MemoryToolHandlers, +} from "@anthropic-ai/sdk/helpers/beta/memory"; + +const handlers: MemoryToolHandlers = { + async view(command) { ... }, + async create(command) { ... }, + async str_replace(command) { ... }, + async insert(command) { ... }, + async delete(command) { ... }, + async rename(command) { ... 
}, +}; + +const memory = betaMemoryTool(handlers); + +const runner = client.beta.messages.toolRunner({ + model: "claude-opus-4-6", + max_tokens: 2048, + tools: [memory], + messages: [{ role: "user", content: "Remember my preferences" }], +}); + +for await (const message of runner) { + console.log(message); +} +``` + +For full implementation examples, use WebFetch: + +- `https://github.com/anthropics/anthropic-sdk-typescript/blob/main/examples/tools-helpers-memory.ts` + +--- + +## Structured Outputs + +### JSON Outputs (Zod — Recommended) + +```typescript +import Anthropic from "@anthropic-ai/sdk"; +import { z } from "zod"; +import { zodOutputFormat } from "@anthropic-ai/sdk/helpers/zod"; + +const ContactInfoSchema = z.object({ + name: z.string(), + email: z.string(), + plan: z.string(), + interests: z.array(z.string()), + demo_requested: z.boolean(), +}); + +const client = new Anthropic(); + +const response = await client.messages.parse({ + model: "claude-opus-4-6", + max_tokens: 1024, + messages: [ + { + role: "user", + content: + "Extract: Jane Doe (jane@co.com) wants Enterprise, interested in API and SDKs, wants a demo.", + }, + ], + output_config: { + format: zodOutputFormat(ContactInfoSchema), + }, +}); + +console.log(response.parsed_output.name); // "Jane Doe" +``` + +### Strict Tool Use + +```typescript +const response = await client.messages.create({ + model: "claude-opus-4-6", + max_tokens: 1024, + messages: [ + { + role: "user", + content: "Book a flight to Tokyo for 2 passengers on March 15", + }, + ], + tools: [ + { + name: "book_flight", + description: "Book a flight to a destination", + strict: true, + input_schema: { + type: "object", + properties: { + destination: { type: "string" }, + date: { type: "string", format: "date" }, + passengers: { + type: "integer", + enum: [1, 2, 3, 4, 5, 6, 7, 8], + }, + }, + required: ["destination", "date", "passengers"], + additionalProperties: false, + }, + }, + ], +}); +```