diff --git a/web/app/components/workflow/skill/start-tab/templates/registry.ts b/web/app/components/workflow/skill/start-tab/templates/registry.ts index 19239e6fd9..764ae143ef 100644 --- a/web/app/components/workflow/skill/start-tab/templates/registry.ts +++ b/web/app/components/workflow/skill/start-tab/templates/registry.ts @@ -24,6 +24,13 @@ export const SKILL_TEMPLATES: SkillTemplateEntry[] = [ fileCount: 82, loadContent: () => import('./skills/canvas-design').then(m => m.default), }, + { + id: 'claude-api', + name: 'claude-api', + description: 'Build apps with the Claude API or Anthropic SDK. TRIGGER when: code imports `anthropic`/`@anthropic-ai/sdk`/`claude_agent_sdk`, or user asks to use Claude API, Anthropic SDKs, or Agent SDK. DO NOT TRIGGER when: code imports `openai`/other AI SDK, general programming, or ML/data-science tasks.', + fileCount: 26, + loadContent: () => import('./skills/claude-api').then(m => m.default), + }, { id: 'doc-coauthoring', name: 'doc-coauthoring', @@ -34,7 +41,7 @@ export const SKILL_TEMPLATES: SkillTemplateEntry[] = [ { id: 'docx', name: 'docx', - description: 'Use this skill whenever the user wants to create, read, edit, or manipulate Word documents (.docx files). Triggers include: any mention of \\"Word doc\\", \\"word document\\", \\".docx\\", or requests to produce professional documents with formatting like tables of contents, headings, page numbers, or letterheads. Also use when extracting or reorganizing content from .docx files, inserting or replacing images in documents, performing find-and-replace in Word files, working with tracked changes or comments, or converting content into a polished Word document. If the user asks for a \\"report\\", \\"memo\\", \\"letter\\", \\"template\\", or similar deliverable as a Word or .docx file, use this skill. Do NOT use for PDFs, spreadsheets, Google Docs, or general coding tasks unrelated to document generation.', + description: 'Use this skill whenever the user wants to create, read, edit, or manipulate Word documents (.docx files). Triggers include: any mention of \'Word doc\', \'word document\', \'.docx\', or requests to produce professional documents with formatting like tables of contents, headings, page numbers, or letterheads. Also use when extracting or reorganizing content from .docx files, inserting or replacing images in documents, performing find-and-replace in Word files, working with tracked changes or comments, or converting content into a polished Word document. If the user asks for a \'report\', \'memo\', \'letter\', \'template\', or similar deliverable as a Word or .docx file, use this skill. Do NOT use for PDFs, spreadsheets, Google Docs, or general coding tasks unrelated to document generation.', fileCount: 60, loadContent: () => import('./skills/docx').then(m => m.default), }, @@ -76,8 +83,8 @@ export const SKILL_TEMPLATES: SkillTemplateEntry[] = [ { id: 'skill-creator', name: 'skill-creator', - description: 'Guide for creating effective skills. This skill should be used when users want to create a new skill (or update an existing skill) that extends Claude\'s capabilities with specialized knowledge, workflows, or tool integrations.', - fileCount: 6, + description: 'Create new skills, modify and improve existing skills, and measure skill performance. 
Use when users want to create a skill from scratch, edit or optimize an existing skill, run evals to test a skill, benchmark skill performance with variance analysis, or optimize a skill\'s description for better triggering accuracy.', + fileCount: 17, loadContent: () => import('./skills/skill-creator').then(m => m.default), }, { diff --git a/web/app/components/workflow/skill/start-tab/templates/skills/claude-api.ts b/web/app/components/workflow/skill/start-tab/templates/skills/claude-api.ts new file mode 100644 index 0000000000..6c10c42715 --- /dev/null +++ b/web/app/components/workflow/skill/start-tab/templates/skills/claude-api.ts @@ -0,0 +1,216 @@ +// AUTO-GENERATED — DO NOT EDIT +// Source: https://github.com/anthropics/skills +import type { SkillTemplateNode } from '../types' + +const children: SkillTemplateNode[] = [ + { + "name": "SKILL.md", + "node_type": "file", + "content": "---\nname: claude-api\ndescription: \"Build apps with the Claude API or Anthropic SDK. TRIGGER when: code imports `anthropic`/`@anthropic-ai/sdk`/`claude_agent_sdk`, or user asks to use Claude API, Anthropic SDKs, or Agent SDK. DO NOT TRIGGER when: code imports `openai`/other AI SDK, general programming, or ML/data-science tasks.\"\nlicense: Complete terms in LICENSE.txt\n---\n\n# Building LLM-Powered Applications with Claude\n\nThis skill helps you build LLM-powered applications with Claude. Choose the right surface based on your needs, detect the project language, then read the relevant language-specific documentation.\n\n## Defaults\n\nUnless the user requests otherwise:\n\nFor the Claude model version, please use Claude Opus 4.6, which you can access via the exact model string `claude-opus-4-6`. Please default to using adaptive thinking (`thinking: {type: \"adaptive\"}`) for anything remotely complicated. And finally, please default to streaming for any request that may involve long input, long output, or high `max_tokens` — it prevents hitting request timeouts. Use the SDK's `.get_final_message()` / `.finalMessage()` helper to get the complete response if you don't need to handle individual stream events.\n\n---\n\n## Language Detection\n\nBefore reading code examples, determine which language the user is working in:\n\n1. **Look at project files** to infer the language:\n\n - `*.py`, `requirements.txt`, `pyproject.toml`, `setup.py`, `Pipfile` → **Python** — read from `python/`\n - `*.ts`, `*.tsx`, `package.json`, `tsconfig.json` → **TypeScript** — read from `typescript/`\n - `*.js`, `*.jsx` (no `.ts` files present) → **TypeScript** — JS uses the same SDK, read from `typescript/`\n - `*.java`, `pom.xml`, `build.gradle` → **Java** — read from `java/`\n - `*.kt`, `*.kts`, `build.gradle.kts` → **Java** — Kotlin uses the Java SDK, read from `java/`\n - `*.scala`, `build.sbt` → **Java** — Scala uses the Java SDK, read from `java/`\n - `*.go`, `go.mod` → **Go** — read from `go/`\n - `*.rb`, `Gemfile` → **Ruby** — read from `ruby/`\n - `*.cs`, `*.csproj` → **C#** — read from `csharp/`\n - `*.php`, `composer.json` → **PHP** — read from `php/`\n\n2. **If multiple languages detected** (e.g., both Python and TypeScript files):\n\n - Check which language the user's current file or question relates to\n - If still ambiguous, ask: \"I detected both Python and TypeScript files. Which language are you using for the Claude API integration?\"\n\n3. 
**If language can't be inferred** (empty project, no source files, or unsupported language):\n\n - Use AskUserQuestion with options: Python, TypeScript, Java, Go, Ruby, cURL/raw HTTP, C#, PHP\n - If AskUserQuestion is unavailable, default to Python examples and note: \"Showing Python examples. Let me know if you need a different language.\"\n\n4. **If unsupported language detected** (Rust, Swift, C++, Elixir, etc.):\n\n - Suggest cURL/raw HTTP examples from `curl/` and note that community SDKs may exist\n - Offer to show Python or TypeScript examples as reference implementations\n\n5. **If user needs cURL/raw HTTP examples**, read from `curl/`.\n\n### Language-Specific Feature Support\n\n| Language | Tool Runner | Agent SDK | Notes |\n| ---------- | ----------- | --------- | ------------------------------------- |\n| Python | Yes (beta) | Yes | Full support — `@beta_tool` decorator |\n| TypeScript | Yes (beta) | Yes | Full support — `betaZodTool` + Zod |\n| Java | Yes (beta) | No | Beta tool use with annotated classes |\n| Go | Yes (beta) | No | `BetaToolRunner` in `toolrunner` pkg |\n| Ruby | Yes (beta) | No | `BaseTool` + `tool_runner` in beta |\n| cURL | N/A | N/A | Raw HTTP, no SDK features |\n| C# | No | No | Official SDK |\n| PHP | Yes (beta) | No | `BetaRunnableTool` + `toolRunner()` |\n\n---\n\n## Which Surface Should I Use?\n\n> **Start simple.** Default to the simplest tier that meets your needs. Single API calls and workflows handle most use cases — only reach for agents when the task genuinely requires open-ended, model-driven exploration.\n\n| Use Case | Tier | Recommended Surface | Why |\n| ----------------------------------------------- | --------------- | ------------------------- | --------------------------------------- |\n| Classification, summarization, extraction, Q&A | Single LLM call | **Claude API** | One request, one response |\n| Batch processing or embeddings | Single LLM call | **Claude API** | Specialized endpoints |\n| Multi-step pipelines with code-controlled logic | Workflow | **Claude API + tool use** | You orchestrate the loop |\n| Custom agent with your own tools | Agent | **Claude API + tool use** | Maximum flexibility |\n| AI agent with file/web/terminal access | Agent | **Agent SDK** | Built-in tools, safety, and MCP support |\n| Agentic coding assistant | Agent | **Agent SDK** | Designed for this use case |\n| Want built-in permissions and guardrails | Agent | **Agent SDK** | Safety features included |\n\n> **Note:** The Agent SDK is for when you want built-in file/web/terminal tools, permissions, and MCP out of the box. If you want to build an agent with your own tools, Claude API is the right choice — use the tool runner for automatic loop handling, or the manual loop for fine-grained control (approval gates, custom logging, conditional execution).\n\n### Decision Tree\n\n```\nWhat does your application need?\n\n1. Single LLM call (classification, summarization, extraction, Q&A)\n └── Claude API — one request, one response\n\n2. Does Claude need to read/write files, browse the web, or run shell commands\n as part of its work? (Not: does your app read a file and hand it to Claude —\n does Claude itself need to discover and access files/web/shell?)\n └── Yes → Agent SDK — built-in tools, don't reimplement them\n Examples: \"scan a codebase for bugs\", \"summarize every file in a directory\",\n \"find bugs using subagents\", \"research a topic via web search\"\n\n3. 
Workflow (multi-step, code-orchestrated, with your own tools)\n └── Claude API with tool use — you control the loop\n\n4. Open-ended agent (model decides its own trajectory, your own tools)\n └── Claude API agentic loop (maximum flexibility)\n```\n\n### Should I Build an Agent?\n\nBefore choosing the agent tier, check all four criteria:\n\n- **Complexity** — Is the task multi-step and hard to fully specify in advance? (e.g., \"turn this design doc into a PR\" vs. \"extract the title from this PDF\")\n- **Value** — Does the outcome justify higher cost and latency?\n- **Viability** — Is Claude capable at this task type?\n- **Cost of error** — Can errors be caught and recovered from? (tests, review, rollback)\n\nIf the answer is \"no\" to any of these, stay at a simpler tier (single call or workflow).\n\n---\n\n## Architecture\n\nEverything goes through `POST /v1/messages`. Tools and output constraints are features of this single endpoint — not separate APIs.\n\n**User-defined tools** — You define tools (via decorators, Zod schemas, or raw JSON), and the SDK's tool runner handles calling the API, executing your functions, and looping until Claude is done. For full control, you can write the loop manually.\n\n**Server-side tools** — Anthropic-hosted tools that run on Anthropic's infrastructure. Code execution is fully server-side (declare it in `tools`, Claude runs code automatically). Computer use can be server-hosted or self-hosted.\n\n**Structured outputs** — Constrains the Messages API response format (`output_config.format`) and/or tool parameter validation (`strict: true`). The recommended approach is `client.messages.parse()` which validates responses against your schema automatically. Note: the old `output_format` parameter is deprecated; use `output_config: {format: {...}}` on `messages.create()`.\n\n**Supporting endpoints** — Batches (`POST /v1/messages/batches`), Files (`POST /v1/files`), Token Counting, and Models (`GET /v1/models`, `GET /v1/models/{id}` — live capability/context-window discovery) feed into or support Messages API requests.\n\n---\n\n## Current Models (cached: 2026-02-17)\n\n| Model | Model ID | Context | Input $/1M | Output $/1M |\n| ----------------- | ------------------- | -------------- | ---------- | ----------- |\n| Claude Opus 4.6 | `claude-opus-4-6` | 200K (1M beta) | $5.00 | $25.00 |\n| Claude Sonnet 4.6 | `claude-sonnet-4-6` | 200K (1M beta) | $3.00 | $15.00 |\n| Claude Haiku 4.5 | `claude-haiku-4-5` | 200K | $1.00 | $5.00 |\n\n**ALWAYS use `claude-opus-4-6` unless the user explicitly names a different model.** This is non-negotiable. Do not use `claude-sonnet-4-6`, `claude-sonnet-4-5`, or any other model unless the user literally says \"use sonnet\" or \"use haiku\". Never downgrade for cost — that's the user's decision, not yours.\n\n**CRITICAL: Use only the exact model ID strings from the table above — they are complete as-is. Do not append date suffixes.** For example, use `claude-sonnet-4-5`, never `claude-sonnet-4-5-20250514` or any other date-suffixed variant you might recall from training data. If the user requests an older model not in the table (e.g., \"opus 4.5\", \"sonnet 3.7\"), read `shared/models.md` for the exact ID — do not construct one yourself.\n\nA note: if any of the model strings above look unfamiliar to you, that's to be expected — that just means they were released after your training data cutoff. Rest assured they are real models; we wouldn't mess with you like that.\n\n**Live capability lookup:** The table above is cached. 
When the user asks \"what's the context window for X\", \"does X support vision/thinking/effort\", or \"which models support Y\", query the Models API (`client.models.retrieve(id)` / `client.models.list()`) — see `shared/models.md` for the field reference and capability-filter examples.\n\n---\n\n## Thinking & Effort (Quick Reference)\n\n**Opus 4.6 — Adaptive thinking (recommended):** Use `thinking: {type: \"adaptive\"}`. Claude dynamically decides when and how much to think. No `budget_tokens` needed — `budget_tokens` is deprecated on Opus 4.6 and Sonnet 4.6 and must not be used. Adaptive thinking also automatically enables interleaved thinking (no beta header needed). **When the user asks for \"extended thinking\", a \"thinking budget\", or `budget_tokens`: always use Opus 4.6 with `thinking: {type: \"adaptive\"}`. The concept of a fixed token budget for thinking is deprecated — adaptive thinking replaces it. Do NOT use `budget_tokens` and do NOT switch to an older model.**\n\n**Effort parameter (GA, no beta header):** Controls thinking depth and overall token spend via `output_config: {effort: \"low\"|\"medium\"|\"high\"|\"max\"}` (inside `output_config`, not top-level). Default is `high` (equivalent to omitting it). `max` is Opus 4.6 only. Works on Opus 4.5, Opus 4.6, and Sonnet 4.6. Will error on Sonnet 4.5 / Haiku 4.5. Combine with adaptive thinking for the best cost-quality tradeoffs. Use `low` for subagents or simple tasks; `max` for the deepest reasoning.\n\n**Sonnet 4.6:** Supports adaptive thinking (`thinking: {type: \"adaptive\"}`). `budget_tokens` is deprecated on Sonnet 4.6 — use adaptive thinking instead.\n\n**Older models (only if explicitly requested):** If the user specifically asks for Sonnet 4.5 or another older model, use `thinking: {type: \"enabled\", budget_tokens: N}`. `budget_tokens` must be less than `max_tokens` (minimum 1024). Never choose an older model just because the user mentions `budget_tokens` — use Opus 4.6 with adaptive thinking instead.\n\n---\n\n## Compaction (Quick Reference)\n\n**Beta, Opus 4.6 and Sonnet 4.6.** For long-running conversations that may exceed the 200K context window, enable server-side compaction. The API automatically summarizes earlier context when it approaches the trigger threshold (default: 150K tokens). Requires beta header `compact-2026-01-12`.\n\n**Critical:** Append `response.content` (not just the text) back to your messages on every turn. Compaction blocks in the response must be preserved — the API uses them to replace the compacted history on the next request. Extracting only the text string and appending that will silently lose the compaction state.\n\nSee `{lang}/claude-api/README.md` (Compaction section) for code examples. Full docs via WebFetch in `shared/live-sources.md`.\n\n---\n\n## Prompt Caching (Quick Reference)\n\n**Prefix match.** Any byte change anywhere in the prefix invalidates everything after it. Render order is `tools` → `system` → `messages`. Keep stable content first (frozen system prompt, deterministic tool list), put volatile content (timestamps, per-request IDs, varying questions) after the last `cache_control` breakpoint.\n\n**Top-level auto-caching** (`cache_control: {type: \"ephemeral\"}` on `messages.create()`) is the simplest option when you don't need fine-grained placement. Max 4 breakpoints per request. 
Minimum cacheable prefix is ~1024 tokens — shorter prefixes silently won't cache.\n\n**Verify with `usage.cache_read_input_tokens`** — if it's zero across repeated requests, a silent invalidator is at work (`datetime.now()` in system prompt, unsorted JSON, varying tool set).\n\nFor placement patterns, architectural guidance, and the silent-invalidator audit checklist: read `shared/prompt-caching.md`. Language-specific syntax: `{lang}/claude-api/README.md` (Prompt Caching section).\n\n---\n\n## Reading Guide\n\nAfter detecting the language, read the relevant files based on what the user needs:\n\n### Quick Task Reference\n\n**Single text classification/summarization/extraction/Q&A:**\n→ Read only `{lang}/claude-api/README.md`\n\n**Chat UI or real-time response display:**\n→ Read `{lang}/claude-api/README.md` + `{lang}/claude-api/streaming.md`\n\n**Long-running conversations (may exceed context window):**\n→ Read `{lang}/claude-api/README.md` — see Compaction section\n\n**Prompt caching / optimize caching / \"why is my cache hit rate low\":**\n→ Read `shared/prompt-caching.md` + `{lang}/claude-api/README.md` (Prompt Caching section)\n\n**Function calling / tool use / agents:**\n→ Read `{lang}/claude-api/README.md` + `shared/tool-use-concepts.md` + `{lang}/claude-api/tool-use.md`\n\n**Batch processing (non-latency-sensitive):**\n→ Read `{lang}/claude-api/README.md` + `{lang}/claude-api/batches.md`\n\n**File uploads across multiple requests:**\n→ Read `{lang}/claude-api/README.md` + `{lang}/claude-api/files-api.md`\n\n**Agent with built-in tools (file/web/terminal):**\n→ Read `{lang}/agent-sdk/README.md` + `{lang}/agent-sdk/patterns.md`\n\n### Claude API (Full File Reference)\n\nRead the **language-specific Claude API folder** (`{language}/claude-api/`):\n\n1. **`{language}/claude-api/README.md`** — **Read this first.** Installation, quick start, common patterns, error handling.\n2. **`shared/tool-use-concepts.md`** — Read when the user needs function calling, code execution, memory, or structured outputs. Covers conceptual foundations.\n3. **`{language}/claude-api/tool-use.md`** — Read for language-specific tool use code examples (tool runner, manual loop, code execution, memory, structured outputs).\n4. **`{language}/claude-api/streaming.md`** — Read when building chat UIs or interfaces that display responses incrementally.\n5. **`{language}/claude-api/batches.md`** — Read when processing many requests offline (not latency-sensitive). Runs asynchronously at 50% cost.\n6. **`{language}/claude-api/files-api.md`** — Read when sending the same file across multiple requests without re-uploading.\n7. **`shared/prompt-caching.md`** — Read when adding or optimizing prompt caching. Covers prefix-stability design, breakpoint placement, and anti-patterns that silently invalidate cache.\n8. **`shared/error-codes.md`** — Read when debugging HTTP errors or implementing error handling.\n9. **`shared/live-sources.md`** — WebFetch URLs for fetching the latest official documentation.\n\n> **Note:** For Java, Go, Ruby, C#, PHP, and cURL — these have a single file each covering all basics. Read that file plus `shared/tool-use-concepts.md` and `shared/error-codes.md` as needed.\n\n### Agent SDK\n\nRead the **language-specific Agent SDK folder** (`{language}/agent-sdk/`). Agent SDK is available for **Python and TypeScript only**.\n\n1. **`{language}/agent-sdk/README.md`** — Installation, quick start, built-in tools, permissions, MCP, hooks.\n2. 
**`{language}/agent-sdk/patterns.md`** — Custom tools, hooks, subagents, MCP integration, session resumption.\n3. **`shared/live-sources.md`** — WebFetch URLs for current Agent SDK docs.\n\n---\n\n## When to Use WebFetch\n\nUse WebFetch to get the latest documentation when:\n\n- User asks for \"latest\" or \"current\" information\n- Cached data seems incorrect\n- User asks about features not covered here\n\nLive documentation URLs are in `shared/live-sources.md`.\n\n## Common Pitfalls\n\n- Don't truncate inputs when passing files or content to the API. If the content is too long to fit in the context window, notify the user and discuss options (chunking, summarization, etc.) rather than silently truncating.\n- **Opus 4.6 / Sonnet 4.6 thinking:** Use `thinking: {type: \"adaptive\"}` — do NOT use `budget_tokens` (deprecated on both Opus 4.6 and Sonnet 4.6). For older models, `budget_tokens` must be less than `max_tokens` (minimum 1024). This will throw an error if you get it wrong.\n- **Opus 4.6 prefill removed:** Assistant message prefills (last-assistant-turn prefills) return a 400 error on Opus 4.6. Use structured outputs (`output_config.format`) or system prompt instructions to control response format instead.\n- **`max_tokens` defaults:** Don't lowball `max_tokens` — hitting the cap truncates output mid-thought and requires a retry. For non-streaming requests, default to `~16000` (keeps responses under SDK HTTP timeouts). For streaming requests, default to `~64000` (timeouts aren't a concern, so give the model room). Only go lower when you have a hard reason: classification (`~256`), cost caps, or deliberately short outputs.\n- **128K output tokens:** Opus 4.6 supports up to 128K `max_tokens`, but the SDKs require streaming for values that large to avoid HTTP timeouts. Use `.stream()` with `.get_final_message()` / `.finalMessage()`.\n- **Tool call JSON parsing (Opus 4.6):** Opus 4.6 may produce different JSON string escaping in tool call `input` fields (e.g., Unicode or forward-slash escaping). Always parse tool inputs with `json.loads()` / `JSON.parse()` — never do raw string matching on the serialized input.\n- **Structured outputs (all models):** Use `output_config: {format: {...}}` instead of the deprecated `output_format` parameter on `messages.create()`. This is a general API change, not 4.6-specific.\n- **Don't reimplement SDK functionality:** The SDK provides high-level helpers — use them instead of building from scratch. Specifically: use `stream.finalMessage()` instead of wrapping `.on()` events in `new Promise()`; use typed exception classes (`Anthropic.RateLimitError`, etc.) instead of string-matching error messages; use SDK types (`Anthropic.MessageParam`, `Anthropic.Tool`, `Anthropic.Message`, etc.) instead of redefining equivalent interfaces.\n- **Don't define custom types for SDK data structures:** The SDK exports types for all API objects. Use `Anthropic.MessageParam` for messages, `Anthropic.Tool` for tool definitions, `Anthropic.ToolUseBlock` / `Anthropic.ToolResultBlockParam` for tool results, `Anthropic.Message` for responses. Defining your own `interface ChatMessage { role: string; content: unknown }` duplicates what the SDK already provides and loses type safety.\n- **Report and document output:** For tasks that produce reports, documents, or visualizations, the code execution sandbox has `python-docx`, `python-pptx`, `matplotlib`, `pillow`, and `pypdf` pre-installed. 
Claude can generate formatted files (DOCX, PDF, charts) and return them via the Files API — consider this for \"report\" or \"document\" type requests instead of plain stdout text.\n" }, { "name": "csharp", "node_type": "folder", "children": [ { "name": "claude-api.md", "node_type": "file", "content": "# Claude API — C#\n\n> **Note:** The C# SDK is the official Anthropic SDK for C#. Tool use is supported via the Messages API. A class-annotation-based tool runner is not available; use raw tool definitions with JSON schema. The SDK also supports Microsoft.Extensions.AI IChatClient integration with function invocation.\n\n## Installation\n\n```bash\ndotnet add package Anthropic\n```\n\n## Client Initialization\n\n```csharp\nusing Anthropic;\n\n// Default (uses ANTHROPIC_API_KEY env var)\nAnthropicClient client = new();\n\n// Explicit API key (use environment variables — never hardcode keys)\nAnthropicClient client = new() {\n ApiKey = Environment.GetEnvironmentVariable(\"ANTHROPIC_API_KEY\")\n};\n```\n\n---\n\n## Basic Message Request\n\n```csharp\nusing Anthropic.Models.Messages;\n\nvar parameters = new MessageCreateParams\n{\n Model = Model.ClaudeOpus4_6,\n MaxTokens = 16000,\n Messages = [new() { Role = Role.User, Content = \"What is the capital of France?\" }]\n};\nvar response = await client.Messages.Create(parameters);\n\n// ContentBlock is a union wrapper. .Value unwraps to the variant object,\n// then OfType filters to the type you want. Or use the TryPick* idiom\n// shown in the Thinking section below.\nforeach (var text in response.Content.Select(b => b.Value).OfType<TextBlock>())\n{\n Console.WriteLine(text.Text);\n}\n```\n\n---\n\n## Streaming\n\n```csharp\nusing Anthropic.Models.Messages;\n\nvar parameters = new MessageCreateParams\n{\n Model = Model.ClaudeOpus4_6,\n MaxTokens = 64000,\n Messages = [new() { Role = Role.User, Content = \"Write a haiku\" }]\n};\n\nawait foreach (RawMessageStreamEvent streamEvent in client.Messages.CreateStreaming(parameters))\n{\n if (streamEvent.TryPickContentBlockDelta(out var delta) &&\n delta.Delta.TryPickText(out var text))\n {\n Console.Write(text.Text);\n }\n}\n```\n\n**`RawMessageStreamEvent` TryPick methods** (naming drops the `Message`/`Raw` prefix): `TryPickStart`, `TryPickDelta`, `TryPickStop`, `TryPickContentBlockStart`, `TryPickContentBlockDelta`, `TryPickContentBlockStop`. There is no `TryPickMessageStop` — use `TryPickStop`.\n\n---\n\n## Thinking\n\n**Adaptive thinking is the recommended mode for Claude 4.6+ models.** Claude decides dynamically when and how much to think.\n\n```csharp\nusing Anthropic.Models.Messages;\n\nvar response = await client.Messages.Create(new MessageCreateParams\n{\n Model = Model.ClaudeOpus4_6,\n MaxTokens = 16000,\n // ThinkingConfigParam? implicitly converts from the concrete variant classes —\n // no wrapper needed.\n Thinking = new ThinkingConfigAdaptive(),\n Messages =\n [\n new() { Role = Role.User, Content = \"Solve: 27 * 453\" },\n ],\n});\n\n// ThinkingBlock(s) precede TextBlock in Content. TryPick* narrows the union.\nforeach (var block in response.Content)\n{\n if (block.TryPickThinking(out ThinkingBlock? t))\n {\n Console.WriteLine($\"[thinking] {t.Thinking}\");\n }\n else if (block.TryPickText(out TextBlock? text))\n {\n Console.WriteLine(text.Text);\n }\n}\n```\n\n> **Deprecated:** `new ThinkingConfigEnabled { BudgetTokens = N }` (fixed-budget extended thinking) still works on Claude 4.6 but is deprecated. 
Use adaptive thinking above.\n\nAlternative to `TryPick*`: `.Select(b => b.Value).OfType<TextBlock>()` (same LINQ pattern as the Basic Message example).\n\n---\n\n## Tool Use\n\n### Defining a tool\n\n`Tool` (NOT `ToolParam`) with an `InputSchema` record. `InputSchema.Type` is auto-set to `\"object\"` by the constructor — don't set it. `ToolUnion` has an implicit conversion from `Tool`, triggered by the collection expression `[...]`.\n\n```csharp\nusing System.Text.Json;\nusing Anthropic.Models.Messages;\n\nvar parameters = new MessageCreateParams\n{\n Model = Model.ClaudeSonnet4_6,\n MaxTokens = 16000,\n Tools = [\n new Tool {\n Name = \"get_weather\",\n Description = \"Get the current weather in a given location\",\n InputSchema = new() {\n Properties = new Dictionary<string, JsonElement> {\n [\"location\"] = JsonSerializer.SerializeToElement(\n new { type = \"string\", description = \"City name\" }),\n },\n Required = [\"location\"],\n },\n },\n ],\n Messages = [new() { Role = Role.User, Content = \"Weather in Paris?\" }],\n};\n```\n\nDerived from `anthropic-sdk-csharp/src/Anthropic/Models/Messages/Tool.cs` and `ToolUnion.cs:799` (implicit conversion).\n\nSee [shared tool use concepts](../shared/tool-use-concepts.md) for the loop pattern.\n\n### Converting response content to the follow-up assistant message\n\nWhen echoing Claude's response back in the assistant turn, **there is no `.ToParam()` helper** — manually reconstruct each `ContentBlock` variant as its `*Param` counterpart. Do NOT use `new ContentBlockParam(block.Json)`: it compiles and serializes, but `.Value` stays `null` so `TryPick*`/`Validate()` fail (degraded JSON pass-through, not the typed path).\n\n```csharp\nusing Anthropic.Models.Messages;\n\nMessage response = await client.Messages.Create(parameters);\n\n// No .ToParam() — reconstruct per variant. Implicit conversions from each\n// *Param type to ContentBlockParam mean no explicit wrapper.\nList<ContentBlockParam> assistantContent = [];\nList<ToolResultBlockParam> toolResults = [];\nforeach (ContentBlock block in response.Content)\n{\n if (block.TryPickText(out TextBlock? text))\n {\n assistantContent.Add(new TextBlockParam { Text = text.Text });\n }\n else if (block.TryPickThinking(out ThinkingBlock? thinking))\n {\n // Signature MUST be preserved — the API rejects tampering\n assistantContent.Add(new ThinkingBlockParam\n {\n Thinking = thinking.Thinking,\n Signature = thinking.Signature,\n });\n }\n else if (block.TryPickRedactedThinking(out RedactedThinkingBlock? redacted))\n {\n assistantContent.Add(new RedactedThinkingBlockParam { Data = redacted.Data });\n }\n else if (block.TryPickToolUse(out ToolUseBlock? toolUse))\n {\n // ToolUseBlock has required Caller; ToolUseBlockParam.Caller is optional — don't copy it\n assistantContent.Add(new ToolUseBlockParam\n {\n ID = toolUse.ID,\n Name = toolUse.Name,\n Input = toolUse.Input,\n });\n // Execute the tool; collect ONE result per tool_use block — the API\n // rejects the follow-up if any tool_use ID lacks a matching tool_result.\n string result = ExecuteYourTool(toolUse.Name, toolUse.Input);\n toolResults.Add(new ToolResultBlockParam\n {\n ToolUseID = toolUse.ID,\n Content = result,\n });\n }\n}\n\n// Follow-up: prior messages + assistant echo + user tool_result(s)\nList<MessageParam> followUpMessages =\n[\n .. parameters.Messages,\n new() { Role = Role.Assistant, Content = assistantContent },\n new() { Role = Role.User, Content = toolResults },\n];\n```\n\n`ToolResultBlockParam` has no tuple constructor — use the object initializer. 
`Content` is a string-or-list union; a plain `string` implicitly converts.\n\n---\n\n## Context Editing / Compaction (Beta)\n\n**Beta-namespace prefix is inconsistent** (source-verified against `src/Anthropic/Models/Beta/Messages/*.cs` @ 12.9.0). No prefix: `MessageCreateParams`, `MessageCountTokensParams`, `Role`. **Everything else has the `Beta` prefix**: `BetaMessageParam`, `BetaMessage`, `BetaContentBlock`, `BetaToolUseBlock`, all block param types. The unprefixed `Role` WILL collide with `Anthropic.Models.Messages.Role` if you import both namespaces (CS0104). Safest: import only Beta; if mixing, alias the beta `Role`:\n\n```csharp\nusing Anthropic.Models.Beta.Messages;\nusing NonBeta = Anthropic.Models.Messages; // only if you also need non-beta types\n// Now: MessageCreateParams, BetaMessageParam, Role (beta's), NonBeta.Role (if needed)\n```\n\n`BetaMessage.Content` is `IReadOnlyList<BetaContentBlock>` — a 15-variant discriminated union. Narrow with `TryPick*`. **Response `BetaContentBlock` is NOT assignable to param `BetaContentBlockParam`** — there's no `.ToParam()` in C#. Round-trip by converting each block:\n\n```csharp\nusing Anthropic.Models.Beta.Messages;\n\nvar betaParams = new MessageCreateParams // no Beta prefix — one of only 2 unprefixed\n{\n Model = Model.ClaudeOpus4_6,\n MaxTokens = 16000,\n Betas = [\"compact-2026-01-12\"],\n ContextManagement = new BetaContextManagementConfig\n {\n Edits = [new BetaCompact20260112Edit()],\n },\n Messages = messages,\n};\nBetaMessage resp = await client.Beta.Messages.Create(betaParams);\n\nforeach (BetaContentBlock block in resp.Content)\n{\n if (block.TryPickCompaction(out BetaCompactionBlock? compaction))\n {\n // Content is nullable — compaction can fail server-side\n Console.WriteLine($\"compaction summary: {compaction.Content}\");\n }\n}\n\n// Context-edit metadata lives on a separate nullable field\nif (resp.ContextManagement is { } ctx)\n{\n foreach (var edit in ctx.AppliedEdits)\n Console.WriteLine($\"cleared {edit.ClearedInputTokens} tokens\");\n}\n\n// ROUND-TRIP: BetaMessageParam.Content is BetaMessageParamContent (a string|list\n// union). It implicit-converts from List<BetaContentBlockParam>, NOT from the\n// response's IReadOnlyList<BetaContentBlock>. Convert each block:\nList<BetaContentBlockParam> paramBlocks = [];\nforeach (var b in resp.Content)\n{\n if (b.TryPickText(out var t)) paramBlocks.Add(new BetaTextBlockParam { Text = t.Text });\n else if (b.TryPickCompaction(out var c)) paramBlocks.Add(new BetaCompactionBlockParam { Content = c.Content });\n // ... other variants as needed\n}\nmessages.Add(new BetaMessageParam { Role = Role.Assistant, Content = paramBlocks });\n```\n\nAll 15 `BetaContentBlock.TryPick*` variants: `Text`, `Thinking`, `RedactedThinking`, `ToolUse`, `ServerToolUse`, `WebSearchToolResult`, `WebFetchToolResult`, `CodeExecutionToolResult`, `BashCodeExecutionToolResult`, `TextEditorCodeExecutionToolResult`, `ToolSearchToolResult`, `McpToolUse`, `McpToolResult`, `ContainerUpload`, `Compaction`.\n\n**`BetaToolUseBlock.Input` is `IReadOnlyDictionary<string, JsonElement>`** — index by key then call the `JsonElement` extractor:\n\n```csharp\nif (block.TryPickToolUse(out BetaToolUseBlock? tu))\n{\n int a = tu.Input[\"a\"].GetInt32();\n string s = tu.Input[\"name\"].GetString()!;\n}\n```\n\n---\n\n## Effort Parameter\n\nEffort is nested under `OutputConfig`, NOT a top-level property. 
`ApiEnum` has an implicit conversion from the enum, so assign `Effort.High` directly.\n\n```csharp\nOutputConfig = new OutputConfig { Effort = Effort.High },\n```\n\nValues: `Effort.Low`, `Effort.Medium`, `Effort.High`, `Effort.Max`. Combine with `Thinking = new ThinkingConfigAdaptive()` for cost-quality control.\n\n---\n\n## Prompt Caching\n\n`System` takes `MessageCreateParamsSystem?` — a union of `string` or `List<TextBlockParam>`. There is no `SystemTextBlockParam`; use plain `TextBlockParam`. The implicit conversion needs the concrete `List<TextBlockParam>` type (array literals won't convert). For placement patterns and the silent-invalidator audit checklist, see `shared/prompt-caching.md`.\n\n```csharp\nSystem = new List<TextBlockParam> {\n new() {\n Text = longSystemPrompt,\n CacheControl = new CacheControlEphemeral(), // auto-sets Type = \"ephemeral\"\n },\n},\n```\n\nOptional `Ttl` on `CacheControlEphemeral`: `new() { Ttl = Ttl.Ttl1h }` or `Ttl.Ttl5m`. `CacheControl` also exists on `Tool.CacheControl` and top-level `MessageCreateParams.CacheControl`.\n\nVerify hits via `response.Usage.CacheCreationInputTokens` / `response.Usage.CacheReadInputTokens`.\n\n---\n\n## Token Counting\n\n```csharp\nMessageTokensCount result = await client.Messages.CountTokens(new MessageCountTokensParams {\n Model = Model.ClaudeOpus4_6,\n Messages = [new() { Role = Role.User, Content = \"Hello\" }],\n});\nlong tokens = result.InputTokens;\n```\n\n`MessageCountTokensParams.Tools` uses a different union type (`MessageCountTokensTool`) than `MessageCreateParams.Tools` (`ToolUnion`) — if you're passing tools, the compiler will tell you when it matters.\n\n---\n\n## Structured Output\n\n```csharp\nOutputConfig = new OutputConfig {\n Format = new JsonOutputFormat {\n Schema = new Dictionary<string, JsonElement> {\n [\"type\"] = JsonSerializer.SerializeToElement(\"object\"),\n [\"properties\"] = JsonSerializer.SerializeToElement(\n new { name = new { type = \"string\" } }),\n [\"required\"] = JsonSerializer.SerializeToElement(new[] { \"name\" }),\n },\n },\n},\n```\n\n`JsonOutputFormat.Type` is auto-set to `\"json_schema\"` by the constructor. `Schema` is `required`.\n\n---\n\n## PDF / Document Input\n\n`DocumentBlockParam` takes a `DocumentBlockParamSource` union: `Base64PdfSource` / `UrlPdfSource` / `PlainTextSource` / `ContentBlockSource`. `Base64PdfSource` auto-sets `MediaType = \"application/pdf\"` and `Type = \"base64\"`.\n\n```csharp\nnew MessageParam {\n Role = Role.User,\n Content = new List<ContentBlockParam> {\n new DocumentBlockParam { Source = new Base64PdfSource { Data = base64String } },\n new TextBlockParam { Text = \"Summarize this PDF\" },\n },\n}\n```\n\n---\n\n## Server-Side Tools\n\nWeb search, bash, text editor, and code execution are built-in server tools. Type names are version-suffixed; constructors auto-set `name`/`type`. All implicit-convert to `ToolUnion`.\n\n```csharp\nTools = [\n new WebSearchTool20260209(),\n new ToolBash20250124(),\n new ToolTextEditor20250728(),\n new CodeExecutionTool20260120(),\n],\n```\n\nAlso available: `WebFetchTool20260209`, `MemoryTool20250818`. `WebSearchTool20260209` optionals: `AllowedDomains`, `BlockedDomains`, `MaxUses`, `UserLocation`.\n\n---\n\n## Files API (Beta)\n\nFiles live under `client.Beta.Files` (namespace `Anthropic.Models.Beta.Files`). 
`BinaryContent` implicit-converts from `Stream` and `byte[]`.\n\n```csharp\nusing Anthropic.Models.Beta.Files;\nusing Anthropic.Models.Beta.Messages;\n\nFileMetadata meta = await client.Beta.Files.Upload(\n new FileUploadParams { File = File.OpenRead(\"doc.pdf\") });\n\n// Referencing the uploaded file requires Beta message types:\nnew BetaRequestDocumentBlock {\n Source = new BetaFileDocumentSource { FileID = meta.ID },\n}\n```\n\nThe non-beta `DocumentBlockParamSource` union has no file-ID variant — file references need `client.Beta.Messages.Create()`.\n" + } + ] + }, + { + "name": "curl", + "node_type": "folder", + "children": [ + { + "name": "examples.md", + "node_type": "file", + "content": "# Claude API — cURL / Raw HTTP\n\nUse these examples when the user needs raw HTTP requests or is working in a language without an official SDK.\n\n## Setup\n\n```bash\nexport ANTHROPIC_API_KEY=\"your-api-key\"\n```\n\n---\n\n## Basic Message Request\n\n```bash\ncurl https://api.anthropic.com/v1/messages \\\n -H \"Content-Type: application/json\" \\\n -H \"x-api-key: $ANTHROPIC_API_KEY\" \\\n -H \"anthropic-version: 2023-06-01\" \\\n -d '{\n \"model\": \"claude-opus-4-6\",\n \"max_tokens\": 16000,\n \"messages\": [\n {\"role\": \"user\", \"content\": \"What is the capital of France?\"}\n ]\n }'\n```\n\n### Parsing the response\n\nUse `jq` to extract fields from the JSON response. Do not use `grep`/`sed` —\nJSON strings can contain any character and regex parsing will break on quotes,\nescapes, or multi-line content.\n\n```bash\n# Capture the response, then extract fields\nresponse=$(curl -s https://api.anthropic.com/v1/messages \\\n -H \"Content-Type: application/json\" \\\n -H \"x-api-key: $ANTHROPIC_API_KEY\" \\\n -H \"anthropic-version: 2023-06-01\" \\\n -d '{\"model\":\"claude-opus-4-6\",\"max_tokens\":16000,\"messages\":[{\"role\":\"user\",\"content\":\"Hello\"}]}')\n\n# Print the first text block (-r strips the JSON quotes)\necho \"$response\" | jq -r '.content[0].text'\n\n# Read usage fields\ninput_tokens=$(echo \"$response\" | jq -r '.usage.input_tokens')\noutput_tokens=$(echo \"$response\" | jq -r '.usage.output_tokens')\n\n# Read stop reason (for tool-use loops)\nstop_reason=$(echo \"$response\" | jq -r '.stop_reason')\n\n# Extract all text blocks (content is an array; filter to type==\"text\")\necho \"$response\" | jq -r '.content[] | select(.type == \"text\") | .text'\n```\n\n\n---\n\n## Streaming (SSE)\n\n```bash\ncurl https://api.anthropic.com/v1/messages \\\n -H \"Content-Type: application/json\" \\\n -H \"x-api-key: $ANTHROPIC_API_KEY\" \\\n -H \"anthropic-version: 2023-06-01\" \\\n -d '{\n \"model\": \"claude-opus-4-6\",\n \"max_tokens\": 64000,\n \"stream\": true,\n \"messages\": [{\"role\": \"user\", \"content\": \"Write a haiku\"}]\n }'\n```\n\nThe response is a stream of Server-Sent Events:\n\n```\nevent: message_start\ndata: {\"type\":\"message_start\",\"message\":{\"id\":\"msg_...\",\"type\":\"message\",...}}\n\nevent: content_block_start\ndata: {\"type\":\"content_block_start\",\"index\":0,\"content_block\":{\"type\":\"text\",\"text\":\"\"}}\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\"Hello\"}}\n\nevent: content_block_stop\ndata: {\"type\":\"content_block_stop\",\"index\":0}\n\nevent: message_delta\ndata: {\"type\":\"message_delta\",\"delta\":{\"stop_reason\":\"end_turn\"},\"usage\":{\"output_tokens\":12}}\n\nevent: message_stop\ndata: {\"type\":\"message_stop\"}\n```\n\n---\n\n## Tool 
Use\n\n```bash\ncurl https://api.anthropic.com/v1/messages \\\n -H \"Content-Type: application/json\" \\\n -H \"x-api-key: $ANTHROPIC_API_KEY\" \\\n -H \"anthropic-version: 2023-06-01\" \\\n -d '{\n \"model\": \"claude-opus-4-6\",\n \"max_tokens\": 16000,\n \"tools\": [{\n \"name\": \"get_weather\",\n \"description\": \"Get current weather for a location\",\n \"input_schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\"type\": \"string\", \"description\": \"City name\"}\n },\n \"required\": [\"location\"]\n }\n }],\n \"messages\": [{\"role\": \"user\", \"content\": \"What is the weather in Paris?\"}]\n }'\n```\n\nWhen Claude responds with a `tool_use` block, send the result back:\n\n```bash\ncurl https://api.anthropic.com/v1/messages \\\n -H \"Content-Type: application/json\" \\\n -H \"x-api-key: $ANTHROPIC_API_KEY\" \\\n -H \"anthropic-version: 2023-06-01\" \\\n -d '{\n \"model\": \"claude-opus-4-6\",\n \"max_tokens\": 16000,\n \"tools\": [{\n \"name\": \"get_weather\",\n \"description\": \"Get current weather for a location\",\n \"input_schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\"type\": \"string\", \"description\": \"City name\"}\n },\n \"required\": [\"location\"]\n }\n }],\n \"messages\": [\n {\"role\": \"user\", \"content\": \"What is the weather in Paris?\"},\n {\"role\": \"assistant\", \"content\": [\n {\"type\": \"text\", \"text\": \"Let me check the weather.\"},\n {\"type\": \"tool_use\", \"id\": \"toolu_abc123\", \"name\": \"get_weather\", \"input\": {\"location\": \"Paris\"}}\n ]},\n {\"role\": \"user\", \"content\": [\n {\"type\": \"tool_result\", \"tool_use_id\": \"toolu_abc123\", \"content\": \"72°F and sunny\"}\n ]}\n ]\n }'\n```\n\n---\n\n## Prompt Caching\n\nPut `cache_control` on the last block of the stable prefix. See `shared/prompt-caching.md` for placement patterns and the silent-invalidator audit checklist.\n\n```bash\ncurl https://api.anthropic.com/v1/messages \\\n -H \"Content-Type: application/json\" \\\n -H \"x-api-key: $ANTHROPIC_API_KEY\" \\\n -H \"anthropic-version: 2023-06-01\" \\\n -d '{\n \"model\": \"claude-opus-4-6\",\n \"max_tokens\": 16000,\n \"system\": [\n {\"type\": \"text\", \"text\": \"<long system prompt>\", \"cache_control\": {\"type\": \"ephemeral\"}}\n ],\n \"messages\": [{\"role\": \"user\", \"content\": \"Summarize the key points\"}]\n }'\n```\n\nFor 1-hour TTL: `\"cache_control\": {\"type\": \"ephemeral\", \"ttl\": \"1h\"}`. Top-level `\"cache_control\"` on the request body auto-places on the last cacheable block. Verify hits via the response `usage.cache_creation_input_tokens` / `usage.cache_read_input_tokens` fields.\n\n---\n\n## Extended Thinking\n\n> **Opus 4.6 and Sonnet 4.6:** Use adaptive thinking. 
`budget_tokens` is deprecated on both Opus 4.6 and Sonnet 4.6.\n> **Older models:** Use `\"type\": \"enabled\"` with `\"budget_tokens\": N` (must be < `max_tokens`, min 1024).\n\n```bash\n# Opus 4.6: adaptive thinking (recommended)\ncurl https://api.anthropic.com/v1/messages \\\n -H \"Content-Type: application/json\" \\\n -H \"x-api-key: $ANTHROPIC_API_KEY\" \\\n -H \"anthropic-version: 2023-06-01\" \\\n -d '{\n \"model\": \"claude-opus-4-6\",\n \"max_tokens\": 16000,\n \"thinking\": {\n \"type\": \"adaptive\"\n },\n \"output_config\": {\n \"effort\": \"high\"\n },\n \"messages\": [{\"role\": \"user\", \"content\": \"Solve this step by step...\"}]\n }'\n```\n\n---\n\n## Required Headers\n\n| Header | Value | Description |\n| ------------------- | ------------------ | -------------------------- |\n| `Content-Type` | `application/json` | Required |\n| `x-api-key` | Your API key | Authentication |\n| `anthropic-version` | `2023-06-01` | API version |\n| `anthropic-beta` | Beta feature IDs | Required for beta features |\n" + } + ] + }, + { + "name": "go", + "node_type": "folder", + "children": [ + { + "name": "claude-api.md", + "node_type": "file", + "content": "# Claude API — Go\n\n> **Note:** The Go SDK supports the Claude API and beta tool use with `BetaToolRunner`. Agent SDK is not yet available for Go.\n\n## Installation\n\n```bash\ngo get github.com/anthropics/anthropic-sdk-go\n```\n\n## Client Initialization\n\n```go\nimport (\n \"github.com/anthropics/anthropic-sdk-go\"\n \"github.com/anthropics/anthropic-sdk-go/option\"\n)\n\n// Default (uses ANTHROPIC_API_KEY env var)\nclient := anthropic.NewClient()\n\n// Explicit API key\nclient := anthropic.NewClient(\n option.WithAPIKey(\"your-api-key\"),\n)\n```\n\n---\n\n## Basic Message Request\n\n```go\nresponse, err := client.Messages.New(context.Background(), anthropic.MessageNewParams{\n Model: anthropic.ModelClaudeOpus4_6,\n MaxTokens: 16000,\n Messages: []anthropic.MessageParam{\n anthropic.NewUserMessage(anthropic.NewTextBlock(\"What is the capital of France?\")),\n },\n})\nif err != nil {\n log.Fatal(err)\n}\nfor _, block := range response.Content {\n switch variant := block.AsAny().(type) {\n case anthropic.TextBlock:\n fmt.Println(variant.Text)\n }\n}\n```\n\n---\n\n## Streaming\n\n```go\nstream := client.Messages.NewStreaming(context.Background(), anthropic.MessageNewParams{\n Model: anthropic.ModelClaudeOpus4_6,\n MaxTokens: 64000,\n Messages: []anthropic.MessageParam{\n anthropic.NewUserMessage(anthropic.NewTextBlock(\"Write a haiku\")),\n },\n})\n\nfor stream.Next() {\n event := stream.Current()\n switch eventVariant := event.AsAny().(type) {\n case anthropic.ContentBlockDeltaEvent:\n switch deltaVariant := eventVariant.Delta.AsAny().(type) {\n case anthropic.TextDelta:\n fmt.Print(deltaVariant.Text)\n }\n }\n}\nif err := stream.Err(); err != nil {\n log.Fatal(err)\n}\n```\n\n**Accumulating the final message** (there is no `GetFinalMessage()` on the stream):\n\n```go\nstream := client.Messages.NewStreaming(ctx, params)\nmessage := anthropic.Message{}\nfor stream.Next() {\n message.Accumulate(stream.Current())\n}\nif err := stream.Err(); err != nil { log.Fatal(err) }\n// message.Content now has the complete response\n```\n\n\n---\n\n## Tool Use\n\n### Tool Runner (Beta — Recommended)\n\n**Beta:** The Go SDK provides `BetaToolRunner` for automatic tool use loops via the `toolrunner` package.\n\n```go\nimport (\n \"context\"\n \"fmt\"\n \"log\"\n\n \"github.com/anthropics/anthropic-sdk-go\"\n 
\"github.com/anthropics/anthropic-sdk-go/toolrunner\"\n)\n\n// Define tool input with jsonschema tags for automatic schema generation\ntype GetWeatherInput struct {\n City string `json:\"city\" jsonschema:\"required,description=The city name\"`\n}\n\n// Create a tool with automatic schema generation from struct tags\nweatherTool, err := toolrunner.NewBetaToolFromJSONSchema(\n \"get_weather\",\n \"Get current weather for a city\",\n func(ctx context.Context, input GetWeatherInput) (anthropic.BetaToolResultBlockParamContentUnion, error) {\n return anthropic.BetaToolResultBlockParamContentUnion{\n OfText: &anthropic.BetaTextBlockParam{\n Text: fmt.Sprintf(\"The weather in %s is sunny, 72°F\", input.City),\n },\n }, nil\n },\n)\nif err != nil {\n log.Fatal(err)\n}\n\n// Create a tool runner that handles the conversation loop automatically\nrunner := client.Beta.Messages.NewToolRunner(\n []anthropic.BetaTool{weatherTool},\n anthropic.BetaToolRunnerParams{\n BetaMessageNewParams: anthropic.BetaMessageNewParams{\n Model: anthropic.ModelClaudeOpus4_6,\n MaxTokens: 16000,\n Messages: []anthropic.BetaMessageParam{\n anthropic.NewBetaUserMessage(anthropic.NewBetaTextBlock(\"What's the weather in Paris?\")),\n },\n },\n MaxIterations: 5,\n },\n)\n\n// Run until Claude produces a final response\nmessage, err := runner.RunToCompletion(context.Background())\nif err != nil {\n log.Fatal(err)\n}\n\n// RunToCompletion returns *BetaMessage; content is []BetaContentBlockUnion.\n// Narrow via AsAny() switch — note the Beta-namespace types (BetaTextBlock,\n// not TextBlock):\nfor _, block := range message.Content {\n switch block := block.AsAny().(type) {\n case anthropic.BetaTextBlock:\n fmt.Println(block.Text)\n }\n}\n```\n\n**Key features of the Go tool runner:**\n\n- Automatic schema generation from Go structs via `jsonschema` tags\n- `RunToCompletion()` for simple one-shot usage\n- `All()` iterator for processing each message in the conversation\n- `NextMessage()` for step-by-step iteration\n- Streaming variant via `NewToolRunnerStreaming()` with `AllStreaming()`\n\n### Manual Loop\n\nFor fine-grained control over the agentic loop, define tools with `ToolParam`, check `StopReason`, execute tools yourself, and feed `tool_result` blocks back. This is the pattern when you need to intercept, validate, or log tool calls.\n\nDerived from `anthropic-sdk-go/examples/tools/main.go`.\n\n```go\npackage main\n\nimport (\n \"context\"\n \"encoding/json\"\n \"fmt\"\n \"log\"\n\n \"github.com/anthropics/anthropic-sdk-go\"\n)\n\nfunc main() {\n client := anthropic.NewClient()\n\n // 1. Define tools. ToolParam.InputSchema uses a map, no struct tags needed.\n addTool := anthropic.ToolParam{\n Name: \"add\",\n Description: anthropic.String(\"Add two integers\"),\n InputSchema: anthropic.ToolInputSchemaParam{\n Properties: map[string]any{\n \"a\": map[string]any{\"type\": \"integer\"},\n \"b\": map[string]any{\"type\": \"integer\"},\n },\n },\n }\n // ToolParam must be wrapped in ToolUnionParam for the Tools slice\n tools := []anthropic.ToolUnionParam{{OfTool: &addTool}}\n\n messages := []anthropic.MessageParam{\n anthropic.NewUserMessage(anthropic.NewTextBlock(\"What is 2 + 3?\")),\n }\n\n for {\n resp, err := client.Messages.New(context.Background(), anthropic.MessageNewParams{\n Model: anthropic.ModelClaudeSonnet4_6,\n MaxTokens: 16000,\n Messages: messages,\n Tools: tools,\n })\n if err != nil {\n log.Fatal(err)\n }\n\n // 2. 
Append the assistant response to history BEFORE processing tool calls.\n // resp.ToParam() converts Message → MessageParam in one call.\n messages = append(messages, resp.ToParam())\n\n // 3. Walk content blocks. ContentBlockUnion is a flattened struct;\n // use block.AsAny().(type) to switch on the actual variant.\n toolResults := []anthropic.ContentBlockParamUnion{}\n for _, block := range resp.Content {\n switch variant := block.AsAny().(type) {\n case anthropic.TextBlock:\n fmt.Println(variant.Text)\n case anthropic.ToolUseBlock:\n // 4. Parse the tool input. Use variant.JSON.Input.Raw() to get the\n // raw JSON — block.Input is json.RawMessage, not the parsed value.\n var in struct {\n A int `json:\"a\"`\n B int `json:\"b\"`\n }\n if err := json.Unmarshal([]byte(variant.JSON.Input.Raw()), &in); err != nil {\n log.Fatal(err)\n }\n result := fmt.Sprintf(\"%d\", in.A+in.B)\n // 5. NewToolResultBlock(toolUseID, content, isError) builds the\n // ContentBlockParamUnion for you. block.ID is the tool_use_id.\n toolResults = append(toolResults,\n anthropic.NewToolResultBlock(block.ID, result, false))\n }\n }\n\n // 6. Exit when Claude stops asking for tools\n if resp.StopReason != anthropic.StopReasonToolUse {\n break\n }\n\n // 7. Tool results go in a user message (variadic: all results in one turn)\n messages = append(messages, anthropic.NewUserMessage(toolResults...))\n }\n}\n```\n\n**Key API surface:**\n\n| Symbol | Purpose |\n|---|---|\n| `resp.ToParam()` | Convert `Message` response → `MessageParam` for history |\n| `block.AsAny().(type)` | Type-switch on `ContentBlockUnion` variants |\n| `variant.JSON.Input.Raw()` | Raw JSON string of tool input (for `json.Unmarshal`) |\n| `anthropic.NewToolResultBlock(id, content, isError)` | Build `tool_result` block |\n| `anthropic.NewUserMessage(blocks...)` | Wrap tool results as a user turn |\n| `anthropic.StopReasonToolUse` | `StopReason` constant to check loop termination |\n| `anthropic.ToolUnionParam{OfTool: &t}` | Wrap `ToolParam` in the union for `Tools:` |\n\n---\n\n## Thinking\n\nEnable Claude's internal reasoning by setting `Thinking` in `MessageNewParams`. The response will contain `ThinkingBlock` content before the final `TextBlock`.\n\n**Adaptive thinking is the recommended mode for Claude 4.6+ models.** Claude decides dynamically when and how much to think. Combine with the `effort` parameter for cost-quality control.\n\nDerived from `anthropic-sdk-go/message.go` (`ThinkingConfigParamUnion`, `NewThinkingConfigAdaptiveParam`).\n\n```go\n// There is no ThinkingConfigParamOfAdaptive helper — construct the union\n// struct-literal directly and take the address of the variant.\nadaptive := anthropic.NewThinkingConfigAdaptiveParam()\nparams := anthropic.MessageNewParams{\n Model: anthropic.ModelClaudeSonnet4_6,\n MaxTokens: 16000,\n Thinking: anthropic.ThinkingConfigParamUnion{OfAdaptive: &adaptive},\n Messages: []anthropic.MessageParam{\n anthropic.NewUserMessage(anthropic.NewTextBlock(\"How many r's in strawberry?\")),\n },\n}\n\nresp, err := client.Messages.New(context.Background(), params)\nif err != nil {\n log.Fatal(err)\n}\n\n// ThinkingBlock(s) precede TextBlock in content\nfor _, block := range resp.Content {\n switch b := block.AsAny().(type) {\n case anthropic.ThinkingBlock:\n fmt.Println(\"[thinking]\", b.Thinking)\n case anthropic.TextBlock:\n fmt.Println(b.Text)\n }\n}\n```\n\n> **Deprecated:** `ThinkingConfigParamOfEnabled(budgetTokens)` (fixed-budget extended thinking) still works on Claude 4.6 but is deprecated. 
Use adaptive thinking above.\n\nTo disable: `anthropic.ThinkingConfigParamUnion{OfDisabled: &anthropic.ThinkingConfigDisabledParam{}}`.\n\n---\n\n## Prompt Caching\n\n`System` is `[]TextBlockParam`; set `CacheControl` on the last block to cache tools + system together. For placement patterns and the silent-invalidator audit checklist, see `shared/prompt-caching.md`.\n\n```go\nSystem: []anthropic.TextBlockParam{{\n Text: longSystemPrompt,\n CacheControl: anthropic.NewCacheControlEphemeralParam(), // default 5m TTL\n}},\n```\n\nFor 1-hour TTL: `anthropic.CacheControlEphemeralParam{TTL: anthropic.CacheControlEphemeralTTLTTL1h}`. There's also a top-level `CacheControl` on `MessageNewParams` that auto-places on the last cacheable block.\n\nVerify hits via `resp.Usage.CacheCreationInputTokens` / `resp.Usage.CacheReadInputTokens`.\n\n---\n\n## Server-Side Tools\n\nVersion-suffixed struct names with `Param` suffix. `Name`/`Type` are `constant.*` types — zero value marshals correctly, so `{}` works. Wrap in `ToolUnionParam` with the matching `Of*` field.\n\n```go\nTools: []anthropic.ToolUnionParam{\n {OfWebSearchTool20260209: &anthropic.WebSearchTool20260209Param{}},\n {OfBashTool20250124: &anthropic.ToolBash20250124Param{}},\n {OfTextEditor20250728: &anthropic.ToolTextEditor20250728Param{}},\n {OfCodeExecutionTool20260120: &anthropic.CodeExecutionTool20260120Param{}},\n},\n```\n\nAlso available: `WebFetchTool20260209Param`, `MemoryTool20250818Param`, `ToolSearchToolBm25_20251119Param`, `ToolSearchToolRegex20251119Param`.\n\n---\n\n## PDF / Document Input\n\n`NewDocumentBlock` generic helper accepts any source type. `MediaType`/`Type` are auto-set.\n\n```go\nb64 := base64.StdEncoding.EncodeToString(pdfBytes)\n\nmsg := anthropic.NewUserMessage(\n anthropic.NewDocumentBlock(anthropic.Base64PDFSourceParam{Data: b64}),\n anthropic.NewTextBlock(\"Summarize this document\"),\n)\n```\n\nOther sources: `URLPDFSourceParam{URL: \"https://...\"}`, `PlainTextSourceParam{Data: \"...\"}`.\n\n---\n\n## Files API (Beta)\n\nUnder `client.Beta.Files`. Method is **`Upload`** (NOT `New`/`Create`), params struct is `BetaFileUploadParams`. The `File` field takes an `io.Reader`; use `anthropic.File()` to attach a filename + content-type for the multipart encoding.\n\n```go\nf, _ := os.Open(\"./upload_me.txt\")\ndefer f.Close()\n\nmeta, err := client.Beta.Files.Upload(ctx, anthropic.BetaFileUploadParams{\n File: anthropic.File(f, \"upload_me.txt\", \"text/plain\"),\n Betas: []anthropic.AnthropicBeta{anthropic.AnthropicBetaFilesAPI2025_04_14},\n})\n// meta.ID is the file_id to reference in subsequent message requests\n```\n\nOther `Beta.Files` methods: `List`, `Delete`, `Download`, `GetMetadata`.\n\n---\n\n## Context Editing / Compaction (Beta)\n\nUse `Beta.Messages.New` with `ContextManagement` on `BetaMessageNewParams`. There is no `NewBetaAssistantMessage` — use `.ToParam()` for the round-trip.\n\n```go\nparams := anthropic.BetaMessageNewParams{\n Model: anthropic.ModelClaudeOpus4_6, // also supported: ModelClaudeSonnet4_6\n MaxTokens: 16000,\n Betas: []anthropic.AnthropicBeta{\"compact-2026-01-12\"},\n ContextManagement: anthropic.BetaContextManagementConfigParam{\n Edits: []anthropic.BetaContextManagementConfigEditUnionParam{\n {OfCompact20260112: &anthropic.BetaCompact20260112EditParam{}},\n },\n },\n Messages: []anthropic.BetaMessageParam{ /* ... 
*/ },\n}\n\nresp, err := client.Beta.Messages.New(ctx, params)\nif err != nil {\n log.Fatal(err)\n}\n\n// Round-trip: append response to history via .ToParam()\nparams.Messages = append(params.Messages, resp.ToParam())\n\n// Read compaction blocks from the response\nfor _, block := range resp.Content {\n if c, ok := block.AsAny().(anthropic.BetaCompactionBlock); ok {\n fmt.Println(\"compaction summary:\", c.Content)\n }\n}\n```\n\nOther edit types: `BetaClearToolUses20250919EditParam`, `BetaClearThinking20251015EditParam`.\n"
  },
  {
    "name": "java",
    "node_type": "folder",
    "children": [
      {
        "name": "claude-api.md",
        "node_type": "file",
        "content": "# Claude API — Java\n\n> **Note:** The Java SDK supports the Claude API and beta tool use with annotated classes. Agent SDK is not yet available for Java.\n\n## Installation\n\nMaven:\n\n```xml\n<dependency>\n  <groupId>com.anthropic</groupId>\n  <artifactId>anthropic-java</artifactId>\n  <version>2.17.0</version>\n</dependency>\n```\n\nGradle:\n\n```groovy\nimplementation(\"com.anthropic:anthropic-java:2.17.0\")\n```\n\n## Client Initialization\n\n```java\nimport com.anthropic.client.AnthropicClient;\nimport com.anthropic.client.okhttp.AnthropicOkHttpClient;\n\n// Default (reads ANTHROPIC_API_KEY from environment)\nAnthropicClient client = AnthropicOkHttpClient.fromEnv();\n\n// Explicit API key\nAnthropicClient client = AnthropicOkHttpClient.builder()\n .apiKey(\"your-api-key\")\n .build();\n```\n\n---\n\n## Basic Message Request\n\n```java\nimport com.anthropic.models.messages.MessageCreateParams;\nimport com.anthropic.models.messages.Message;\nimport com.anthropic.models.messages.Model;\n\nMessageCreateParams params = MessageCreateParams.builder()\n .model(Model.CLAUDE_OPUS_4_6)\n .maxTokens(16000L)\n .addUserMessage(\"What is the capital of France?\")\n .build();\n\nMessage response = client.messages().create(params);\nresponse.content().stream()\n .flatMap(block -> block.text().stream())\n .forEach(textBlock -> System.out.println(textBlock.text()));\n```\n\n---\n\n## Streaming\n\n```java\nimport com.anthropic.core.http.StreamResponse;\nimport com.anthropic.models.messages.RawMessageStreamEvent;\n\nMessageCreateParams params = MessageCreateParams.builder()\n .model(Model.CLAUDE_OPUS_4_6)\n .maxTokens(64000L)\n .addUserMessage(\"Write a haiku\")\n .build();\n\ntry (StreamResponse<RawMessageStreamEvent> streamResponse = client.messages().createStreaming(params)) {\n streamResponse.stream()\n .flatMap(event -> event.contentBlockDelta().stream())\n .flatMap(deltaEvent -> deltaEvent.delta().text().stream())\n .forEach(textDelta -> System.out.print(textDelta.text()));\n}\n```\n\n---\n\n## Thinking\n\n**Adaptive thinking is the recommended mode for Claude 4.6+ models.** Claude decides dynamically when and how much to think. 
The builder has a direct `.thinking(ThinkingConfigAdaptive)` overload — no manual union wrapping.\n\n```java\nimport com.anthropic.models.messages.ContentBlock;\nimport com.anthropic.models.messages.MessageCreateParams;\nimport com.anthropic.models.messages.Model;\nimport com.anthropic.models.messages.ThinkingConfigAdaptive;\n\nMessageCreateParams params = MessageCreateParams.builder()\n .model(Model.CLAUDE_SONNET_4_6)\n .maxTokens(16000L)\n .thinking(ThinkingConfigAdaptive.builder().build())\n .addUserMessage(\"Solve this step by step: 27 * 453\")\n .build();\n\nfor (ContentBlock block : client.messages().create(params).content()) {\n block.thinking().ifPresent(t -> System.out.println(\"[thinking] \" + t.thinking()));\n block.text().ifPresent(t -> System.out.println(t.text()));\n}\n```\n\n> **Deprecated:** `ThinkingConfigEnabled.builder().budgetTokens(N)` (and the `.enabledThinking(N)` shortcut) still works on Claude 4.6 but is deprecated. Use adaptive thinking above.\n\n`ContentBlock` narrowing: `.thinking()` / `.text()` return `Optional` — use `.ifPresent(...)` or `.stream().flatMap(...)`. Alternative: `isThinking()` / `asThinking()` boolean+unwrap pairs (throws on wrong variant).
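\n\nA short sketch of the boolean+unwrap style (assumes a `Message response` from the basic request above; the `isText()`/`asText()` pair is assumed by analogy with the thinking accessors):\n\n```java\nfor (ContentBlock block : response.content()) {\n // is*() guards the matching as*() unwrap, which throws on the wrong variant\n if (block.isThinking()) {\n System.out.println(\"[thinking] \" + block.asThinking().thinking());\n } else if (block.isText()) {\n System.out.println(block.asText().text());\n }\n}\n```\n\n---\n\n## Tool Use (Beta)\n\nThe Java SDK supports beta tool use with annotated classes. Tool classes implement `Supplier<String>` for automatic execution via `BetaToolRunner`.\n\n### Tool Runner (automatic loop)\n\n```java\nimport com.anthropic.models.beta.messages.MessageCreateParams;\nimport com.anthropic.models.beta.messages.BetaMessage;\nimport com.anthropic.helpers.BetaToolRunner;\nimport com.fasterxml.jackson.annotation.JsonClassDescription;\nimport com.fasterxml.jackson.annotation.JsonPropertyDescription;\nimport java.util.function.Supplier;\n\n@JsonClassDescription(\"Get the weather in a given location\")\nstatic class GetWeather implements Supplier<String> {\n @JsonPropertyDescription(\"The city and state, e.g. San Francisco, CA\")\n public String location;\n\n @Override\n public String get() {\n return \"The weather in \" + location + \" is sunny and 72°F\";\n }\n}\n\nBetaToolRunner toolRunner = client.beta().messages().toolRunner(\n MessageCreateParams.builder()\n .model(\"claude-opus-4-6\")\n .maxTokens(16000L)\n .putAdditionalHeader(\"anthropic-beta\", \"structured-outputs-2025-11-13\")\n .addTool(GetWeather.class)\n .addUserMessage(\"What's the weather in San Francisco?\")\n .build());\n\nfor (BetaMessage message : toolRunner) {\n System.out.println(message);\n}\n```\n\n### Memory Tool\n\nThe Java SDK provides `BetaMemoryToolHandler` for implementing the memory tool backend. 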
You supply a handler that manages file storage, and the `BetaToolRunner` handles memory tool calls automatically.\n\n```java\nimport com.anthropic.helpers.BetaMemoryToolHandler;\nimport com.anthropic.helpers.BetaToolRunner;\nimport com.anthropic.models.beta.messages.BetaMemoryTool20250818;\nimport com.anthropic.models.beta.messages.BetaMessage;\nimport com.anthropic.models.beta.messages.MessageCreateParams;\nimport com.anthropic.models.beta.messages.ToolRunnerCreateParams;\n\n// Implement BetaMemoryToolHandler with your storage backend (e.g., filesystem)\nBetaMemoryToolHandler memoryHandler = new FileSystemMemoryToolHandler(sandboxRoot);\n\nMessageCreateParams createParams = MessageCreateParams.builder()\n .model(\"claude-opus-4-6\")\n .maxTokens(4096L)\n .addTool(BetaMemoryTool20250818.builder().build())\n .addUserMessage(\"Remember that my favorite color is blue\")\n .build();\n\nBetaToolRunner toolRunner = client.beta().messages().toolRunner(\n ToolRunnerCreateParams.builder()\n .betaMemoryToolHandler(memoryHandler)\n .initialMessageParams(createParams)\n .build());\n\nfor (BetaMessage message : toolRunner) {\n System.out.println(message);\n}\n```\n\nSee the [shared memory tool concepts](../shared/tool-use-concepts.md) for more details on the memory tool.\n\n### Non-Beta Tool Declaration (manual JSON schema)\n\n`Tool.InputSchema.Properties` is a freeform `Map` wrapper — build property schemas via `putAdditionalProperty`. `type: \"object\"` is the default. The builder has a direct `.addTool(Tool)` overload that wraps in `ToolUnion` automatically.\n\n```java\nimport com.anthropic.core.JsonValue;\nimport com.anthropic.models.messages.Tool;\n\nTool tool = Tool.builder()\n .name(\"get_weather\")\n .description(\"Get the current weather in a given location\")\n .inputSchema(Tool.InputSchema.builder()\n .properties(Tool.InputSchema.Properties.builder()\n .putAdditionalProperty(\"location\", JsonValue.from(Map.of(\"type\", \"string\")))\n .build())\n .required(List.of(\"location\"))\n .build())\n .build();\n\nMessageCreateParams params = MessageCreateParams.builder()\n .model(Model.CLAUDE_SONNET_4_6)\n .maxTokens(16000L)\n .addTool(tool)\n .addUserMessage(\"Weather in Paris?\")\n .build();\n```\n\nFor manual tool loops, handle `tool_use` blocks in the response, send `tool_result` back, loop until `stop_reason` is `\"end_turn\"`. See [shared tool use concepts](../shared/tool-use-concepts.md).\n\n### Building `MessageParam` with Content Blocks (Tool Result Round-Trip)\n\n`MessageParam.Content` is an inner union class (string | list). 
Use the builder's `.contentOfBlockParams(List<ContentBlockParam>)` alias — there is NO separate `MessageParamContent` class with a static `ofBlockParams`:\n\n```java\nimport com.anthropic.models.messages.MessageParam;\nimport com.anthropic.models.messages.ContentBlockParam;\nimport com.anthropic.models.messages.ToolResultBlockParam;\nimport java.util.List;\n\nList<ContentBlockParam> results = List.of(\n ContentBlockParam.ofToolResult(ToolResultBlockParam.builder()\n .toolUseId(toolUseBlock.id())\n .content(yourResultString)\n .build())\n);\n\nMessageParam toolResultMsg = MessageParam.builder()\n .role(MessageParam.Role.USER)\n .contentOfBlockParams(results) // builder alias for Content.ofBlockParams(...)\n .build();\n```\n\n---\n\n## Effort Parameter\n\nEffort is nested inside `OutputConfig` — there is NO `.effort()` directly on `MessageCreateParams.Builder`.\n\n```java\nimport com.anthropic.models.messages.OutputConfig;\n\n.outputConfig(OutputConfig.builder()\n .effort(OutputConfig.Effort.HIGH) // or LOW, MEDIUM, MAX\n .build())\n```\n\nCombine with adaptive thinking (`.thinking(ThinkingConfigAdaptive.builder().build())`) for cost-quality control.\n\n---\n\n## Prompt Caching\n\nSystem message as a list of `TextBlockParam` with `CacheControlEphemeral`. Use `.systemOfTextBlockParams(...)` — the plain `.system(String)` overload can't carry cache control. For placement patterns and the silent-invalidator audit checklist, see `shared/prompt-caching.md`.\n\n```java\nimport com.anthropic.models.messages.TextBlockParam;\nimport com.anthropic.models.messages.CacheControlEphemeral;\n\n.systemOfTextBlockParams(List.of(\n TextBlockParam.builder()\n .text(longSystemPrompt)\n .cacheControl(CacheControlEphemeral.builder()\n .ttl(CacheControlEphemeral.Ttl.TTL_1H) // optional; also TTL_5M\n .build())\n .build()))\n```\n\nThere's also a top-level `.cacheControl(CacheControlEphemeral)` on `MessageCreateParams.Builder` and on `Tool.builder()`.\n\nVerify hits via `response.usage().cacheCreationInputTokens()` / `response.usage().cacheReadInputTokens()`.\n\n---\n\n## Token Counting\n\n```java\nimport com.anthropic.models.messages.MessageCountTokensParams;\n\nlong tokens = client.messages().countTokens(\n MessageCountTokensParams.builder()\n .model(Model.CLAUDE_SONNET_4_6)\n .addUserMessage(\"Hello\")\n .build()\n).inputTokens();\n```\n\n---\n\n## Structured Output\n\nThe class-based overload auto-derives the JSON schema from your POJO and gives you a typed `.text()` return — no manual schema, no manual parsing.\n\n```java\nimport com.anthropic.models.messages.StructuredMessageCreateParams;\nimport java.util.List;\n\nrecord Book(String title, String author) {}\nrecord BookList(List<Book> books) {}\n\nStructuredMessageCreateParams<BookList> params = MessageCreateParams.builder()\n .model(Model.CLAUDE_SONNET_4_6)\n .maxTokens(16000L)\n .outputConfig(BookList.class) // returns a typed builder\n .addUserMessage(\"List 3 classic novels\")\n .build();\n\nclient.messages().create(params).content().stream()\n .flatMap(cb -> cb.text().stream())\n .forEach(typed -> {\n // typed.text() returns BookList, not String\n for (Book b : typed.text().books()) System.out.println(b.title());\n });\n```\n\nSupports Jackson annotations: `@JsonPropertyDescription`, `@JsonIgnore`, `@ArraySchema(minItems=...)`. Manual schema path: `OutputConfig.builder().format(JsonOutputFormat.builder().schema(...).build())`.\n\n---\n\n## PDF / Document Input\n\n`DocumentBlockParam` builder has source shortcuts. 
Wrap in `ContentBlockParam.ofDocument()` and pass via `.addUserMessageOfBlockParams()`.\n\n```java\nimport com.anthropic.models.messages.DocumentBlockParam;\nimport com.anthropic.models.messages.ContentBlockParam;\nimport com.anthropic.models.messages.TextBlockParam;\n\nDocumentBlockParam doc = DocumentBlockParam.builder()\n .base64Source(base64String) // or .urlSource(\"https://...\") or .textSource(\"...\")\n .title(\"My Document\") // optional\n .build();\n\n.addUserMessageOfBlockParams(List.of(\n ContentBlockParam.ofDocument(doc),\n ContentBlockParam.ofText(TextBlockParam.builder().text(\"Summarize this\").build())))\n```\n\n---\n\n## Server-Side Tools\n\nVersion-suffixed types; `name`/`type` auto-set by builder. Direct `.addTool()` overloads exist for every type — no manual `ToolUnion` wrapping.\n\n```java\nimport com.anthropic.models.messages.WebSearchTool20260209;\nimport com.anthropic.models.messages.ToolBash20250124;\nimport com.anthropic.models.messages.ToolTextEditor20250728;\nimport com.anthropic.models.messages.CodeExecutionTool20260120;\n\n.addTool(WebSearchTool20260209.builder()\n .maxUses(5L) // optional\n .allowedDomains(List.of(\"example.com\")) // optional\n .build())\n.addTool(ToolBash20250124.builder().build())\n.addTool(ToolTextEditor20250728.builder().build())\n.addTool(CodeExecutionTool20260120.builder().build())\n```\n\nAlso available: `WebFetchTool20260209`, `MemoryTool20250818`, `ToolSearchToolBm25_20251119`.\n\n### Beta namespace (MCP, compaction)\n\nFor beta-only features use `com.anthropic.models.beta.messages.*` — class names have a `Beta` prefix AND live in the beta package. The beta `MessageCreateParams.Builder` has direct `.addTool(BetaToolBash20250124)` overloads AND `.addMcpServer()`:\n\n```java\nimport com.anthropic.models.beta.messages.MessageCreateParams;\nimport com.anthropic.models.beta.messages.BetaToolBash20250124;\nimport com.anthropic.models.beta.messages.BetaCodeExecutionTool20260120;\nimport com.anthropic.models.beta.messages.BetaRequestMcpServerUrlDefinition;\n\nMessageCreateParams params = MessageCreateParams.builder()\n .model(Model.CLAUDE_OPUS_4_6)\n .maxTokens(16000L)\n .addBeta(\"mcp-client-2025-11-20\")\n .addTool(BetaToolBash20250124.builder().build())\n .addTool(BetaCodeExecutionTool20260120.builder().build())\n .addMcpServer(BetaRequestMcpServerUrlDefinition.builder()\n .name(\"my-server\")\n .url(\"https://example.com/mcp\")\n .build())\n .addUserMessage(\"...\")\n .build();\n\nclient.beta().messages().create(params);\n```\n\n`BetaTool*` types are NOT interchangeable with non-beta `Tool*` — pick one namespace per request.\n\n**Reading server-tool blocks in the response:** `ServerToolUseBlock` has `.id()`, `.name()` (enum), and `._input()` returning raw `JsonValue` — there is NO typed `.input()`. For code execution results, unwrap two levels:\n\n```java\nfor (ContentBlock block : response.content()) {\n block.serverToolUse().ifPresent(stu -> {\n System.out.println(\"tool: \" + stu.name() + \" input: \" + stu._input());\n });\n block.codeExecutionToolResult().ifPresent(r -> {\n r.content().resultBlock().ifPresent(result -> {\n System.out.println(\"stdout: \" + result.stdout());\n System.out.println(\"stderr: \" + result.stderr());\n System.out.println(\"exit: \" + result.returnCode());\n });\n });\n}\n```\n\n---\n\n## Files API (Beta)\n\nUnder `client.beta().files()`. 
File references in messages need the beta message types (non-beta `DocumentBlockParam.Source` has no file-ID variant).\n\n```java\nimport com.anthropic.models.beta.files.FileUploadParams;\nimport com.anthropic.models.beta.files.FileMetadata;\nimport com.anthropic.models.beta.messages.BetaRequestDocumentBlock;\nimport java.nio.file.Paths;\n\nFileMetadata meta = client.beta().files().upload(\n FileUploadParams.builder()\n .file(Paths.get(\"/path/to/doc.pdf\")) // or .file(InputStream) or .file(byte[])\n .build());\n\n// Reference in a beta message:\nBetaRequestDocumentBlock doc = BetaRequestDocumentBlock.builder()\n .fileSource(meta.id())\n .build();\n```\n\nOther methods: `.list()`, `.delete(String fileId)`, `.download(String fileId)`, `.retrieveMetadata(String fileId)`.\n"
      }
    ]
  },
  {
    "name": "php",
    "node_type": "folder",
    "children": [
      {
        "name": "claude-api.md",
        "node_type": "file",
        "content": "# Claude API — PHP\n\n> **Note:** The PHP SDK is the official Anthropic SDK for PHP. A beta tool runner is available via `$client->beta->messages->toolRunner()`. Structured output helpers are supported via `StructuredOutputModel` classes. Agent SDK is not available. Bedrock, Vertex AI, and Foundry clients are supported.\n\n## Installation\n\n```bash\ncomposer require \"anthropic-ai/sdk\"\n```\n\n## Client Initialization\n\n```php\nuse Anthropic\\Client;\n\n// Using API key from environment variable\n$client = new Client(apiKey: getenv(\"ANTHROPIC_API_KEY\"));\n```\n\n### Amazon Bedrock\n\n```php\nuse Anthropic\\Bedrock;\n\n// Constructor is private — use the static factory. Reads AWS credentials from env.\n$client = Bedrock\\Client::fromEnvironment(region: 'us-east-1');\n```\n\n### Google Vertex AI\n\n```php\nuse Anthropic\\Vertex;\n\n// Constructor is private. Parameter is `location`, not `region`.\n$client = Vertex\\Client::fromEnvironment(\n location: 'us-east5',\n projectId: 'my-project-id',\n);\n```\n\n### Anthropic Foundry\n\n```php\nuse Anthropic\\Foundry;\n\n// Constructor is private. baseUrl or resource is required.\n$client = Foundry\\Client::withCredentials(\n authToken: getenv('ANTHROPIC_FOUNDRY_AUTH_TOKEN'),\n baseUrl: 'https://<resource>.services.ai.azure.com/anthropic', // substitute your resource name\n);\n```\n\n---\n\n## Basic Message Request\n\n```php\n$message = $client->messages->create(\n model: 'claude-opus-4-6',\n maxTokens: 16000,\n messages: [\n ['role' => 'user', 'content' => 'What is the capital of France?'],\n ],\n);\n\n// content is an array of polymorphic blocks (TextBlock, ToolUseBlock,\n// ThinkingBlock). Accessing ->text on content[0] without checking the block\n// type will throw if the first block is not a TextBlock (e.g., when extended\n// thinking is enabled and a ThinkingBlock comes first). Always guard:\nforeach ($message->content as $block) {\n if ($block->type === 'text') {\n echo $block->text;\n }\n}\n```\n\nIf you only want the first text block:\n\n```php\nforeach ($message->content as $block) {\n if ($block->type === 'text') {\n echo $block->text;\n break;\n }\n}\n```\n\n---\n\n## Streaming\n\n> **Requires SDK v0.5.0+.** v0.4.0 and earlier used a single `$params` array; calling with named parameters throws `Unknown named parameter $model`. 
Upgrade: `composer require \"anthropic-ai/sdk:^0.7\"`\n\n```php\nuse Anthropic\\Messages\\RawContentBlockDeltaEvent;\nuse Anthropic\\Messages\\TextDelta;\n\n$stream = $client->messages->createStream(\n model: 'claude-opus-4-6',\n maxTokens: 64000,\n messages: [\n ['role' => 'user', 'content' => 'Write a haiku'],\n ],\n);\n\nforeach ($stream as $event) {\n if ($event instanceof RawContentBlockDeltaEvent && $event->delta instanceof TextDelta) {\n echo $event->delta->text;\n }\n}\n```\n\n---\n\n## Tool Use\n\n### Tool Runner (Beta)\n\n**Beta:** The PHP SDK provides a tool runner via `$client->beta->messages->toolRunner()`. Define tools with `BetaRunnableTool` — a definition array plus a `run` closure:\n\n```php\nuse Anthropic\\Lib\\Tools\\BetaRunnableTool;\n\n$weatherTool = new BetaRunnableTool(\n definition: [\n 'name' => 'get_weather',\n 'description' => 'Get the current weather for a location.',\n 'input_schema' => [\n 'type' => 'object',\n 'properties' => [\n 'location' => ['type' => 'string', 'description' => 'City and state'],\n ],\n 'required' => ['location'],\n ],\n ],\n run: function (array $input): string {\n return \"The weather in {$input['location']} is sunny and 72°F.\";\n },\n);\n\n$runner = $client->beta->messages->toolRunner(\n maxTokens: 16000,\n messages: [['role' => 'user', 'content' => 'What is the weather in Paris?']],\n model: 'claude-opus-4-6',\n tools: [$weatherTool],\n);\n\nforeach ($runner as $message) {\n foreach ($message->content as $block) {\n if ($block->type === 'text') {\n echo $block->text;\n }\n }\n}\n```\n\n### Manual Loop\n\nTools are passed as arrays. **The SDK uses camelCase keys** (`inputSchema`, `toolUseID`, `stopReason`) and auto-maps to the API's snake_case on the wire — since v0.5.0. See [shared tool use concepts](../shared/tool-use-concepts.md) for the loop pattern.\n\n```php\nuse Anthropic\\Messages\\ToolUseBlock;\n\n$tools = [\n [\n 'name' => 'get_weather',\n 'description' => 'Get the current weather in a given location',\n 'inputSchema' => [ // camelCase, not input_schema\n 'type' => 'object',\n 'properties' => [\n 'location' => ['type' => 'string', 'description' => 'City and state'],\n ],\n 'required' => ['location'],\n ],\n ],\n];\n\n$messages = [['role' => 'user', 'content' => 'What is the weather in SF?']];\n\n$response = $client->messages->create(\n model: 'claude-opus-4-6',\n maxTokens: 16000,\n tools: $tools,\n messages: $messages,\n);\n\nwhile ($response->stopReason === 'tool_use') { // camelCase property\n $toolResults = [];\n foreach ($response->content as $block) {\n if ($block instanceof ToolUseBlock) {\n // $block->name : string — tool name to dispatch on\n // $block->input : array — parsed JSON input\n // $block->id : string — pass back as toolUseID\n $result = executeYourTool($block->name, $block->input);\n $toolResults[] = [\n 'type' => 'tool_result',\n 'toolUseID' => $block->id, // camelCase, not tool_use_id\n 'content' => $result,\n ];\n }\n }\n\n // Append assistant turn + user turn with tool results\n $messages[] = ['role' => 'assistant', 'content' => $response->content];\n $messages[] = ['role' => 'user', 'content' => $toolResults];\n\n $response = $client->messages->create(\n model: 'claude-opus-4-6',\n maxTokens: 16000,\n tools: $tools,\n messages: $messages,\n );\n}\n\n// Final text response\nforeach ($response->content as $block) {\n if ($block->type === 'text') {\n echo $block->text;\n }\n}\n```\n\n`$block->type === 'tool_use'` also works; `instanceof ToolUseBlock` narrows for PHPStan.\n\n\n---\n\n## Extended 
Thinking\n\n**Adaptive thinking is the recommended mode for Claude 4.6+ models.** Claude decides dynamically when and how much to think.\n\n```php\nuse Anthropic\\Messages\\ThinkingBlock;\n\n$message = $client->messages->create(\n model: 'claude-opus-4-6',\n maxTokens: 16000,\n thinking: ['type' => 'adaptive'],\n messages: [\n ['role' => 'user', 'content' => 'Solve: 27 * 453'],\n ],\n);\n\n// ThinkingBlock(s) precede TextBlock in content\nforeach ($message->content as $block) {\n if ($block instanceof ThinkingBlock) {\n echo \"Thinking:\\n{$block->thinking}\\n\\n\";\n // $block->signature is an opaque string — preserve verbatim if\n // passing thinking blocks back in multi-turn conversations\n } elseif ($block->type === 'text') {\n echo \"Answer: {$block->text}\\n\";\n }\n}\n```\n\n> **Deprecated:** `['type' => 'enabled', 'budgetTokens' => N]` (fixed-budget extended thinking) still works on Claude 4.6 but is deprecated. Use adaptive thinking above.\n\n`$block->type === 'thinking'` also works for the check; `instanceof` narrows for PHPStan.\n\n---\n\n## Prompt Caching\n\n`system:` takes an array of text blocks; set `cacheControl` on the last block. Array-shape syntax (camelCase keys) is idiomatic. For placement patterns and the silent-invalidator audit checklist, see `shared/prompt-caching.md`.\n\n```php\n$message = $client->messages->create(\n model: 'claude-opus-4-6',\n maxTokens: 16000,\n system: [\n ['type' => 'text', 'text' => $longSystemPrompt, 'cacheControl' => ['type' => 'ephemeral']],\n ],\n messages: [['role' => 'user', 'content' => 'Summarize the key points']],\n);\n```\n\nFor 1-hour TTL: `'cacheControl' => ['type' => 'ephemeral', 'ttl' => '1h']`. There's also a top-level `cacheControl:` on `messages->create(...)` that auto-places on the last cacheable block.\n\nVerify hits via `$message->usage->cacheCreationInputTokens` / `$message->usage->cacheReadInputTokens`.\n\n---\n\n## Structured Outputs\n\n### Using StructuredOutputModel (Recommended)\n\nDefine a PHP class implementing `StructuredOutputModel` and pass it as `outputConfig`:\n\n```php\nuse Anthropic\\Lib\\Contracts\\StructuredOutputModel;\nuse Anthropic\\Lib\\Concerns\\StructuredOutputModelTrait;\nuse Anthropic\\Lib\\Attributes\\Constrained;\n\nclass Person implements StructuredOutputModel\n{\n use StructuredOutputModelTrait;\n\n #[Constrained(description: 'Full name')]\n public string $name;\n\n public int $age;\n\n public ?string $email = null; // nullable = optional field\n}\n\n$message = $client->messages->create(\n model: 'claude-opus-4-6',\n maxTokens: 16000,\n messages: [['role' => 'user', 'content' => 'Generate a profile for Alice, age 30']],\n outputConfig: ['format' => Person::class],\n);\n\n$person = $message->parsedOutput(); // Person instance\necho $person->name;\n```\n\nTypes are inferred from PHP type hints. Use `#[Constrained(description: '...')]` to add descriptions. 
Nullable properties (`?string`) become optional fields.\n\n### Raw Schema\n\n```php\n$message = $client->messages->create(\n model: 'claude-opus-4-6',\n maxTokens: 16000,\n messages: [['role' => 'user', 'content' => 'Extract: John (john@co.com), Enterprise plan']],\n outputConfig: [\n 'format' => [\n 'type' => 'json_schema',\n 'schema' => [\n 'type' => 'object',\n 'properties' => [\n 'name' => ['type' => 'string'],\n 'email' => ['type' => 'string'],\n 'plan' => ['type' => 'string'],\n ],\n 'required' => ['name', 'email', 'plan'],\n 'additionalProperties' => false,\n ],\n ],\n ],\n);\n\n// First text block contains valid JSON\nforeach ($message->content as $block) {\n if ($block->type === 'text') {\n $data = json_decode($block->text, true);\n break;\n }\n}\n```\n\n---\n\n## Beta Features & Server-Side Tools\n\n**`betas:` is NOT a param on `$client->messages->create()`** — it only exists on the beta namespace. Use it for features that need an explicit opt-in header:\n\n```php\nuse Anthropic\\Beta\\Messages\\BetaRequestMCPServerURLDefinition;\n\n$response = $client->beta->messages->create(\n model: 'claude-opus-4-6',\n maxTokens: 16000,\n mcpServers: [\n BetaRequestMCPServerURLDefinition::with(\n name: 'my-server',\n url: 'https://example.com/mcp',\n ),\n ],\n betas: ['mcp-client-2025-11-20'], // only valid on ->beta->messages\n messages: [['role' => 'user', 'content' => 'Use the MCP tools']],\n);\n```\n\n**Server-side tools** (bash, web_search, text_editor, code_execution) are GA and work on both paths — `Anthropic\\Messages\\ToolBash20250124` / `WebSearchTool20260209` / `ToolTextEditor20250728` / `CodeExecutionTool20260120` for non-beta, `Anthropic\\Beta\\Messages\\BetaToolBash20250124` / `BetaWebSearchTool20260209` / `BetaToolTextEditor20250728` / `BetaCodeExecutionTool20260120` for beta. No `betas:` header needed for these.\n" + } + ] + }, + { + "name": "python", + "node_type": "folder", + "children": [ + { + "name": "agent-sdk", + "node_type": "folder", + "children": [ + { + "name": "README.md", + "node_type": "file", + "content": "# Agent SDK — Python\n\nThe Claude Agent SDK provides a higher-level interface for building AI agents with built-in tools, safety features, and agentic capabilities.\n\n## Installation\n\n```bash\npip install claude-agent-sdk\n```\n\n---\n\n## Quick Start\n\n```python\nimport anyio\nfrom claude_agent_sdk import query, ClaudeAgentOptions, ResultMessage\n\nasync def main():\n async for message in query(\n prompt=\"Explain this codebase\",\n options=ClaudeAgentOptions(allowed_tools=[\"Read\", \"Glob\", \"Grep\"])\n ):\n if isinstance(message, ResultMessage):\n print(message.result)\n\nanyio.run(main)\n```\n\n---\n\n## Built-in Tools\n\n| Tool | Description |\n| --------- | ------------------------------------ |\n| Read | Read files in the workspace |\n| Write | Create new files |\n| Edit | Make precise edits to existing files |\n| Bash | Execute shell commands |\n| Glob | Find files by pattern |\n| Grep | Search files by content |\n| WebSearch | Search the web for information |\n| WebFetch | Fetch and analyze web pages |\n| AskUserQuestion | Ask user clarifying questions |\n| Agent | Spawn subagents |\n\n---\n\n## Primary Interfaces\n\n### `query()` — Simple One-Shot Usage\n\nThe `query()` function is the simplest way to run an agent. 
It returns an async iterator of messages.\n\n```python\nfrom claude_agent_sdk import query, ClaudeAgentOptions, ResultMessage\n\nasync for message in query(\n prompt=\"Explain this codebase\",\n options=ClaudeAgentOptions(allowed_tools=[\"Read\", \"Glob\", \"Grep\"])\n):\n if isinstance(message, ResultMessage):\n print(message.result)\n```\n\n### `ClaudeSDKClient` — Full Control\n\n`ClaudeSDKClient` provides full control over the agent lifecycle. Use it when you need custom tools, hooks, streaming, or the ability to interrupt execution.\n\n```python\nimport anyio\nfrom claude_agent_sdk import ClaudeSDKClient, ClaudeAgentOptions, AssistantMessage, TextBlock\n\nasync def main():\n options = ClaudeAgentOptions(allowed_tools=[\"Read\", \"Glob\", \"Grep\"])\n async with ClaudeSDKClient(options=options) as client:\n await client.query(\"Explain this codebase\")\n async for message in client.receive_response():\n if isinstance(message, AssistantMessage):\n for block in message.content:\n if isinstance(block, TextBlock):\n print(block.text)\n\nanyio.run(main)\n```\n\n`ClaudeSDKClient` supports:\n\n- **Context manager** (`async with`) for automatic resource cleanup\n- **`client.query(prompt)`** to send a prompt to the agent\n- **`receive_response()`** for streaming messages until completion\n- **`interrupt()`** to stop agent execution mid-task\n- **Required for custom tools** (via SDK MCP servers)\n\n---\n\n## Permission System\n\n```python\nfrom claude_agent_sdk import query, ClaudeAgentOptions, ResultMessage\n\nasync for message in query(\n prompt=\"Refactor the authentication module\",\n options=ClaudeAgentOptions(\n allowed_tools=[\"Read\", \"Edit\", \"Write\"],\n permission_mode=\"acceptEdits\" # Auto-accept file edits\n )\n):\n if isinstance(message, ResultMessage):\n print(message.result)\n```\n\nPermission modes:\n\n- `\"default\"`: Prompt for dangerous operations\n- `\"plan\"`: Planning only, no execution\n- `\"acceptEdits\"`: Auto-accept file edits\n- `\"bypassPermissions\"`: Skip all prompts (use with caution)\n\n---\n\n## MCP (Model Context Protocol) Support\n\n```python\nfrom claude_agent_sdk import query, ClaudeAgentOptions, ResultMessage\n\nasync for message in query(\n prompt=\"Open example.com and describe what you see\",\n options=ClaudeAgentOptions(\n mcp_servers={\n \"playwright\": {\"command\": \"npx\", \"args\": [\"@playwright/mcp@latest\"]}\n }\n )\n):\n if isinstance(message, ResultMessage):\n print(message.result)\n```\n\n---\n\n## Hooks\n\nCustomize agent behavior with hooks using callback functions:\n\n```python\nfrom claude_agent_sdk import query, ClaudeAgentOptions, HookMatcher, ResultMessage\n\nasync def log_file_change(input_data, tool_use_id, context):\n file_path = input_data.get('tool_input', {}).get('file_path', 'unknown')\n print(f\"Modified: {file_path}\")\n return {}\n\nasync for message in query(\n prompt=\"Refactor utils.py\",\n options=ClaudeAgentOptions(\n permission_mode=\"acceptEdits\",\n hooks={\n \"PostToolUse\": [HookMatcher(matcher=\"Edit|Write\", hooks=[log_file_change])]\n }\n )\n):\n if isinstance(message, ResultMessage):\n print(message.result)\n```\n\nHook callback inputs for tool-lifecycle events (`PreToolUse`, `PostToolUse`, `PostToolUseFailure`) include `agent_id` and `agent_type` fields, allowing hooks to identify which agent (main or subagent) triggered the tool call.\n\nAvailable hook events: `PreToolUse`, `PostToolUse`, `PostToolUseFailure`, `UserPromptSubmit`, `Stop`, `SubagentStop`, `PreCompact`, `Notification`, `SubagentStart`, 
`PermissionRequest`\n\n---\n\n## Common Options\n\n`query()` takes a top-level `prompt` (string) and an `options` object (`ClaudeAgentOptions`):\n\n```python\nasync for message in query(prompt=\"...\", options=ClaudeAgentOptions(...)):\n```\n\n| Option | Type | Description |\n| --- | --- | --- |\n| `cwd` | string | Working directory for file operations |\n| `allowed_tools` | list | Tools the agent can use (e.g., `[\"Read\", \"Edit\", \"Bash\"]`) |\n| `tools` | list | Built-in tools to make available (restricts the default set) |\n| `disallowed_tools` | list | Tools to explicitly disallow |\n| `permission_mode` | string | How to handle permission prompts |\n| `mcp_servers` | dict | MCP servers to connect to |\n| `hooks` | dict | Hooks for customizing behavior |\n| `system_prompt` | string | Custom system prompt |\n| `max_turns` | int | Maximum agent turns before stopping |\n| `max_budget_usd` | float | Maximum budget in USD for the query |\n| `model` | string | Model ID (default: determined by CLI) |\n| `agents` | dict | Subagent definitions (`dict[str, AgentDefinition]`) |\n| `output_format` | dict | Structured output schema |\n| `thinking` | dict | Thinking/reasoning control |\n| `betas` | list | Beta features to enable (e.g., `[\"context-1m-2025-08-07\"]`) |\n| `setting_sources` | list | Settings to load (e.g., `[\"project\"]`). Default: none (no CLAUDE.md files) |\n| `env` | dict | Environment variables to set for the session |
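\n\nA combined sketch using several of the options above (paths and limits are illustrative):\n\n```python\nfrom claude_agent_sdk import query, ClaudeAgentOptions, ResultMessage\n\noptions = ClaudeAgentOptions(\n cwd=\"/path/to/project\", # scope file operations\n allowed_tools=[\"Read\", \"Edit\", \"Bash\"],\n permission_mode=\"acceptEdits\", # auto-accept file edits\n max_turns=20, # stop runaway agents\n max_budget_usd=2.50, # hard cost ceiling\n model=\"claude-opus-4-6\",\n)\n\nasync for message in query(prompt=\"Fix the failing tests\", options=options):\n if isinstance(message, ResultMessage):\n print(message.result)\n```\n\n---\n\n## Message Types\n\n```python\nfrom claude_agent_sdk import query, ClaudeAgentOptions, ResultMessage, SystemMessage\n\nasync for message in query(\n prompt=\"Find TODO comments\",\n options=ClaudeAgentOptions(allowed_tools=[\"Read\", \"Glob\", \"Grep\"])\n):\n if isinstance(message, ResultMessage):\n print(message.result)\n print(f\"Stop reason: {message.stop_reason}\") # e.g., \"end_turn\", \"max_turns\"\n elif isinstance(message, SystemMessage) and message.subtype == \"init\":\n session_id = message.data.get(\"session_id\") # Capture for resuming later\n```\n\n`AssistantMessage` includes per-turn `usage` data (a dict matching the Anthropic API usage shape) for tracking costs:\n\n```python\nfrom claude_agent_sdk import query, ClaudeAgentOptions, AssistantMessage\n\nasync for message in query(prompt=\"...\", options=ClaudeAgentOptions()):\n if isinstance(message, AssistantMessage) and message.usage:\n print(f\"Input: {message.usage['input_tokens']}, Output: {message.usage['output_tokens']}\")\n```\n\nTyped task message subclasses are available for better type safety when handling subagent task events:\n- `TaskStartedMessage` — emitted when a subagent task is registered\n- `TaskProgressMessage` — real-time progress updates with cumulative usage metrics\n- `TaskNotificationMessage` — task completion notifications\n\n`RateLimitEvent` is emitted when the rate limit status transitions (e.g., from `allowed` to `allowed_warning` or `rejected`). 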
Use it to warn users or back off gracefully:\n\n```python\nfrom claude_agent_sdk import query, ClaudeAgentOptions, RateLimitEvent\n\nasync for message in query(prompt=\"...\", options=ClaudeAgentOptions()):\n if isinstance(message, RateLimitEvent):\n print(f\"Rate limit status: {message.rate_limit_info.status}\")\n if message.rate_limit_info.resets_at:\n print(f\"Resets at: {message.rate_limit_info.resets_at}\")\n```\n\n---\n\n## Subagents\n\n```python\nfrom claude_agent_sdk import query, ClaudeAgentOptions, AgentDefinition, ResultMessage\n\nasync for message in query(\n prompt=\"Use the code-reviewer agent to review this codebase\",\n options=ClaudeAgentOptions(\n allowed_tools=[\"Read\", \"Glob\", \"Grep\", \"Agent\"],\n agents={\n \"code-reviewer\": AgentDefinition(\n description=\"Expert code reviewer for quality and security reviews.\",\n prompt=\"Analyze code quality and suggest improvements.\",\n tools=[\"Read\", \"Glob\", \"Grep\"]\n )\n }\n )\n):\n if isinstance(message, ResultMessage):\n print(message.result)\n```\n\n---\n\n## Error Handling\n\n```python\nfrom claude_agent_sdk import query, ClaudeAgentOptions, CLINotFoundError, CLIConnectionError, ResultMessage\n\ntry:\n async for message in query(\n prompt=\"...\",\n options=ClaudeAgentOptions(allowed_tools=[\"Read\"])\n ):\n if isinstance(message, ResultMessage):\n print(message.result)\nexcept CLINotFoundError:\n print(\"Claude Code CLI not found. Install with: pip install claude-agent-sdk\")\nexcept CLIConnectionError as e:\n print(f\"Connection error: {e}\")\n```\n\n---\n\n## Session History\n\nRetrieve past session data with top-level functions:\n\n```python\nfrom claude_agent_sdk import list_sessions, get_session_messages\n\n# List all past sessions (sync function — no await)\nsessions = list_sessions()\nfor session in sessions:\n print(f\"{session.session_id}: {session.cwd}\")\n\n# Get messages from a specific session (sync function — no await)\nmessages = get_session_messages(session_id=\"...\")\nfor msg in messages:\n print(msg)\n```\n\n### Session Mutations\n\nRename or tag sessions (sync functions — no await):\n\n```python\nfrom claude_agent_sdk import rename_session, tag_session\n\n# Rename a session\nrename_session(session_id=\"...\", title=\"My refactoring session\")\n\n# Tag a session (tags are Unicode-sanitized automatically)\ntag_session(session_id=\"...\", tag=\"experiment\")\n\n# Clear a tag\ntag_session(session_id=\"...\", tag=None)\n\n# Optionally scope to a specific project directory\nrename_session(session_id=\"...\", title=\"New title\", directory=\"/path/to/project\")\n```\n\n---\n\n## MCP Server Management\n\nManage MCP servers at runtime using `ClaudeSDKClient`:\n\n```python\nasync with ClaudeSDKClient(options=options) as client:\n # Reconnect a disconnected MCP server\n await client.reconnect_mcp_server(\"my-server\")\n\n # Toggle an MCP server on/off\n await client.toggle_mcp_server(\"my-server\", enabled=False)\n\n # Get status of all MCP servers\n status = await client.get_mcp_status() # returns McpStatusResponse\n```\n\n---\n\n## Best Practices\n\n1. **Always specify allowed_tools** — Explicitly list which tools the agent can use\n2. **Set working directory** — Always specify `cwd` for file operations\n3. **Use appropriate permission modes** — Start with `\"default\"` and only escalate when needed\n4. **Handle all message types** — Check for `ResultMessage` to get agent output\n5. 
**Limit max_turns** — Prevent runaway agents with reasonable limits\n" + }, + { + "name": "patterns.md", + "node_type": "file", + "content": "# Agent SDK Patterns — Python\n\n## Basic Agent\n\n```python\nimport anyio\nfrom claude_agent_sdk import query, ClaudeAgentOptions, ResultMessage\n\nasync def main():\n async for message in query(\n prompt=\"Explain what this repository does\",\n options=ClaudeAgentOptions(\n cwd=\"/path/to/project\",\n allowed_tools=[\"Read\", \"Glob\", \"Grep\"]\n )\n ):\n if isinstance(message, ResultMessage):\n print(message.result)\n\nanyio.run(main)\n```\n\n---\n\n## Custom Tools\n\nCustom tools require an MCP server. Use `ClaudeSDKClient` for full control (custom SDK MCP tools require `ClaudeSDKClient` — `query()` only supports external stdio/http MCP servers).\n\n```python\nimport anyio\nfrom claude_agent_sdk import (\n tool,\n create_sdk_mcp_server,\n ClaudeSDKClient,\n ClaudeAgentOptions,\n AssistantMessage,\n TextBlock,\n)\n\n@tool(\"get_weather\", \"Get the current weather for a location\", {\"location\": str})\nasync def get_weather(args):\n location = args[\"location\"]\n return {\"content\": [{\"type\": \"text\", \"text\": f\"The weather in {location} is sunny and 72°F.\"}]}\n\nserver = create_sdk_mcp_server(\"weather-tools\", tools=[get_weather])\n\nasync def main():\n options = ClaudeAgentOptions(mcp_servers={\"weather\": server})\n async with ClaudeSDKClient(options=options) as client:\n await client.query(\"What's the weather in Paris?\")\n async for message in client.receive_response():\n if isinstance(message, AssistantMessage):\n for block in message.content:\n if isinstance(block, TextBlock):\n print(block.text)\n\nanyio.run(main)\n```\n\n---\n\n## Hooks\n\n### After Tool Use Hook\n\nLog file changes after any edit:\n\n```python\nimport anyio\nfrom datetime import datetime\nfrom claude_agent_sdk import query, ClaudeAgentOptions, HookMatcher, ResultMessage\n\nasync def log_file_change(input_data, tool_use_id, context):\n file_path = input_data.get('tool_input', {}).get('file_path', 'unknown')\n with open('./audit.log', 'a') as f:\n f.write(f\"{datetime.now()}: modified {file_path}\\n\")\n return {}\n\nasync def main():\n async for message in query(\n prompt=\"Refactor utils.py to improve readability\",\n options=ClaudeAgentOptions(\n allowed_tools=[\"Read\", \"Edit\", \"Write\"],\n permission_mode=\"acceptEdits\",\n hooks={\n \"PostToolUse\": [HookMatcher(matcher=\"Edit|Write\", hooks=[log_file_change])]\n }\n )\n ):\n if isinstance(message, ResultMessage):\n print(message.result)\n\nanyio.run(main)\n```\n\n---\n\n## Subagents\n\n```python\nimport anyio\nfrom claude_agent_sdk import query, ClaudeAgentOptions, AgentDefinition, ResultMessage\n\nasync def main():\n async for message in query(\n prompt=\"Use the code-reviewer agent to review this codebase\",\n options=ClaudeAgentOptions(\n allowed_tools=[\"Read\", \"Glob\", \"Grep\", \"Agent\"],\n agents={\n \"code-reviewer\": AgentDefinition(\n description=\"Expert code reviewer for quality and security reviews.\",\n prompt=\"Analyze code quality and suggest improvements.\",\n tools=[\"Read\", \"Glob\", \"Grep\"]\n )\n }\n )\n ):\n if isinstance(message, ResultMessage):\n print(message.result)\n\nanyio.run(main)\n```\n\n---\n\n## MCP Server Integration\n\n### Browser Automation (Playwright)\n\n```python\nimport anyio\nfrom claude_agent_sdk import query, ClaudeAgentOptions, ResultMessage\n\nasync def main():\n async for message in query(\n prompt=\"Open example.com and describe what you see\",\n 
options=ClaudeAgentOptions(\n mcp_servers={\n \"playwright\": {\"command\": \"npx\", \"args\": [\"@playwright/mcp@latest\"]}\n }\n )\n ):\n if isinstance(message, ResultMessage):\n print(message.result)\n\nanyio.run(main)\n```\n\n### Database Access (PostgreSQL)\n\n```python\nimport os\nimport anyio\nfrom claude_agent_sdk import query, ClaudeAgentOptions, ResultMessage\n\nasync def main():\n async for message in query(\n prompt=\"Show me the top 10 users by order count\",\n options=ClaudeAgentOptions(\n mcp_servers={\n \"postgres\": {\n \"command\": \"npx\",\n \"args\": [\"-y\", \"@modelcontextprotocol/server-postgres\"],\n \"env\": {\"DATABASE_URL\": os.environ[\"DATABASE_URL\"]}\n }\n }\n )\n ):\n if isinstance(message, ResultMessage):\n print(message.result)\n\nanyio.run(main)\n```\n\n---\n\n## Permission Modes\n\n```python\nimport anyio\nfrom claude_agent_sdk import query, ClaudeAgentOptions\n\nasync def main():\n # Default: prompt for dangerous operations\n async for message in query(\n prompt=\"Delete all test files\",\n options=ClaudeAgentOptions(\n allowed_tools=[\"Bash\"],\n permission_mode=\"default\" # Will prompt before deleting\n )\n ):\n pass\n\n # Plan: agent creates a plan before making changes\n async for message in query(\n prompt=\"Refactor the auth system\",\n options=ClaudeAgentOptions(\n allowed_tools=[\"Read\", \"Edit\"],\n permission_mode=\"plan\"\n )\n ):\n pass\n\n # Accept edits: auto-accept file edits\n async for message in query(\n prompt=\"Refactor this module\",\n options=ClaudeAgentOptions(\n allowed_tools=[\"Read\", \"Edit\"],\n permission_mode=\"acceptEdits\"\n )\n ):\n pass\n\n # Bypass: skip all prompts (use with caution)\n async for message in query(\n prompt=\"Set up the development environment\",\n options=ClaudeAgentOptions(\n allowed_tools=[\"Bash\", \"Write\"],\n permission_mode=\"bypassPermissions\"\n )\n ):\n pass\n\nanyio.run(main)\n```\n\n---\n\n## Error Recovery\n\n```python\nimport anyio\nfrom claude_agent_sdk import (\n query,\n ClaudeAgentOptions,\n CLINotFoundError,\n CLIConnectionError,\n ProcessError,\n ResultMessage,\n)\n\nasync def run_with_recovery():\n try:\n async for message in query(\n prompt=\"Fix the failing tests\",\n options=ClaudeAgentOptions(\n allowed_tools=[\"Read\", \"Edit\", \"Bash\"],\n max_turns=10\n )\n ):\n if isinstance(message, ResultMessage):\n print(message.result)\n except CLINotFoundError:\n print(\"Claude Code CLI not found. 
Install with: pip install claude-agent-sdk\")\n except CLIConnectionError as e:\n print(f\"Connection error: {e}\")\n except ProcessError as e:\n print(f\"Process error: {e}\")\n\nanyio.run(run_with_recovery)\n```\n\n---\n\n## Session Resumption\n\n```python\nimport anyio\nfrom claude_agent_sdk import query, ClaudeAgentOptions, ResultMessage, SystemMessage\n\nasync def main():\n session_id = None\n\n # First query: capture the session ID\n async for message in query(\n prompt=\"Read the authentication module\",\n options=ClaudeAgentOptions(allowed_tools=[\"Read\", \"Glob\"])\n ):\n if isinstance(message, SystemMessage) and message.subtype == \"init\":\n session_id = message.data.get(\"session_id\")\n\n # Resume with full context from the first query\n async for message in query(\n prompt=\"Now find all places that call it\", # \"it\" = auth module\n options=ClaudeAgentOptions(resume=session_id)\n ):\n if isinstance(message, ResultMessage):\n print(message.result)\n\nanyio.run(main)\n```\n\n---\n\n## Session History\n\n```python\nfrom claude_agent_sdk import list_sessions, get_session_messages\n\n# List past sessions (sync function — no await)\nsessions = list_sessions()\nfor session in sessions:\n print(f\"Session {session.session_id} in {session.cwd}\")\n\n# Retrieve messages from the most recent session (sync function — no await)\nif sessions:\n messages = get_session_messages(session_id=sessions[0].session_id)\n for msg in messages:\n print(msg)\n```\n\n---\n\n## Session Mutations\n\n```python\nfrom claude_agent_sdk import rename_session, tag_session\n\nsession_id = \"your-session-id\"\n\n# Rename a session\nrename_session(session_id=session_id, title=\"Refactoring auth module\")\n\n# Tag a session for filtering\ntag_session(session_id=session_id, tag=\"experiment-v2\")\n\n# Clear a tag\ntag_session(session_id=session_id, tag=None)\n\n# Scope to a specific project directory\nrename_session(session_id=session_id, title=\"New title\", directory=\"/path/to/project\")\n```\n\n---\n\n## Custom System Prompt\n\n```python\nimport anyio\nfrom claude_agent_sdk import query, ClaudeAgentOptions, ResultMessage\n\nasync def main():\n async for message in query(\n prompt=\"Review this code\",\n options=ClaudeAgentOptions(\n allowed_tools=[\"Read\", \"Glob\", \"Grep\"],\n system_prompt=\"\"\"You are a senior code reviewer focused on:\n1. Security vulnerabilities\n2. Performance issues\n3. Code maintainability\n\nAlways provide specific line numbers and suggestions for improvement.\"\"\"\n )\n ):\n if isinstance(message, ResultMessage):\n print(message.result)\n\nanyio.run(main)\n```\n" + } + ] + }, + { + "name": "claude-api", + "node_type": "folder", + "children": [ + { + "name": "README.md", + "node_type": "file", + "content": "# Claude API — Python\n\n## Installation\n\n```bash\npip install anthropic\n```\n\n## Client Initialization\n\n```python\nimport anthropic\n\n# Default (uses ANTHROPIC_API_KEY env var)\nclient = anthropic.Anthropic()\n\n# Explicit API key\nclient = anthropic.Anthropic(api_key=\"your-api-key\")\n\n# Async client\nasync_client = anthropic.AsyncAnthropic()\n```\n\n---\n\n## Basic Message Request\n\n```python\nresponse = client.messages.create(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n messages=[\n {\"role\": \"user\", \"content\": \"What is the capital of France?\"}\n ]\n)\n# response.content is a list of content block objects (TextBlock, ThinkingBlock,\n# ToolUseBlock, ...). 
Check .type before accessing .text.\nfor block in response.content:\n if block.type == \"text\":\n print(block.text)\n```\n\n---\n\n## System Prompts\n\n```python\nresponse = client.messages.create(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n system=\"You are a helpful coding assistant. Always provide examples in Python.\",\n messages=[{\"role\": \"user\", \"content\": \"How do I read a JSON file?\"}]\n)\n```\n\n---\n\n## Vision (Images)\n\n### Base64\n\n```python\nimport base64\n\nwith open(\"image.png\", \"rb\") as f:\n image_data = base64.standard_b64encode(f.read()).decode(\"utf-8\")\n\nresponse = client.messages.create(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n messages=[{\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"image\",\n \"source\": {\n \"type\": \"base64\",\n \"media_type\": \"image/png\",\n \"data\": image_data\n }\n },\n {\"type\": \"text\", \"text\": \"What's in this image?\"}\n ]\n }]\n)\n```\n\n### URL\n\n```python\nresponse = client.messages.create(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n messages=[{\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"image\",\n \"source\": {\n \"type\": \"url\",\n \"url\": \"https://example.com/image.png\"\n }\n },\n {\"type\": \"text\", \"text\": \"Describe this image\"}\n ]\n }]\n)\n```\n\n---\n\n## Prompt Caching\n\nCache large context to reduce costs (up to 90% savings). **Caching is a prefix match** — any byte change anywhere in the prefix invalidates everything after it. For placement patterns, architectural guidance (frozen system prompt, deterministic tool order, where to put volatile content), and the silent-invalidator audit checklist, read `shared/prompt-caching.md`.\n\n### Automatic Caching (Recommended)\n\nUse top-level `cache_control` to automatically cache the last cacheable block in the request — no need to annotate individual content blocks:\n\n```python\nresponse = client.messages.create(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n cache_control={\"type\": \"ephemeral\"}, # auto-caches the last cacheable block\n system=\"You are an expert on this large document...\",\n messages=[{\"role\": \"user\", \"content\": \"Summarize the key points\"}]\n)\n```\n\n### Manual Cache Control\n\nFor fine-grained control, add `cache_control` to specific content blocks:\n\n```python\nresponse = client.messages.create(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n system=[{\n \"type\": \"text\",\n \"text\": \"You are an expert on this large document...\",\n \"cache_control\": {\"type\": \"ephemeral\"} # default TTL is 5 minutes\n }],\n messages=[{\"role\": \"user\", \"content\": \"Summarize the key points\"}]\n)\n\n# With explicit TTL (time-to-live)\nresponse = client.messages.create(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n system=[{\n \"type\": \"text\",\n \"text\": \"You are an expert on this large document...\",\n \"cache_control\": {\"type\": \"ephemeral\", \"ttl\": \"1h\"} # 1 hour TTL\n }],\n messages=[{\"role\": \"user\", \"content\": \"Summarize the key points\"}]\n)\n```\n\n### Verifying Cache Hits\n\n```python\nprint(response.usage.cache_creation_input_tokens) # tokens written to cache (~1.25x cost)\nprint(response.usage.cache_read_input_tokens) # tokens served from cache (~0.1x cost)\nprint(response.usage.input_tokens) # uncached tokens (full cost)\n```\n\nIf `cache_read_input_tokens` is zero across repeated identical-prefix requests, a silent invalidator is at work — `datetime.now()` or a UUID in the system prompt, unsorted `json.dumps()`, or a varying tool 
set. See `shared/prompt-caching.md` for the full audit table.\n\n---\n\n## Extended Thinking\n\n> **Opus 4.6 and Sonnet 4.6:** Use adaptive thinking; `budget_tokens` is deprecated on both.\n> **Older models:** Use `thinking: {type: \"enabled\", budget_tokens: N}` (must be < `max_tokens`, min 1024).\n\n```python\n# Opus 4.6: adaptive thinking (recommended)\nresponse = client.messages.create(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n thinking={\"type\": \"adaptive\"},\n output_config={\"effort\": \"high\"}, # low | medium | high | max\n messages=[{\"role\": \"user\", \"content\": \"Solve this step by step...\"}]\n)\n\n# Access thinking and response\nfor block in response.content:\n if block.type == \"thinking\":\n print(f\"Thinking: {block.thinking}\")\n elif block.type == \"text\":\n print(f\"Response: {block.text}\")\n```\n\n---\n\n## Error Handling\n\n```python\nimport anthropic\n\ntry:\n response = client.messages.create(...)\nexcept anthropic.BadRequestError as e:\n print(f\"Bad request: {e.message}\")\nexcept anthropic.AuthenticationError:\n print(\"Invalid API key\")\nexcept anthropic.PermissionDeniedError:\n print(\"API key lacks required permissions\")\nexcept anthropic.NotFoundError:\n print(\"Invalid model or endpoint\")\nexcept anthropic.RateLimitError as e:\n retry_after = int(e.response.headers.get(\"retry-after\", \"60\"))\n print(f\"Rate limited. Retry after {retry_after}s.\")\nexcept anthropic.APIStatusError as e:\n if e.status_code >= 500:\n print(f\"Server error ({e.status_code}). Retry later.\")\n else:\n print(f\"API error: {e.message}\")\nexcept anthropic.APIConnectionError:\n print(\"Network error. Check internet connection.\")\n```\n\n---\n\n## Multi-Turn Conversations\n\nThe API is stateless — send the full conversation history each time.\n\n```python\nclass ConversationManager:\n \"\"\"Manage multi-turn conversations with the Claude API.\"\"\"\n\n def __init__(self, client: anthropic.Anthropic, model: str, system: str | None = None):\n self.client = client\n self.model = model\n self.system = system\n self.messages = []\n\n def send(self, user_message: str, **kwargs) -> str:\n \"\"\"Send a message and get a response.\"\"\"\n self.messages.append({\"role\": \"user\", \"content\": user_message})\n\n # Default max_tokens without passing it twice when the caller supplies it\n kwargs.setdefault(\"max_tokens\", 16000)\n response = self.client.messages.create(\n model=self.model,\n system=self.system,\n messages=self.messages,\n **kwargs\n )\n\n assistant_message = next(\n (b.text for b in response.content if b.type == \"text\"), \"\"\n )\n self.messages.append({\"role\": \"assistant\", \"content\": assistant_message})\n\n return assistant_message\n\n# Usage\nconversation = ConversationManager(\n client=anthropic.Anthropic(),\n model=\"claude-opus-4-6\",\n system=\"You are a helpful assistant.\"\n)\n\nresponse1 = conversation.send(\"My name is Alice.\")\nresponse2 = conversation.send(\"What's my name?\") # Claude remembers \"Alice\"\n```\n\n**Rules:**\n\n- Messages must alternate between `user` and `assistant`\n- First message must be `user`\n\n---\n\n### Compaction (long conversations)\n\n> **Beta, Opus 4.6 and Sonnet 4.6.** When conversations approach the 200K context window, compaction automatically summarizes earlier context server-side. 
The API returns a `compaction` block; you must pass it back on subsequent requests — append `response.content`, not just the text.\n\n```python\nimport anthropic\n\nclient = anthropic.Anthropic()\nmessages = []\n\ndef chat(user_message: str) -> str:\n messages.append({\"role\": \"user\", \"content\": user_message})\n\n response = client.beta.messages.create(\n betas=[\"compact-2026-01-12\"],\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n messages=messages,\n context_management={\n \"edits\": [{\"type\": \"compact_20260112\"}]\n }\n )\n\n # Append full content — compaction blocks must be preserved\n messages.append({\"role\": \"assistant\", \"content\": response.content})\n\n return next(block.text for block in response.content if block.type == \"text\")\n\n# Compaction triggers automatically when context grows large\nprint(chat(\"Help me build a Python web scraper\"))\nprint(chat(\"Add support for JavaScript-rendered pages\"))\nprint(chat(\"Now add rate limiting and error handling\"))\n```\n\n---\n\n## Stop Reasons\n\nThe `stop_reason` field in the response indicates why the model stopped generating:\n\n| Value | Meaning |\n|-------|---------|\n| `end_turn` | Claude finished its response naturally |\n| `max_tokens` | Hit the `max_tokens` limit — increase it or use streaming |\n| `stop_sequence` | Hit a custom stop sequence |\n| `tool_use` | Claude wants to call a tool — execute it and continue |\n| `pause_turn` | Model paused and can be resumed (agentic flows) |\n| `refusal` | Claude refused for safety reasons — output may not match your schema |
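\n\nA small sketch of acting on the values that most often need explicit handling (assumes an existing `client` and `messages`):\n\n```python\nresponse = client.messages.create(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n messages=messages,\n)\n\nif response.stop_reason == \"max_tokens\":\n # Output was truncated; raise the limit or switch to streaming\n ...\nelif response.stop_reason == \"refusal\":\n # Safety refusal; don't parse partial output as if it were valid\n ...\n```\n\n---\n\n## Cost Optimization Strategies\n\n### 1. Use Prompt Caching for Repeated Context\n\n```python\n# Automatic caching (simplest — caches the last cacheable block)\nresponse = client.messages.create(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n cache_control={\"type\": \"ephemeral\"},\n system=large_document_text, # e.g., 50KB of context\n messages=[{\"role\": \"user\", \"content\": \"Summarize the key points\"}]\n)\n\n# First request: full cost\n# Subsequent requests: ~90% cheaper for cached portion\n```\n\n### 2. Choose the Right Model\n\n```python\n# Default to Opus for most tasks\nresponse = client.messages.create(\n model=\"claude-opus-4-6\", # $5.00/$25.00 per 1M tokens\n max_tokens=16000,\n messages=[{\"role\": \"user\", \"content\": \"Explain quantum computing\"}]\n)\n\n# Use Sonnet for high-volume production workloads\nstandard_response = client.messages.create(\n model=\"claude-sonnet-4-6\", # $3.00/$15.00 per 1M tokens\n max_tokens=16000,\n messages=[{\"role\": \"user\", \"content\": \"Summarize this document\"}]\n)\n\n# Use Haiku only for simple, speed-critical tasks\nsimple_response = client.messages.create(\n model=\"claude-haiku-4-5\", # $1.00/$5.00 per 1M tokens\n max_tokens=256,\n messages=[{\"role\": \"user\", \"content\": \"Classify this as positive or negative\"}]\n)\n```\n\n### 3. Use Token Counting Before Requests\n\n```python\ncount_response = client.messages.count_tokens(\n model=\"claude-opus-4-6\",\n messages=messages,\n system=system\n)\n\nestimated_input_cost = count_response.input_tokens * 0.000005 # $5/1M tokens\nprint(f\"Estimated input cost: ${estimated_input_cost:.4f}\")\n```\n\n---\n\n## Retry with Exponential Backoff\n\n> **Note:** The Anthropic SDK automatically retries rate limit (429) and server errors (5xx) with exponential backoff. You can configure this with `max_retries` (default: 2). 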
Only implement custom retry logic if you need behavior beyond what the SDK provides.\n\n```python\nimport time\nimport random\nimport anthropic\n\ndef call_with_retry(\n client: anthropic.Anthropic,\n max_retries: int = 5,\n base_delay: float = 1.0,\n max_delay: float = 60.0,\n **kwargs\n):\n \"\"\"Call the API with exponential backoff retry.\"\"\"\n last_exception = None\n\n for attempt in range(max_retries):\n try:\n return client.messages.create(**kwargs)\n except anthropic.RateLimitError as e:\n last_exception = e\n except anthropic.APIStatusError as e:\n if e.status_code >= 500:\n last_exception = e\n else:\n raise # Client errors (4xx except 429) should not be retried\n\n if attempt == max_retries - 1:\n break # Last attempt failed — re-raise without sleeping first\n\n delay = min(base_delay * (2 ** attempt) + random.uniform(0, 1), max_delay)\n print(f\"Retry {attempt + 1}/{max_retries} after {delay:.1f}s\")\n time.sleep(delay)\n\n raise last_exception\n```\n"
  },
  {
    "name": "batches.md",
    "node_type": "file",
    "content": "# Message Batches API — Python\n\nThe Batches API (`POST /v1/messages/batches`) processes Messages API requests asynchronously at 50% of standard prices.\n\n## Key Facts\n\n- Up to 100,000 requests or 256 MB per batch\n- Most batches complete within 1 hour; maximum 24 hours\n- Results available for 29 days after creation\n- 50% cost reduction on all token usage\n- All Messages API features supported (vision, tools, caching, etc.)\n\n---\n\n## Create a Batch\n\n```python\nimport anthropic\nfrom anthropic.types.message_create_params import MessageCreateParamsNonStreaming\nfrom anthropic.types.messages.batch_create_params import Request\n\nclient = anthropic.Anthropic()\n\nmessage_batch = client.messages.batches.create(\n requests=[\n Request(\n custom_id=\"request-1\",\n params=MessageCreateParamsNonStreaming(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n messages=[{\"role\": \"user\", \"content\": \"Summarize climate change impacts\"}]\n )\n ),\n Request(\n custom_id=\"request-2\",\n params=MessageCreateParamsNonStreaming(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n messages=[{\"role\": \"user\", \"content\": \"Explain quantum computing basics\"}]\n )\n ),\n ]\n)\n\nprint(f\"Batch ID: {message_batch.id}\")\nprint(f\"Status: {message_batch.processing_status}\")\n```\n\n---\n\n## Poll for Completion\n\n```python\nimport time\n\nwhile True:\n batch = client.messages.batches.retrieve(message_batch.id)\n if batch.processing_status == \"ended\":\n break\n print(f\"Status: {batch.processing_status}, processing: {batch.request_counts.processing}\")\n time.sleep(60)\n\nprint(\"Batch complete!\")\nprint(f\"Succeeded: {batch.request_counts.succeeded}\")\nprint(f\"Errored: {batch.request_counts.errored}\")\n```\n\n---\n\n## Retrieve Results\n\n> **Note:** Examples below use `match/case` syntax, requiring Python 3.10+. 
For earlier versions, use `if/elif` chains instead.\n\n```python\nfor result in client.messages.batches.results(message_batch.id):\n match result.result.type:\n case \"succeeded\":\n msg = result.result.message\n text = next((b.text for b in msg.content if b.type == \"text\"), \"\")\n print(f\"[{result.custom_id}] {text[:100]}\")\n case \"errored\":\n if result.result.error.type == \"invalid_request\":\n print(f\"[{result.custom_id}] Validation error - fix request and retry\")\n else:\n print(f\"[{result.custom_id}] Server error - safe to retry\")\n case \"canceled\":\n print(f\"[{result.custom_id}] Canceled\")\n case \"expired\":\n print(f\"[{result.custom_id}] Expired - resubmit\")\n```\n\n---\n\n## Cancel a Batch\n\n```python\ncancelled = client.messages.batches.cancel(message_batch.id)\nprint(f\"Status: {cancelled.processing_status}\") # \"canceling\"\n```\n\n---\n\n## Batch with Prompt Caching\n\n```python\nshared_system = [\n {\"type\": \"text\", \"text\": \"You are a literary analyst.\"},\n {\n \"type\": \"text\",\n \"text\": large_document_text, # Shared across all requests\n \"cache_control\": {\"type\": \"ephemeral\"}\n }\n]\n\nmessage_batch = client.messages.batches.create(\n requests=[\n Request(\n custom_id=f\"analysis-{i}\",\n params=MessageCreateParamsNonStreaming(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n system=shared_system,\n messages=[{\"role\": \"user\", \"content\": question}]\n )\n )\n for i, question in enumerate(questions)\n ]\n)\n```\n\n---\n\n## Full End-to-End Example\n\n```python\nimport anthropic\nimport time\nfrom anthropic.types.message_create_params import MessageCreateParamsNonStreaming\nfrom anthropic.types.messages.batch_create_params import Request\n\nclient = anthropic.Anthropic()\n\n# 1. Prepare requests\nitems_to_classify = [\n \"The product quality is excellent!\",\n \"Terrible customer service, never again.\",\n \"It's okay, nothing special.\",\n]\n\nrequests = [\n Request(\n custom_id=f\"classify-{i}\",\n params=MessageCreateParamsNonStreaming(\n model=\"claude-haiku-4-5\",\n max_tokens=50,\n messages=[{\n \"role\": \"user\",\n \"content\": f\"Classify as positive/negative/neutral (one word): {text}\"\n }]\n )\n )\n for i, text in enumerate(items_to_classify)\n]\n\n# 2. Create batch\nbatch = client.messages.batches.create(requests=requests)\nprint(f\"Created batch: {batch.id}\")\n\n# 3. Wait for completion\nwhile True:\n batch = client.messages.batches.retrieve(batch.id)\n if batch.processing_status == \"ended\":\n break\n time.sleep(10)\n\n# 4. Collect results\nresults = {}\nfor result in client.messages.batches.results(batch.id):\n if result.result.type == \"succeeded\":\n msg = result.result.message\n results[result.custom_id] = next((b.text for b in msg.content if b.type == \"text\"), \"\")\n\nfor custom_id, classification in sorted(results.items()):\n print(f\"{custom_id}: {classification}\")\n```\n" + }, + { + "name": "files-api.md", + "node_type": "file", + "content": "# Files API — Python\n\nThe Files API uploads files for use in Messages API requests. 
Reference files via `file_id` in content blocks, avoiding re-uploads across multiple API calls.\n\n**Beta:** Pass `betas=[\"files-api-2025-04-14\"]` in your API calls (the SDK sets the required header automatically).\n\n## Key Facts\n\n- Maximum file size: 500 MB\n- Total storage: 100 GB per organization\n- Files persist until deleted\n- File operations (upload, list, delete) are free; content used in messages is billed as input tokens\n- Not available on Amazon Bedrock or Google Vertex AI\n\n---\n\n## Upload a File\n\n```python\nimport anthropic\n\nclient = anthropic.Anthropic()\n\nuploaded = client.beta.files.upload(\n file=(\"report.pdf\", open(\"report.pdf\", \"rb\"), \"application/pdf\"),\n)\nprint(f\"File ID: {uploaded.id}\")\nprint(f\"Size: {uploaded.size_bytes} bytes\")\n```\n\n---\n\n## Use a File in Messages\n\n### PDF / Text Document\n\n```python\nresponse = client.beta.messages.create(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n messages=[{\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"text\", \"text\": \"Summarize the key findings in this report.\"},\n {\n \"type\": \"document\",\n \"source\": {\"type\": \"file\", \"file_id\": uploaded.id},\n \"title\": \"Q4 Report\", # optional\n \"citations\": {\"enabled\": True} # optional, enables citations\n }\n ]\n }],\n betas=[\"files-api-2025-04-14\"],\n)\nfor block in response.content:\n if block.type == \"text\":\n print(block.text)\n```\n\n### Image\n\n```python\nimage_file = client.beta.files.upload(\n file=(\"photo.png\", open(\"photo.png\", \"rb\"), \"image/png\"),\n)\n\nresponse = client.beta.messages.create(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n messages=[{\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"text\", \"text\": \"What's in this image?\"},\n {\n \"type\": \"image\",\n \"source\": {\"type\": \"file\", \"file_id\": image_file.id}\n }\n ]\n }],\n betas=[\"files-api-2025-04-14\"],\n)\n```\n\n---\n\n## Manage Files\n\n### List Files\n\n```python\nfiles = client.beta.files.list()\nfor f in files.data:\n print(f\"{f.id}: {f.filename} ({f.size_bytes} bytes)\")\n```\n\n### Get File Metadata\n\n```python\nfile_info = client.beta.files.retrieve_metadata(\"file_011CNha8iCJcU1wXNR6q4V8w\")\nprint(f\"Filename: {file_info.filename}\")\nprint(f\"MIME type: {file_info.mime_type}\")\n```\n\n### Delete a File\n\n```python\nclient.beta.files.delete(\"file_011CNha8iCJcU1wXNR6q4V8w\")\n```\n\n### Download a File\n\nOnly files created by the code execution tool or skills can be downloaded (not user-uploaded files).\n\n```python\nfile_content = client.beta.files.download(\"file_011CNha8iCJcU1wXNR6q4V8w\")\nfile_content.write_to_file(\"output.txt\")\n```\n\n---\n\n## Full End-to-End Example\n\nUpload a document once, ask multiple questions about it:\n\n```python\nimport anthropic\n\nclient = anthropic.Anthropic()\n\n# 1. Upload once\nuploaded = client.beta.files.upload(\n file=(\"contract.pdf\", open(\"contract.pdf\", \"rb\"), \"application/pdf\"),\n)\nprint(f\"Uploaded: {uploaded.id}\")\n\n# 2. 
Ask multiple questions using the same file_id\nquestions = [\n \"What are the key terms and conditions?\",\n \"What is the termination clause?\",\n \"Summarize the payment schedule.\",\n]\n\nfor question in questions:\n response = client.beta.messages.create(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n messages=[{\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"text\", \"text\": question},\n {\n \"type\": \"document\",\n \"source\": {\"type\": \"file\", \"file_id\": uploaded.id}\n }\n ]\n }],\n betas=[\"files-api-2025-04-14\"],\n )\n print(f\"\\nQ: {question}\")\n text = next((b.text for b in response.content if b.type == \"text\"), \"\")\n print(f\"A: {text[:200]}\")\n\n# 3. Clean up when done\nclient.beta.files.delete(uploaded.id)\n```\n" + }, + { + "name": "streaming.md", + "node_type": "file", + "content": "# Streaming — Python\n\n## Quick Start\n\n```python\nwith client.messages.stream(\n model=\"claude-opus-4-6\",\n max_tokens=64000,\n messages=[{\"role\": \"user\", \"content\": \"Write a story\"}]\n) as stream:\n for text in stream.text_stream:\n print(text, end=\"\", flush=True)\n```\n\n### Async\n\n```python\nasync with async_client.messages.stream(\n model=\"claude-opus-4-6\",\n max_tokens=64000,\n messages=[{\"role\": \"user\", \"content\": \"Write a story\"}]\n) as stream:\n async for text in stream.text_stream:\n print(text, end=\"\", flush=True)\n```\n\n---\n\n## Handling Different Content Types\n\nClaude may return text, thinking blocks, or tool use. Handle each appropriately:\n\n> **Opus 4.6:** Use `thinking: {type: \"adaptive\"}`. On older models, use `thinking: {type: \"enabled\", budget_tokens: N}` instead.\n\n```python\nwith client.messages.stream(\n model=\"claude-opus-4-6\",\n max_tokens=64000,\n thinking={\"type\": \"adaptive\"},\n messages=[{\"role\": \"user\", \"content\": \"Analyze this problem\"}]\n) as stream:\n for event in stream:\n if event.type == \"content_block_start\":\n if event.content_block.type == \"thinking\":\n print(\"\\n[Thinking...]\")\n elif event.content_block.type == \"text\":\n print(\"\\n[Response:]\")\n\n elif event.type == \"content_block_delta\":\n if event.delta.type == \"thinking_delta\":\n print(event.delta.thinking, end=\"\", flush=True)\n elif event.delta.type == \"text_delta\":\n print(event.delta.text, end=\"\", flush=True)\n```\n\n---\n\n## Streaming with Tool Use\n\nThe Python tool runner currently returns complete messages. 
Use streaming for individual API calls within a manual loop if you need per-token streaming with tools:\n\n```python\nwith client.messages.stream(\n model=\"claude-opus-4-6\",\n max_tokens=64000,\n tools=tools,\n messages=messages\n) as stream:\n for text in stream.text_stream:\n print(text, end=\"\", flush=True)\n\n response = stream.get_final_message()\n # Continue with tool execution if response.stop_reason == \"tool_use\"\n```\n\n---\n\n## Getting the Final Message\n\n```python\nwith client.messages.stream(\n model=\"claude-opus-4-6\",\n max_tokens=64000,\n messages=[{\"role\": \"user\", \"content\": \"Hello\"}]\n) as stream:\n for text in stream.text_stream:\n print(text, end=\"\", flush=True)\n\n # Get full message after streaming\n final_message = stream.get_final_message()\n print(f\"\\n\\nTokens used: {final_message.usage.output_tokens}\")\n```\n\n---\n\n## Streaming with Progress Updates\n\n```python\ndef stream_with_progress(client, **kwargs):\n \"\"\"Stream a response with progress updates.\"\"\"\n total_tokens = 0\n content_parts = []\n\n with client.messages.stream(**kwargs) as stream:\n for event in stream:\n if event.type == \"content_block_delta\":\n if event.delta.type == \"text_delta\":\n text = event.delta.text\n content_parts.append(text)\n print(text, end=\"\", flush=True)\n\n elif event.type == \"message_delta\":\n if event.usage and event.usage.output_tokens is not None:\n total_tokens = event.usage.output_tokens\n\n final_message = stream.get_final_message()\n\n print(f\"\\n\\n[Tokens used: {total_tokens}]\")\n return \"\".join(content_parts)\n```\n\n---\n\n## Error Handling in Streams\n\n```python\ntry:\n with client.messages.stream(\n model=\"claude-opus-4-6\",\n max_tokens=64000,\n messages=[{\"role\": \"user\", \"content\": \"Write a story\"}]\n ) as stream:\n for text in stream.text_stream:\n print(text, end=\"\", flush=True)\nexcept anthropic.APIConnectionError:\n print(\"\\nConnection lost. Please retry.\")\nexcept anthropic.RateLimitError:\n print(\"\\nRate limited. Please wait and retry.\")\nexcept anthropic.APIStatusError as e:\n print(f\"\\nAPI error: {e.status_code}\")\n```\n\n---\n\n## Stream Event Types\n\n| Event Type | Description | When it fires |\n| --------------------- | --------------------------- | --------------------------------- |\n| `message_start` | Contains message metadata | Once at the beginning |\n| `content_block_start` | New content block beginning | When a text/tool_use block starts |\n| `content_block_delta` | Incremental content update | For each token/chunk |\n| `content_block_stop` | Content block complete | When a block finishes |\n| `message_delta` | Message-level updates | Contains `stop_reason`, usage |\n| `message_stop` | Message complete | Once at the end |\n\n## Best Practices\n\n1. **Always flush output** — Use `flush=True` to show tokens immediately\n2. **Handle partial responses** — If the stream is interrupted, you may have incomplete content\n3. **Track token usage** — The `message_delta` event contains usage information\n4. **Use timeouts** — Set appropriate timeouts for your application\n5. 
**Default to streaming** — Use `.get_final_message()` to get the complete response even when streaming, giving you timeout protection without needing to handle individual events\n" + }, + { + "name": "tool-use.md", + "node_type": "file", + "content": "# Tool Use — Python\n\nFor conceptual overview (tool definitions, tool choice, tips), see [shared/tool-use-concepts.md](../../shared/tool-use-concepts.md).\n\n## Tool Runner (Recommended)\n\n**Beta:** The tool runner is in beta in the Python SDK.\n\nUse the `@beta_tool` decorator to define tools as typed functions, then pass them to `client.beta.messages.tool_runner()`:\n\n```python\nimport anthropic\nfrom anthropic import beta_tool\n\nclient = anthropic.Anthropic()\n\n@beta_tool\ndef get_weather(location: str, unit: str = \"celsius\") -> str:\n \"\"\"Get current weather for a location.\n\n Args:\n location: City and state, e.g., San Francisco, CA.\n unit: Temperature unit, either \"celsius\" or \"fahrenheit\".\n \"\"\"\n # Your implementation here\n return f\"72°F and sunny in {location}\"\n\n# The tool runner handles the agentic loop automatically\nrunner = client.beta.messages.tool_runner(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n tools=[get_weather],\n messages=[{\"role\": \"user\", \"content\": \"What's the weather in Paris?\"}],\n)\n\n# Each iteration yields a BetaMessage; iteration stops when Claude is done\nfor message in runner:\n print(message)\n```\n\nFor async usage, use `@beta_async_tool` with `async def` functions.\n\n**Key benefits of the tool runner:**\n\n- No manual loop — the SDK handles calling tools and feeding results back\n- Type-safe tool inputs via decorators\n- Tool schemas are generated automatically from function signatures\n- Iteration stops automatically when Claude has no more tool calls\n\n---\n\n## MCP Tool Conversion Helpers\n\n**Beta.** Convert [MCP (Model Context Protocol)](https://modelcontextprotocol.io/) tools, prompts, and resources to Anthropic API types for use with the tool runner. Requires `pip install anthropic[mcp]` (Python 3.10+).\n\n> **Note:** The Claude API also supports an `mcp_servers` parameter that lets Claude connect directly to remote MCP servers. 
Use these helpers instead when you need local MCP servers, prompts, resources, or more control over the MCP connection.\n\n### MCP Tools with Tool Runner\n\n```python\nfrom anthropic import AsyncAnthropic\nfrom anthropic.lib.tools.mcp import async_mcp_tool\nfrom mcp import ClientSession\nfrom mcp.client.stdio import stdio_client, StdioServerParameters\n\nclient = AsyncAnthropic()\n\nasync with stdio_client(StdioServerParameters(command=\"mcp-server\")) as (read, write):\n async with ClientSession(read, write) as mcp_client:\n await mcp_client.initialize()\n\n tools_result = await mcp_client.list_tools()\n # tool_runner is sync — returns the runner, not a coroutine\n runner = client.beta.messages.tool_runner(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n messages=[{\"role\": \"user\", \"content\": \"Use the available tools\"}],\n tools=[async_mcp_tool(t, mcp_client) for t in tools_result.tools],\n )\n async for message in runner:\n print(message)\n```\n\nFor sync usage, use `mcp_tool` instead of `async_mcp_tool`.\n\n### MCP Prompts\n\n```python\nfrom anthropic.lib.tools.mcp import mcp_message\n\nprompt = await mcp_client.get_prompt(name=\"my-prompt\")\nresponse = await client.beta.messages.create(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n messages=[mcp_message(m) for m in prompt.messages],\n)\n```\n\n### MCP Resources as Content\n\n```python\nfrom anthropic.lib.tools.mcp import mcp_resource_to_content\n\nresource = await mcp_client.read_resource(uri=\"file:///path/to/doc.txt\")\nresponse = await client.beta.messages.create(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n messages=[{\n \"role\": \"user\",\n \"content\": [\n mcp_resource_to_content(resource),\n {\"type\": \"text\", \"text\": \"Summarize this document\"},\n ],\n }],\n)\n```\n\n### Upload MCP Resources as Files\n\n```python\nfrom anthropic.lib.tools.mcp import mcp_resource_to_file\n\nresource = await mcp_client.read_resource(uri=\"file:///path/to/data.json\")\nuploaded = await client.beta.files.upload(file=mcp_resource_to_file(resource))\n```\n\nConversion functions raise `UnsupportedMCPValueError` if an MCP value cannot be converted (e.g., unsupported content types like audio, unsupported MIME types).\n\n---\n\n## Manual Agentic Loop\n\nUse this when you need fine-grained control over the loop (e.g., custom logging, conditional tool execution, human-in-the-loop approval):\n\n```python\nimport anthropic\n\nclient = anthropic.Anthropic()\ntools = [...] 
# Your tool definitions\nmessages = [{\"role\": \"user\", \"content\": user_input}]\n\n# Agentic loop: keep going until Claude stops calling tools\nwhile True:\n response = client.messages.create(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n tools=tools,\n messages=messages\n )\n\n # If Claude is done (no more tool calls), break\n if response.stop_reason == \"end_turn\":\n break\n\n # Server-side tool hit iteration limit; append the paused turn and re-send the full history to continue\n if response.stop_reason == \"pause_turn\":\n messages.append({\"role\": \"assistant\", \"content\": response.content})\n continue\n\n # Extract tool use blocks from the response\n tool_use_blocks = [b for b in response.content if b.type == \"tool_use\"]\n\n # Append assistant's response (including tool_use blocks)\n messages.append({\"role\": \"assistant\", \"content\": response.content})\n\n # Execute each tool and collect results\n tool_results = []\n for tool in tool_use_blocks:\n result = execute_tool(tool.name, tool.input) # Your implementation\n tool_results.append({\n \"type\": \"tool_result\",\n \"tool_use_id\": tool.id, # Must match the tool_use block's id\n \"content\": result\n })\n\n # Append tool results as a user message\n messages.append({\"role\": \"user\", \"content\": tool_results})\n\n# Final response text\nfinal_text = next((b.text for b in response.content if b.type == \"text\"), \"\")\n```\n\n---\n\n## Handling Tool Results\n\n```python\nresponse = client.messages.create(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n tools=tools,\n messages=[{\"role\": \"user\", \"content\": \"What's the weather in Paris?\"}]\n)\n\nfor block in response.content:\n if block.type == \"tool_use\":\n tool_name = block.name\n tool_input = block.input\n tool_use_id = block.id\n\n result = execute_tool(tool_name, tool_input)\n\n followup = client.messages.create(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n tools=tools,\n messages=[\n {\"role\": \"user\", \"content\": \"What's the weather in Paris?\"},\n {\"role\": \"assistant\", \"content\": response.content},\n {\n \"role\": \"user\",\n \"content\": [{\n \"type\": \"tool_result\",\n \"tool_use_id\": tool_use_id,\n \"content\": result\n }]\n }\n ]\n )\n```\n\n---\n\n## Multiple Tool Calls\n\n```python\ntool_results = []\n\nfor block in response.content:\n if block.type == \"tool_use\":\n result = execute_tool(block.name, block.input)\n tool_results.append({\n \"type\": \"tool_result\",\n \"tool_use_id\": block.id,\n \"content\": result\n })\n\n# Send all results back at once\nif tool_results:\n followup = client.messages.create(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n tools=tools,\n messages=[\n *previous_messages,\n {\"role\": \"assistant\", \"content\": response.content},\n {\"role\": \"user\", \"content\": tool_results}\n ]\n )\n```\n\n---\n\n## Error Handling in Tool Results\n\n```python\ntool_result = {\n \"type\": \"tool_result\",\n \"tool_use_id\": tool_use_id,\n \"content\": \"Error: Location 'xyz' not found. 
Please provide a valid city name.\",\n \"is_error\": True\n}\n```\n\n---\n\n## Tool Choice\n\n```python\nresponse = client.messages.create(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n tools=tools,\n tool_choice={\"type\": \"tool\", \"name\": \"get_weather\"}, # Force specific tool\n messages=[{\"role\": \"user\", \"content\": \"What's the weather in Paris?\"}]\n)\n```\n\n---\n\n## Code Execution\n\n### Basic Usage\n\n```python\nimport anthropic\n\nclient = anthropic.Anthropic()\n\nresponse = client.messages.create(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n messages=[{\n \"role\": \"user\",\n \"content\": \"Calculate the mean and standard deviation of [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\"\n }],\n tools=[{\n \"type\": \"code_execution_20260120\",\n \"name\": \"code_execution\"\n }]\n)\n\nfor block in response.content:\n if block.type == \"text\":\n print(block.text)\n elif block.type == \"bash_code_execution_tool_result\":\n print(f\"stdout: {block.content.stdout}\")\n```\n\n### Upload Files for Analysis\n\n```python\n# 1. Upload a file\nuploaded = client.beta.files.upload(file=open(\"sales_data.csv\", \"rb\"))\n\n# 2. Pass to code execution via container_upload block\n# Code execution is GA; Files API is still beta (pass via extra_headers)\nresponse = client.messages.create(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n extra_headers={\"anthropic-beta\": \"files-api-2025-04-14\"},\n messages=[{\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"text\", \"text\": \"Analyze this sales data. Show trends and create a visualization.\"},\n {\"type\": \"container_upload\", \"file_id\": uploaded.id}\n ]\n }],\n tools=[{\"type\": \"code_execution_20260120\", \"name\": \"code_execution\"}]\n)\n```\n\n### Retrieve Generated Files\n\n```python\nimport os\n\nOUTPUT_DIR = \"./claude_outputs\"\nos.makedirs(OUTPUT_DIR, exist_ok=True)\n\nfor block in response.content:\n if block.type == \"bash_code_execution_tool_result\":\n result = block.content\n if result.type == \"bash_code_execution_result\" and result.content:\n for file_ref in result.content:\n if file_ref.type == \"bash_code_execution_output\":\n metadata = client.beta.files.retrieve_metadata(file_ref.file_id)\n file_content = client.beta.files.download(file_ref.file_id)\n # Use basename to prevent path traversal; validate result\n safe_name = os.path.basename(metadata.filename)\n if not safe_name or safe_name in (\".\", \"..\"):\n print(f\"Skipping invalid filename: {metadata.filename}\")\n continue\n output_path = os.path.join(OUTPUT_DIR, safe_name)\n file_content.write_to_file(output_path)\n print(f\"Saved: {output_path}\")\n```\n\n### Container Reuse\n\n```python\n# First request: set up environment\nresponse1 = client.messages.create(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n messages=[{\"role\": \"user\", \"content\": \"Install tabulate and create data.json with sample data\"}],\n tools=[{\"type\": \"code_execution_20260120\", \"name\": \"code_execution\"}]\n)\n\n# Get container ID from response\ncontainer_id = response1.container.id\n\n# Second request: reuse the same container\nresponse2 = client.messages.create(\n container=container_id,\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n messages=[{\"role\": \"user\", \"content\": \"Read data.json and display as a formatted table\"}],\n tools=[{\"type\": \"code_execution_20260120\", \"name\": \"code_execution\"}]\n)\n```\n\n### Response Structure\n\n```python\nfor block in response.content:\n if block.type == \"text\":\n print(block.text) # Claude's 
explanation\n elif block.type == \"server_tool_use\":\n print(f\"Running: {block.name} - {block.input}\") # What Claude is doing\n elif block.type == \"bash_code_execution_tool_result\":\n result = block.content\n if result.type == \"bash_code_execution_result\":\n if result.return_code == 0:\n print(f\"Output: {result.stdout}\")\n else:\n print(f\"Error: {result.stderr}\")\n else:\n print(f\"Tool error: {result.error_code}\")\n elif block.type == \"text_editor_code_execution_tool_result\":\n print(f\"File operation: {block.content}\")\n```\n\n---\n\n## Memory Tool\n\n### Basic Usage\n\n```python\nimport anthropic\n\nclient = anthropic.Anthropic()\n\nresponse = client.messages.create(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n messages=[{\"role\": \"user\", \"content\": \"Remember that my preferred language is Python.\"}],\n tools=[{\"type\": \"memory_20250818\", \"name\": \"memory\"}],\n)\n```\n\n### SDK Memory Helper\n\nSubclass `BetaAbstractMemoryTool`:\n\n```python\nfrom anthropic.lib.tools import BetaAbstractMemoryTool\n\nclass MyMemoryTool(BetaAbstractMemoryTool):\n def view(self, command): ...\n def create(self, command): ...\n def str_replace(self, command): ...\n def insert(self, command): ...\n def delete(self, command): ...\n def rename(self, command): ...\n\nmemory = MyMemoryTool()\n\n# Use with tool runner\nrunner = client.beta.messages.tool_runner(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n tools=[memory],\n messages=[{\"role\": \"user\", \"content\": \"Remember my preferences\"}],\n)\n\nfor message in runner:\n print(message)\n```\n\nFor full implementation examples, use WebFetch:\n\n- `https://github.com/anthropics/anthropic-sdk-python/blob/main/examples/memory/basic.py`\n\n---\n\n## Structured Outputs\n\n### JSON Outputs (Pydantic — Recommended)\n\n```python\nfrom pydantic import BaseModel\nfrom typing import List\nimport anthropic\n\nclass ContactInfo(BaseModel):\n name: str\n email: str\n plan: str\n interests: List[str]\n demo_requested: bool\n\nclient = anthropic.Anthropic()\n\nresponse = client.messages.parse(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n messages=[{\n \"role\": \"user\",\n \"content\": \"Extract: Jane Doe (jane@co.com) wants Enterprise, interested in API and SDKs, wants a demo.\"\n }],\n output_format=ContactInfo,\n)\n\n# response.parsed_output is a validated ContactInfo instance\ncontact = response.parsed_output\nprint(contact.name) # \"Jane Doe\"\nprint(contact.interests) # [\"API\", \"SDKs\"]\n```\n\n### Raw Schema\n\n```python\nresponse = client.messages.create(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n messages=[{\n \"role\": \"user\",\n \"content\": \"Extract info: John Smith (john@example.com) wants the Enterprise plan.\"\n }],\n output_config={\n \"format\": {\n \"type\": \"json_schema\",\n \"schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\"type\": \"string\"},\n \"email\": {\"type\": \"string\"},\n \"plan\": {\"type\": \"string\"},\n \"demo_requested\": {\"type\": \"boolean\"}\n },\n \"required\": [\"name\", \"email\", \"plan\", \"demo_requested\"],\n \"additionalProperties\": False\n }\n }\n }\n)\n\nimport json\n# output_config.format guarantees the first block is text with valid JSON\ntext = next(b.text for b in response.content if b.type == \"text\")\ndata = json.loads(text)\n```\n\n### Strict Tool Use\n\n```python\nresponse = client.messages.create(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n messages=[{\"role\": \"user\", \"content\": \"Book a flight to Tokyo for 2 passengers on 
March 15\"}],\n tools=[{\n \"name\": \"book_flight\",\n \"description\": \"Book a flight to a destination\",\n \"strict\": True,\n \"input_schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"destination\": {\"type\": \"string\"},\n \"date\": {\"type\": \"string\", \"format\": \"date\"},\n \"passengers\": {\"type\": \"integer\", \"enum\": [1, 2, 3, 4, 5, 6, 7, 8]}\n },\n \"required\": [\"destination\", \"date\", \"passengers\"],\n \"additionalProperties\": False\n }\n }]\n)\n```\n\n### Using Both Together\n\n```python\nresponse = client.messages.create(\n model=\"claude-opus-4-6\",\n max_tokens=16000,\n messages=[{\"role\": \"user\", \"content\": \"Plan a trip to Paris next month\"}],\n output_config={\n \"format\": {\n \"type\": \"json_schema\",\n \"schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"summary\": {\"type\": \"string\"},\n \"next_steps\": {\"type\": \"array\", \"items\": {\"type\": \"string\"}}\n },\n \"required\": [\"summary\", \"next_steps\"],\n \"additionalProperties\": False\n }\n }\n },\n tools=[{\n \"name\": \"search_flights\",\n \"description\": \"Search for available flights\",\n \"strict\": True,\n \"input_schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"destination\": {\"type\": \"string\"},\n \"date\": {\"type\": \"string\", \"format\": \"date\"}\n },\n \"required\": [\"destination\", \"date\"],\n \"additionalProperties\": False\n }\n }]\n)\n```\n" + } + ] + } + ] + }, + { + "name": "ruby", + "node_type": "folder", + "children": [ + { + "name": "claude-api.md", + "node_type": "file", + "content": "# Claude API — Ruby\n\n> **Note:** The Ruby SDK supports the Claude API. A tool runner is available in beta via `client.beta.messages.tool_runner()`. Agent SDK is not yet available for Ruby.\n\n## Installation\n\n```bash\ngem install anthropic\n```\n\n## Client Initialization\n\n```ruby\nrequire \"anthropic\"\n\n# Default (uses ANTHROPIC_API_KEY env var)\nclient = Anthropic::Client.new\n\n# Explicit API key\nclient = Anthropic::Client.new(api_key: \"your-api-key\")\n```\n\n---\n\n## Basic Message Request\n\n```ruby\nmessage = client.messages.create(\n model: :\"claude-opus-4-6\",\n max_tokens: 16000,\n messages: [\n { role: \"user\", content: \"What is the capital of France?\" }\n ]\n)\n# content is an array of polymorphic block objects (TextBlock, ThinkingBlock,\n# ToolUseBlock, ...). .type is a Symbol — compare with :text, not \"text\".\n# .text raises NoMethodError on non-TextBlock entries.\nmessage.content.each do |block|\n puts block.text if block.type == :text\nend\n```\n\n---\n\n## Streaming\n\n```ruby\nstream = client.messages.stream(\n model: :\"claude-opus-4-6\",\n max_tokens: 64000,\n messages: [{ role: \"user\", content: \"Write a haiku\" }]\n)\n\nstream.text.each { |text| print(text) }\n```\n\n---\n\n## Tool Use\n\nThe Ruby SDK supports tool use via raw JSON schema definitions and also provides a beta tool runner for automatic tool execution.\n\n### Tool Runner (Beta)\n\n```ruby\nclass GetWeatherInput < Anthropic::BaseModel\n required :location, String, doc: \"City and state, e.g. 
San Francisco, CA\"\nend\n\nclass GetWeather < Anthropic::BaseTool\n doc \"Get the current weather for a location\"\n\n input_schema GetWeatherInput\n\n def call(input)\n \"The weather in #{input.location} is sunny and 72°F.\"\n end\nend\n\nclient.beta.messages.tool_runner(\n model: :\"claude-opus-4-6\",\n max_tokens: 16000,\n tools: [GetWeather.new],\n messages: [{ role: \"user\", content: \"What's the weather in San Francisco?\" }]\n).each_message do |message|\n puts message.content\nend\n```\n\n### Manual Loop\n\nSee the [shared tool use concepts](../shared/tool-use-concepts.md) for the tool definition format and agentic loop pattern.\n\n---\n\n## Prompt Caching\n\n`system_:` (trailing underscore — avoids shadowing `Kernel#system`) takes an array of text blocks; set `cache_control` on the last block. Plain hashes work via the `OrHash` type alias. For placement patterns and the silent-invalidator audit checklist, see `shared/prompt-caching.md`.\n\n```ruby\nmessage = client.messages.create(\n model: :\"claude-opus-4-6\",\n max_tokens: 16000,\n system_: [\n { type: \"text\", text: long_system_prompt, cache_control: { type: \"ephemeral\" } }\n ],\n messages: [{ role: \"user\", content: \"Summarize the key points\" }]\n)\n```\n\nFor 1-hour TTL: `cache_control: { type: \"ephemeral\", ttl: \"1h\" }`. There's also a top-level `cache_control:` on `messages.create` that auto-places on the last cacheable block.\n\nVerify hits via `message.usage.cache_creation_input_tokens` / `message.usage.cache_read_input_tokens`.\n" + } + ] + }, + { + "name": "shared", + "node_type": "folder", + "children": [ + { + "name": "error-codes.md", + "node_type": "file", + "content": "# HTTP Error Codes Reference\n\nThis file documents HTTP error codes returned by the Claude API, their common causes, and how to handle them. For language-specific error handling examples, see the `python/` or `typescript/` folders.\n\n## Error Code Summary\n\n| Code | Error Type | Retryable | Common Cause |\n| ---- | ----------------------- | --------- | ------------------------------------ |\n| 400 | `invalid_request_error` | No | Invalid request format or parameters |\n| 401 | `authentication_error` | No | Invalid or missing API key |\n| 403 | `permission_error` | No | API key lacks permission |\n| 404 | `not_found_error` | No | Invalid endpoint or model ID |\n| 413 | `request_too_large` | No | Request exceeds size limits |\n| 429 | `rate_limit_error` | Yes | Too many requests |\n| 500 | `api_error` | Yes | Anthropic service issue |\n| 529 | `overloaded_error` | Yes | API is temporarily overloaded |\n\n## Detailed Error Information\n\n### 400 Bad Request\n\n**Causes:**\n\n- Malformed JSON in request body\n- Missing required parameters (`model`, `max_tokens`, `messages`)\n- Invalid parameter types (e.g., string where integer expected)\n- Empty messages array\n- Messages not alternating user/assistant\n\n**Example error:**\n\n```json\n{\n \"type\": \"error\",\n \"error\": {\n \"type\": \"invalid_request_error\",\n \"message\": \"messages: roles must alternate between \\\"user\\\" and \\\"assistant\\\"\"\n },\n \"request_id\": \"req_011CSHoEeqs5C35K2UUqR7Fy\"\n}\n```\n\n**Fix:** Validate request structure before sending. 
Check that:\n\n- `model` is a valid model ID\n- `max_tokens` is a positive integer\n- `messages` array is non-empty and alternates correctly
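\n\nA pre-flight check along these lines can catch the common cases before the request is sent — a minimal sketch (extend the rules to whatever subset of the API you use):\n\n```python\ndef validate_messages(messages: list) -> None:\n \"\"\"Raise ValueError for the most common 400 causes.\"\"\"\n if not messages:\n raise ValueError(\"messages must be non-empty\")\n if messages[0][\"role\"] != \"user\":\n raise ValueError(\"first message must have role 'user'\")\n for prev, cur in zip(messages, messages[1:]):\n if prev[\"role\"] == cur[\"role\"]:\n raise ValueError(\"roles must alternate between 'user' and 'assistant'\")\n```\n\n---\n\n### 401 Unauthorized\n\n**Causes:**\n\n- Missing `x-api-key` header or `Authorization` header\n- Invalid API key format\n- Revoked or deleted API key\n\n**Fix:** Ensure `ANTHROPIC_API_KEY` environment variable is set correctly.\n\n---\n\n### 403 Forbidden\n\n**Causes:**\n\n- API key doesn't have access to the requested model\n- Organization-level restrictions\n- Attempting to access beta features without beta access\n\n**Fix:** Check your API key permissions in the Console. You may need a different API key or to request access to specific features.\n\n---\n\n### 404 Not Found\n\n**Causes:**\n\n- Typo in model ID (e.g., `claude-sonnet-4.6` instead of `claude-sonnet-4-6`)\n- Using deprecated model ID\n- Invalid API endpoint\n\n**Fix:** Use exact model IDs from the models documentation. You can use aliases (e.g., `claude-opus-4-6`).\n\n---\n\n### 413 Request Too Large\n\n**Causes:**\n\n- Request body exceeds maximum size\n- Too many tokens in input\n- Image data too large\n\n**Fix:** Reduce input size — truncate conversation history, compress/resize images, or split large documents into chunks.\n\n---\n\n### 400 Validation Errors\n\nSome 400 errors are specifically related to parameter validation:\n\n- `max_tokens` exceeds model's limit\n- Invalid `temperature` value (must be 0.0-1.0)\n- `budget_tokens` >= `max_tokens` in extended thinking\n- Invalid tool definition schema\n\n**Common mistake with extended thinking:**\n\n```\n# Wrong: budget_tokens must be < max_tokens\nthinking: budget_tokens=10000, max_tokens=1000 → Error!\n\n# Correct\nthinking: budget_tokens=10000, max_tokens=16000\n```\n\n---\n\n### 429 Rate Limited\n\n**Causes:**\n\n- Exceeded requests per minute (RPM)\n- Exceeded tokens per minute (TPM)\n- Exceeded tokens per day (TPD)\n\n**Headers to check:**\n\n- `retry-after`: Seconds to wait before retrying\n- `x-ratelimit-limit-*`: Your limits\n- `x-ratelimit-remaining-*`: Remaining quota\n\n**Fix:** The Anthropic SDKs automatically retry 429 and 5xx errors with exponential backoff (default: `max_retries=2`). For custom retry behavior, see the language-specific error handling examples.\n\n---\n\n### 500 Internal Server Error\n\n**Causes:**\n\n- Temporary Anthropic service issue\n- Bug in API processing\n\n**Fix:** Retry with exponential backoff. If persistent, check [status.anthropic.com](https://status.anthropic.com).\n\n---\n\n### 529 Overloaded\n\n**Causes:**\n\n- High API demand\n- Service capacity reached\n\n**Fix:** Retry with exponential backoff. 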
Consider using a different model (Haiku is often less loaded), spreading requests over time, or implementing request queuing.\n\n---\n\n## Common Mistakes and Fixes\n\n| Mistake | Error | Fix |\n| ------------------------------- | ---------------- | ------------------------------------------------------- |\n| `budget_tokens` >= `max_tokens` | 400 | Ensure `budget_tokens` < `max_tokens` |\n| Typo in model ID | 404 | Use valid model ID like `claude-opus-4-6` |\n| First message is `assistant` | 400 | First message must be `user` |\n| Consecutive same-role messages | 400 | Alternate `user` and `assistant` |\n| API key in code | 401 (leaked key) | Use environment variable |\n| Custom retry needs | 429/5xx | SDK retries automatically; customize with `max_retries` |\n\n## Typed Exceptions in SDKs\n\n**Always use the SDK's typed exception classes** instead of checking error messages with string matching. Each HTTP error code maps to a specific exception class:\n\n| HTTP Code | TypeScript Class | Python Class |\n| --------- | --------------------------------- | --------------------------------- |\n| 400 | `Anthropic.BadRequestError` | `anthropic.BadRequestError` |\n| 401 | `Anthropic.AuthenticationError` | `anthropic.AuthenticationError` |\n| 403 | `Anthropic.PermissionDeniedError` | `anthropic.PermissionDeniedError` |\n| 404 | `Anthropic.NotFoundError` | `anthropic.NotFoundError` |\n| 429 | `Anthropic.RateLimitError` | `anthropic.RateLimitError` |\n| 500+ | `Anthropic.InternalServerError` | `anthropic.InternalServerError` |\n| Any | `Anthropic.APIError` | `anthropic.APIError` |\n\n```typescript\n// ✅ Correct: use typed exceptions\ntry {\n const response = await client.messages.create({...});\n} catch (error) {\n if (error instanceof Anthropic.RateLimitError) {\n // Handle rate limiting\n } else if (error instanceof Anthropic.APIError) {\n console.error(`API error ${error.status}:`, error.message);\n }\n}\n\n// ❌ Wrong: don't check error messages with string matching\ntry {\n const response = await client.messages.create({...});\n} catch (error) {\n const msg = error instanceof Error ? error.message : String(error);\n if (msg.includes(\"429\") || msg.includes(\"rate_limit\")) { ... }\n}\n```\n\nAll exception classes extend `Anthropic.APIError`, which has a `status` property. Use `instanceof` checks from most specific to least specific (e.g., check `RateLimitError` before `APIError`).\n" + }, + { + "name": "live-sources.md", + "node_type": "file", + "content": "# Live Documentation Sources\n\nThis file contains WebFetch URLs for fetching current information from platform.claude.com and Agent SDK repositories. 
Use these when users need the latest data that may have changed since the cached content was last updated.\n\n## When to Use WebFetch\n\n- User explicitly asks for \"latest\" or \"current\" information\n- Cached data seems incorrect\n- User asks about features not covered in cached content\n- User needs specific API details or examples\n\n## Claude API Documentation URLs\n\n### Models & Pricing\n\n| Topic | URL | Extraction Prompt |\n| --------------- | --------------------------------------------------------------------- | ------------------------------------------------------------------------------- |\n| Models Overview | `https://platform.claude.com/docs/en/about-claude/models/overview.md` | \"Extract current model IDs, context windows, and pricing for all Claude models\" |\n| Pricing | `https://platform.claude.com/docs/en/pricing.md` | \"Extract current pricing per million tokens for input and output\" |\n\n### Core Features\n\n| Topic | URL | Extraction Prompt |\n| ----------------- | ---------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- |\n| Extended Thinking | `https://platform.claude.com/docs/en/build-with-claude/extended-thinking.md` | \"Extract extended thinking parameters, budget_tokens requirements, and usage examples\" |\n| Adaptive Thinking | `https://platform.claude.com/docs/en/build-with-claude/adaptive-thinking.md` | \"Extract adaptive thinking setup, effort levels, and Claude Opus 4.6 usage examples\" |\n| Effort Parameter | `https://platform.claude.com/docs/en/build-with-claude/effort.md` | \"Extract effort levels, cost-quality tradeoffs, and interaction with thinking\" |\n| Tool Use | `https://platform.claude.com/docs/en/agents-and-tools/tool-use/overview.md` | \"Extract tool definition schema, tool_choice options, and handling tool results\" |\n| Streaming | `https://platform.claude.com/docs/en/build-with-claude/streaming.md` | \"Extract streaming event types, SDK examples, and best practices\" |\n| Prompt Caching | `https://platform.claude.com/docs/en/build-with-claude/prompt-caching.md` | \"Extract cache_control usage, pricing benefits, and implementation examples\" |\n\n### Media & Files\n\n| Topic | URL | Extraction Prompt |\n| ----------- | ---------------------------------------------------------------------- | ----------------------------------------------------------------- |\n| Vision | `https://platform.claude.com/docs/en/build-with-claude/vision.md` | \"Extract supported image formats, size limits, and code examples\" |\n| PDF Support | `https://platform.claude.com/docs/en/build-with-claude/pdf-support.md` | \"Extract PDF handling capabilities, limits, and examples\" |\n\n### API Operations\n\n| Topic | URL | Extraction Prompt |\n| ---------------- | --------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------- |\n| Batch Processing | `https://platform.claude.com/docs/en/build-with-claude/batch-processing.md` | \"Extract batch API endpoints, request format, and polling for results\" |\n| Files API | `https://platform.claude.com/docs/en/build-with-claude/files.md` | \"Extract file upload, download, and referencing in messages, including supported types and beta header\" |\n| Token Counting | `https://platform.claude.com/docs/en/build-with-claude/token-counting.md` | \"Extract token counting API usage and examples\" |\n| Rate 
Limits | `https://platform.claude.com/docs/en/api/rate-limits.md` | \"Extract current rate limits by tier and model\" |\n| Errors | `https://platform.claude.com/docs/en/api/errors.md` | \"Extract HTTP error codes, meanings, and retry guidance\" |\n\n### Tools\n\n| Topic | URL | Extraction Prompt |\n| -------------- | -------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- |\n| Code Execution | `https://platform.claude.com/docs/en/agents-and-tools/tool-use/code-execution-tool.md` | \"Extract code execution tool setup, file upload, container reuse, and response handling\" |\n| Computer Use | `https://platform.claude.com/docs/en/agents-and-tools/tool-use/computer-use.md` | \"Extract computer use tool setup, capabilities, and implementation examples\" |\n\n### Advanced Features\n\n| Topic | URL | Extraction Prompt |\n| ------------------ | ----------------------------------------------------------------------------- | --------------------------------------------------- |\n| Structured Outputs | `https://platform.claude.com/docs/en/build-with-claude/structured-outputs.md` | \"Extract output_config.format usage and schema enforcement\" |\n| Compaction | `https://platform.claude.com/docs/en/build-with-claude/compaction.md` | \"Extract compaction setup, trigger config, and streaming with compaction\" |\n| Citations | `https://platform.claude.com/docs/en/build-with-claude/citations.md` | \"Extract citation format and implementation\" |\n| Context Windows | `https://platform.claude.com/docs/en/build-with-claude/context-windows.md` | \"Extract context window sizes and token management\" |\n\n---\n\n## Claude API SDK Repositories\n\n| SDK | URL | Description |\n| ---------- | --------------------------------------------------------- | ------------------------------ |\n| Python | `https://github.com/anthropics/anthropic-sdk-python` | `anthropic` pip package source |\n| TypeScript | `https://github.com/anthropics/anthropic-sdk-typescript` | `@anthropic-ai/sdk` npm source |\n| Java | `https://github.com/anthropics/anthropic-sdk-java` | `anthropic-java` Maven source |\n| Go | `https://github.com/anthropics/anthropic-sdk-go` | Go module source |\n| Ruby | `https://github.com/anthropics/anthropic-sdk-ruby` | `anthropic` gem source |\n| C# | `https://github.com/anthropics/anthropic-sdk-csharp` | NuGet package source |\n| PHP | `https://github.com/anthropics/anthropic-sdk-php` | Composer package source |\n\n---\n\n## Agent SDK Documentation URLs\n\n### Core Documentation\n\n| Topic | URL | Extraction Prompt |\n| -------------------- | ----------------------------------------------------------- | --------------------------------------------------------------- |\n| Agent SDK Overview | `https://platform.claude.com/docs/en/agent-sdk.md` | \"Extract the Agent SDK overview, key features, and use cases\" |\n| Agent SDK Python | `https://github.com/anthropics/claude-agent-sdk-python` | \"Extract Python SDK installation, imports, and basic usage\" |\n| Agent SDK TypeScript | `https://github.com/anthropics/claude-agent-sdk-typescript` | \"Extract TypeScript SDK installation, imports, and basic usage\" |\n\n### SDK Reference (GitHub READMEs)\n\n| Topic | URL | Extraction Prompt |\n| -------------- | ----------------------------------------------------------------------------------------- | ------------------------------------------------------------ |\n| Python SDK | 
`https://raw.githubusercontent.com/anthropics/claude-agent-sdk-python/main/README.md` | \"Extract Python SDK API reference, classes, and methods\" |\n| TypeScript SDK | `https://raw.githubusercontent.com/anthropics/claude-agent-sdk-typescript/main/README.md` | \"Extract TypeScript SDK API reference, types, and functions\" |\n\n### npm/PyPI Packages\n\n| Package | URL | Description |\n| ----------------------------------- | -------------------------------------------------------------- | ------------------------- |\n| claude-agent-sdk (Python) | `https://pypi.org/project/claude-agent-sdk/` | Python package on PyPI |\n| @anthropic-ai/claude-agent-sdk (TS) | `https://www.npmjs.com/package/@anthropic-ai/claude-agent-sdk` | TypeScript package on npm |\n\n### GitHub Repositories\n\n| Resource | URL | Description |\n| -------------- | ----------------------------------------------------------- | ----------------------------------- |\n| Python SDK | `https://github.com/anthropics/claude-agent-sdk-python` | Python package source |\n| TypeScript SDK | `https://github.com/anthropics/claude-agent-sdk-typescript` | TypeScript/Node.js package source |\n| MCP Servers | `https://github.com/modelcontextprotocol` | Official MCP server implementations |\n\n---\n\n## Fallback Strategy\n\nIf WebFetch fails (network issues, URL changed):\n\n1. Use cached content from the language-specific files (note the cache date)\n2. Inform user the data may be outdated\n3. Suggest they check platform.claude.com or the GitHub repos directly\n" + }, + { + "name": "models.md", + "node_type": "file", + "content": "# Claude Model Catalog\n\n**Only use exact model IDs listed in this file.** Never guess or construct model IDs — incorrect IDs will cause API errors. Use aliases wherever available. For the latest information, WebFetch the Models Overview URL in `shared/live-sources.md`, or query the Models API directly (see Programmatic Model Discovery below).\n\n## Programmatic Model Discovery\n\nFor **live** capability data — context window, max output tokens, feature support (thinking, vision, effort, structured outputs, etc.) — query the Models API instead of relying on the cached tables below. Use this when the user asks \"what's the context window for X\", \"does model X support vision/thinking/effort\", \"which models support feature Y\", or wants to select a model by capability at runtime.\n\n```python\nm = client.models.retrieve(\"claude-opus-4-6\")\nm.id # \"claude-opus-4-6\"\nm.display_name # \"Claude Opus 4.6\"\nm.max_input_tokens # context window (int)\nm.max_tokens # max output tokens (int)\n\n# capabilities is an untyped nested dict — bracket access, check [\"supported\"] at the leaf\ncaps = m.capabilities\ncaps[\"image_input\"][\"supported\"] # vision\ncaps[\"thinking\"][\"types\"][\"adaptive\"][\"supported\"] # adaptive thinking\ncaps[\"effort\"][\"max\"][\"supported\"] # effort: max (also low/medium/high)\ncaps[\"structured_outputs\"][\"supported\"]\ncaps[\"context_management\"][\"compact_20260112\"][\"supported\"]\n\n# filter across all models — iterate the page object directly (auto-paginates); do NOT use .data\n[m for m in client.models.list()\n if m.capabilities[\"thinking\"][\"types\"][\"adaptive\"][\"supported\"]\n and m.max_input_tokens >= 200_000]\n```\n\nTop-level fields (`id`, `display_name`, `max_input_tokens`, `max_tokens`) are typed attributes. `capabilities` is a dict — use bracket access, not attribute access. 
The API returns the full capability tree for every model with `supported: true/false` at each leaf, so bracket chains are safe without `.get()` guards. TypeScript SDK: same method names, also auto-paginates on iteration.\n\n### Raw HTTP\n\n```bash\ncurl https://api.anthropic.com/v1/models/claude-opus-4-6 \\\n -H \"x-api-key: $ANTHROPIC_API_KEY\" \\\n -H \"anthropic-version: 2023-06-01\"\n```\n\n```json\n{\n \"id\": \"claude-opus-4-6\",\n \"display_name\": \"Claude Opus 4.6\",\n \"max_input_tokens\": 1000000,\n \"max_tokens\": 128000,\n \"capabilities\": {\n \"image_input\": {\"supported\": true},\n \"structured_outputs\": {\"supported\": true},\n \"thinking\": {\"supported\": true, \"types\": {\"enabled\": {\"supported\": true}, \"adaptive\": {\"supported\": true}}},\n \"effort\": {\"supported\": true, \"low\": {\"supported\": true}, …, \"max\": {\"supported\": true}},\n …\n }\n}\n```\n\n## Current Models (recommended)\n\n| Friendly Name | Alias (use this) | Full ID | Context | Max Output | Status |\n|-------------------|---------------------|-------------------------------|----------------|------------|--------|\n| Claude Opus 4.6 | `claude-opus-4-6` | — | 200K (1M beta) | 128K | Active |\n| Claude Sonnet 4.6 | `claude-sonnet-4-6` | - | 200K (1M beta) | 64K | Active |\n| Claude Haiku 4.5 | `claude-haiku-4-5` | `claude-haiku-4-5-20251001` | 200K | 64K | Active |\n\n### Model Descriptions\n\n- **Claude Opus 4.6** — Our most intelligent model for building agents and coding. Supports adaptive thinking (recommended), 128K max output tokens (requires streaming for large outputs). 1M context window available in beta via `context-1m-2025-08-07` header.\n- **Claude Sonnet 4.6** — Our best combination of speed and intelligence. Supports adaptive thinking (recommended). 1M context window available in beta via `context-1m-2025-08-07` header. 
64K max output tokens.\n- **Claude Haiku 4.5** — Fastest and most cost-effective model for simple tasks.\n\n## Legacy Models (still active)\n\n| Friendly Name | Alias (use this) | Full ID | Status |\n|-------------------|---------------------|-------------------------------|--------|\n| Claude Opus 4.5 | `claude-opus-4-5` | `claude-opus-4-5-20251101` | Active |\n| Claude Opus 4.1 | `claude-opus-4-1` | `claude-opus-4-1-20250805` | Active |\n| Claude Sonnet 4.5 | `claude-sonnet-4-5` | `claude-sonnet-4-5-20250929` | Active |\n| Claude Sonnet 4 | `claude-sonnet-4-0` | `claude-sonnet-4-20250514` | Active |\n| Claude Opus 4 | `claude-opus-4-0` | `claude-opus-4-20250514` | Active |\n\n## Deprecated Models (retiring soon)\n\n| Friendly Name | Alias (use this) | Full ID | Status | Retires |\n|-------------------|---------------------|-------------------------------|------------|--------------|\n| Claude Haiku 3 | — | `claude-3-haiku-20240307` | Deprecated | Apr 19, 2026 |\n\n## Retired Models (no longer available)\n\n| Friendly Name | Full ID | Retired |\n|-------------------|-------------------------------|-------------|\n| Claude Sonnet 3.7 | `claude-3-7-sonnet-20250219` | Feb 19, 2026 |\n| Claude Haiku 3.5 | `claude-3-5-haiku-20241022` | Feb 19, 2026 |\n| Claude Opus 3 | `claude-3-opus-20240229` | Jan 5, 2026 |\n| Claude Sonnet 3.5 | `claude-3-5-sonnet-20241022` | Oct 28, 2025 |\n| Claude Sonnet 3.5 | `claude-3-5-sonnet-20240620` | Oct 28, 2025 |\n| Claude Sonnet 3 | `claude-3-sonnet-20240229` | Jul 21, 2025 |\n| Claude 2.1 | `claude-2.1` | Jul 21, 2025 |\n| Claude 2.0 | `claude-2.0` | Jul 21, 2025 |\n\n## Resolving User Requests\n\nWhen a user asks for a model by name, use this table to find the correct model ID:\n\n| User says... | Use this model ID |\n|-------------------------------------------|--------------------------------|\n| \"opus\", \"most powerful\" | `claude-opus-4-6` |\n| \"opus 4.6\" | `claude-opus-4-6` |\n| \"opus 4.5\" | `claude-opus-4-5` |\n| \"opus 4.1\" | `claude-opus-4-1` |\n| \"opus 4\", \"opus 4.0\" | `claude-opus-4-0` |\n| \"sonnet\", \"balanced\" | `claude-sonnet-4-6` |\n| \"sonnet 4.6\" | `claude-sonnet-4-6` |\n| \"sonnet 4.5\" | `claude-sonnet-4-5` |\n| \"sonnet 4\", \"sonnet 4.0\" | `claude-sonnet-4-0` |\n| \"sonnet 3.7\" | Retired — suggest `claude-sonnet-4-5` |\n| \"sonnet 3.5\" | Retired — suggest `claude-sonnet-4-5` |\n| \"haiku\", \"fast\", \"cheap\" | `claude-haiku-4-5` |\n| \"haiku 4.5\" | `claude-haiku-4-5` |\n| \"haiku 3.5\" | Retired — suggest `claude-haiku-4-5` |\n| \"haiku 3\" | Deprecated — suggest `claude-haiku-4-5` |\n" + }, + { + "name": "prompt-caching.md", + "node_type": "file", + "content": "# Prompt Caching — Design & Optimization\n\nThis file covers how to design prompt-building code for effective caching. For language-specific syntax, see the `## Prompt Caching` section in each language's README or single-file doc.\n\n## The one invariant everything follows from\n\n**Prompt caching is a prefix match. Any change anywhere in the prefix invalidates everything after it.**\n\nThe cache key is derived from the exact bytes of the rendered prompt up to each `cache_control` breakpoint. A single byte difference at position N — a timestamp, a reordered JSON key, a different tool in the list — invalidates the cache for all breakpoints at positions ≥ N.\n\nRender order is: `tools` → `system` → `messages`. A breakpoint on the last system block caches both tools and system together.\n\nDesign the prompt-building path around this constraint. 
Get the ordering right and most caching works for free. Get it wrong and no amount of `cache_control` markers will help.\n\n---\n\n## Workflow for optimizing existing code\n\nWhen asked to add or optimize caching:\n\n1. **Trace the prompt assembly path.** Find where `system`, `tools`, and `messages` are constructed. Identify every input that flows into them.\n2. **Classify each input by stability:**\n - Never changes → belongs early in the prompt, before any breakpoint\n - Changes per-session → belongs after the global prefix, cache per-session\n - Changes per-turn → belongs at the end, after the last breakpoint\n - Changes per-request (timestamps, UUIDs, random IDs) → **eliminate or move to the very end**\n3. **Check rendered order matches stability order.** Stable content must physically precede volatile content. If a timestamp is interpolated into the system prompt header, everything after it is uncacheable regardless of markers.\n4. **Place breakpoints at stability boundaries.** See placement patterns below.\n5. **Audit for silent invalidators.** See anti-patterns table.
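\n\nA compressed sketch of what steps 2–4 look like in code — the names here (`STATIC_SYSTEM`, `build_messages`, `INSTRUCTIONS_AND_EXAMPLES`) are illustrative, not part of any API:\n\n```python\n# Stable content is frozen at import time — no per-request values here\nSTATIC_SYSTEM = [\n {\n \"type\": \"text\",\n \"text\": INSTRUCTIONS_AND_EXAMPLES, # never changes\n \"cache_control\": {\"type\": \"ephemeral\"}, # breakpoint at the stability boundary\n }\n]\n\ndef build_messages(history: list, question: str, today: str) -> list:\n # Per-turn and per-request content goes after the breakpoint:\n # the current date lives in a message, not in the system prompt\n return [*history, {\"role\": \"user\", \"content\": f\"(today: {today}) {question}\"}]\n```\n\n---\n\n## Placement patterns\n\n### Large system prompt shared across many requests\n\nPut a breakpoint on the last system text block. If there are tools, they render before system — the marker on the last system block caches tools + system together.\n\n```json\n\"system\": [\n {\"type\": \"text\", \"text\": \"\", \"cache_control\": {\"type\": \"ephemeral\"}}\n]\n```\n\n### Multi-turn conversations\n\nPut a breakpoint on the last content block of the most-recently-appended turn. Each subsequent request reuses the entire prior conversation prefix. Earlier breakpoints remain valid read points, so hits accrue incrementally as the conversation grows.\n\n```json\n// Last content block of the last user turn\nmessages[-1].content[-1].cache_control = {\"type\": \"ephemeral\"}\n```\n\n### Shared prefix, varying suffix\n\nMany requests share a large fixed preamble (few-shot examples, retrieved docs, instructions) but differ in the final question. Put the breakpoint at the end of the **shared** portion, not at the end of the whole prompt — otherwise every request writes a distinct cache entry and nothing is ever read.\n\n```json\n\"messages\": [{\"role\": \"user\", \"content\": [\n {\"type\": \"text\", \"text\": \"\", \"cache_control\": {\"type\": \"ephemeral\"}},\n {\"type\": \"text\", \"text\": \"\"} // no marker — differs every time\n]}]\n```\n\n### Prompts that change from the beginning every time\n\nDon't cache. If the first 1K tokens differ per request, there is no reusable prefix. Adding `cache_control` only pays the cache-write premium with zero reads. Leave it off.\n\n---\n\n## Architectural guidance\n\nThese are the decisions that matter more than marker placement. Fix these first.\n\n**Keep the system prompt frozen.** Don't interpolate \"current date: X\", \"mode: Y\", \"user name: Z\" into the system prompt — those sit at the front of the prefix and invalidate everything downstream. Inject dynamic context as a user or assistant message later in `messages`. A message at turn 5 invalidates nothing before turn 5.\n\n**Don't change tools or model mid-conversation.** Tools render at position 0; adding, removing, or reordering a tool invalidates the entire cache. Same for switching models (caches are model-scoped). If you need \"modes\", don't swap the tool set — give Claude a tool that records the mode transition, or pass the mode as message content. 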
Serialize tools deterministically (sort by name).\n\n**Fork operations must reuse the parent's exact prefix.** Side computations (summarization, compaction, sub-agents) often spin up a separate API call. If the fork rebuilds `system` / `tools` / `model` with any difference, it misses the parent's cache entirely. Copy the parent's `system`, `tools`, and `model` verbatim, then append fork-specific content at the end.\n\n---\n\n## Silent invalidators\n\nWhen reviewing code, grep for these inside anything that feeds the prompt prefix:\n\n| Pattern | Why it breaks caching |\n|---|---|\n| `datetime.now()` / `Date.now()` / `time.time()` in system prompt | Prefix changes every request |\n| `uuid4()` / `crypto.randomUUID()` / request IDs early in content | Same — every request is unique |\n| `json.dumps(d)` without `sort_keys=True` / iterating a `set` | Non-deterministic serialization → prefix bytes differ |\n| f-string interpolating session/user ID into system prompt | Per-user prefix; no cross-user sharing |\n| Conditional system sections (`if flag: system += ...`) | Every flag combination is a distinct prefix |\n| `tools=build_tools(user)` where set varies per user | Tools render at position 0; nothing caches across users |\n\nFix by moving the dynamic piece after the last breakpoint, making it deterministic, or deleting it if it's not load-bearing.\n\n---\n\n## API reference\n\n```json\n\"cache_control\": {\"type\": \"ephemeral\"} // 5-minute TTL (default)\n\"cache_control\": {\"type\": \"ephemeral\", \"ttl\": \"1h\"} // 1-hour TTL\n```\n\n- Max **4** `cache_control` breakpoints per request.\n- Goes on any content block: system text blocks, tool definitions, message content blocks (`text`, `image`, `tool_use`, `tool_result`, `document`).\n- Top-level `cache_control` on `messages.create()` auto-places on the last cacheable block — simplest option when you don't need fine-grained placement.\n- Minimum cacheable prefix is model-dependent (typically 1024–2048 tokens). Shorter prefixes silently won't cache even with a marker.\n\n**Economics:** Cache writes cost ~1.25× base input price; reads cost ~0.1×. A prefix must be used in at least two requests within TTL to break even (one writes the cache, subsequent ones read it). For bursty traffic, the 1-hour TTL keeps entries alive across gaps.\n\n---\n\n## Verifying cache hits\n\nThe response `usage` object reports cache activity:\n\n| Field | Meaning |\n|---|---|\n| `cache_creation_input_tokens` | Tokens written to cache this request (you paid the ~1.25× write premium) |\n| `cache_read_input_tokens` | Tokens served from cache this request (you paid ~0.1×) |\n| `input_tokens` | Tokens processed at full price (not cached) |\n\nIf `cache_read_input_tokens` is zero across repeated requests with identical prefixes, a silent invalidator is at work — diff the rendered prompt bytes between two requests to find it.\n\nLanguage-specific access: `response.usage.cache_read_input_tokens` (Python/TS/Ruby), `$message->usage->cacheReadInputTokens` (PHP), `resp.Usage.CacheReadInputTokens` (Go/C#), `.usage().cacheReadInputTokens()` (Java).\n" + }, + { + "name": "tool-use-concepts.md", + "node_type": "file", + "content": "# Tool Use Concepts\n\nThis file covers the conceptual foundations of tool use with the Claude API. 
For language-specific code examples, see the `python/`, `typescript/`, or other language folders.\n\n## User-Defined Tools\n\n### Tool Definition Structure\n\n> **Note:** When using the Tool Runner (beta), tool schemas are generated automatically from your function signatures (Python), Zod schemas (TypeScript), annotated classes (Java), `jsonschema` struct tags (Go), or `BaseTool` subclasses (Ruby). The raw JSON schema format below is for the manual approach — including PHP's `BetaRunnableTool`, which wraps a run closure around a hand-written schema — or SDKs without tool runner support.\n\nEach tool requires a name, description, and JSON Schema for its inputs:\n\n```json\n{\n \"name\": \"get_weather\",\n \"description\": \"Get current weather for a location\",\n \"input_schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"City and state, e.g., San Francisco, CA\"\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"],\n \"description\": \"Temperature unit\"\n }\n },\n \"required\": [\"location\"]\n }\n}\n```\n\n**Best practices for tool definitions:**\n\n- Use clear, descriptive names (e.g., `get_weather`, `search_database`, `send_email`)\n- Write detailed descriptions — Claude uses these to decide when to use the tool\n- Include descriptions for each property\n- Use `enum` for parameters with a fixed set of values\n- Mark truly required parameters in `required`; make others optional with defaults\n\n---\n\n### Tool Choice Options\n\nControl when Claude uses tools:\n\n| Value | Behavior |\n| --------------------------------- | --------------------------------------------- |\n| `{\"type\": \"auto\"}` | Claude decides whether to use tools (default) |\n| `{\"type\": \"any\"}` | Claude must use at least one tool |\n| `{\"type\": \"tool\", \"name\": \"...\"}` | Claude must use the specified tool |\n| `{\"type\": \"none\"}` | Claude cannot use tools |\n\nAny `tool_choice` value can also include `\"disable_parallel_tool_use\": true` to force Claude to use at most one tool per response. By default, Claude may request multiple tool calls in a single response.\n\n---\n\n### Tool Runner vs Manual Loop\n\n**Tool Runner (Recommended):** The SDK's tool runner handles the agentic loop automatically — it calls the API, detects tool use requests, executes your tool functions, feeds results back to Claude, and repeats until Claude stops calling tools. Available in Python, TypeScript, Java, Go, Ruby, and PHP SDKs (beta). The Python SDK also provides MCP conversion helpers (`anthropic.lib.tools.mcp`) to convert MCP tools, prompts, and resources for use with the tool runner — see `python/claude-api/tool-use.md` for details.\n\n**Manual Agentic Loop:** Use when you need fine-grained control over the loop (e.g., custom logging, conditional tool execution, human-in-the-loop approval). Loop until `stop_reason == \"end_turn\"`, always append the full `response.content` to preserve tool_use blocks, and ensure each `tool_result` includes the matching `tool_use_id`.\n\n**Stop reasons for server-side tools:** When using server-side tools (code execution, web search, etc.), the API runs a server-side sampling loop. If this loop reaches its default limit of 10 iterations, the response will have `stop_reason: \"pause_turn\"`. To continue, re-send the user message and assistant response and make another API request — the server will resume where it left off. 
Do NOT add an extra user message like \"Continue.\" — the API detects the trailing `server_tool_use` block and knows to resume automatically.\n\n```python\n# Handle pause_turn in your agentic loop\nif response.stop_reason == \"pause_turn\":\n messages = [\n {\"role\": \"user\", \"content\": user_query},\n {\"role\": \"assistant\", \"content\": response.content},\n ]\n # Make another API request — server resumes automatically\n response = client.messages.create(\n model=\"claude-opus-4-6\", messages=messages, tools=tools\n )\n```\n\nSet a `max_continuations` limit (e.g., 5) to prevent infinite loops. For the full guide, see: `https://platform.claude.com/docs/en/build-with-claude/handling-stop-reasons`\n\n> **Security:** The tool runner executes your tool functions automatically whenever Claude requests them. For tools with side effects (sending emails, modifying databases, financial transactions), validate inputs within your tool functions and consider requiring confirmation for destructive operations. Use the manual agentic loop if you need human-in-the-loop approval before each tool execution.\n\n---\n\n### Handling Tool Results\n\nWhen Claude uses a tool, the response contains a `tool_use` block. You must:\n\n1. Execute the tool with the provided input\n2. Send the result back in a `tool_result` message\n3. Continue the conversation\n\n**Error handling in tool results:** When a tool execution fails, set `\"is_error\": true` and provide an informative error message. Claude will typically acknowledge the error and either try a different approach or ask for clarification.\n\n**Multiple tool calls:** Claude can request multiple tools in a single response. Handle them all before continuing — send all results back in a single `user` message.\n\n---\n\n## Server-Side Tools: Code Execution\n\nThe code execution tool lets Claude run code in a secure, sandboxed container. Unlike user-defined tools, server-side tools run on Anthropic's infrastructure — you don't execute anything client-side. Just include the tool definition and Claude handles the rest.\n\n### Key Facts\n\n- Runs in an isolated container (1 CPU, 5 GiB RAM, 5 GiB disk)\n- No internet access (fully sandboxed)\n- Python 3.11 with data science libraries pre-installed\n- Containers persist for 30 days and can be reused across requests\n- Free when used with web search/web fetch tools; otherwise $0.05/hour after 1,550 free hours/month per organization\n\n### Tool Definition\n\nThe tool requires no schema — just declare it in the `tools` array:\n\n```json\n{\n \"type\": \"code_execution_20260120\",\n \"name\": \"code_execution\"\n}\n```\n\nClaude automatically gains access to `bash_code_execution` (run shell commands) and `text_editor_code_execution` (create/view/edit files).\n\n### Pre-installed Python Libraries\n\n- **Data science**: pandas, numpy, scipy, scikit-learn, statsmodels\n- **Visualization**: matplotlib, seaborn\n- **File processing**: openpyxl, xlsxwriter, pillow, pypdf, pdfplumber, python-docx, python-pptx\n- **Math**: sympy, mpmath\n- **Utilities**: tqdm, python-dateutil, pytz, sqlite3\n\nAdditional packages can be installed at runtime via `pip install`.\n\n### Supported File Types for Upload\n\n| Type | Extensions |\n| ------ | ---------------------------------- |\n| Data | CSV, Excel (.xlsx/.xls), JSON, XML |\n| Images | JPEG, PNG, GIF, WebP |\n| Text | .txt, .md, .py, .js, etc. |\n\n### Container Reuse\n\nReuse containers across requests to maintain state (files, installed packages, variables). 
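A minimal TypeScript sketch; the top-level `container` request parameter and `container.id` response field are taken from the platform docs, so verify the exact names for your SDK version:\n\n```typescript\n// First request: Claude creates files/state inside a fresh container.\nconst first = await client.messages.create({\n  model: \"claude-opus-4-6\",\n  max_tokens: 16000,\n  tools: [{ type: \"code_execution_20260120\", name: \"code_execution\" }],\n  messages: [{ role: \"user\", content: \"Create data.csv with 10 sample rows\" }],\n});\n\n// Second request: pass the container ID back to reuse the same sandbox,\n// so files and installed packages persist.\nconst second = await client.messages.create({\n  model: \"claude-opus-4-6\",\n  max_tokens: 16000,\n  container: first.container?.id, // assumed response field per platform docs\n  tools: [{ type: \"code_execution_20260120\", name: \"code_execution\" }],\n  messages: [{ role: \"user\", content: \"Plot the data in data.csv\" }],\n});\n```\n\n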
Extract the `container_id` from the first response and pass it to subsequent requests.\n\n### Response Structure\n\nThe response contains interleaved text and tool result blocks:\n\n- `text` — Claude's explanation\n- `server_tool_use` — What Claude is doing\n- `bash_code_execution_tool_result` — Code execution output (check `return_code` for success/failure)\n- `text_editor_code_execution_tool_result` — File operation results\n\n> **Security:** Always sanitize filenames with `os.path.basename()` / `path.basename()` before writing downloaded files to disk to prevent path traversal attacks. Write files to a dedicated output directory.\n\n---\n\n## Server-Side Tools: Web Search and Web Fetch\n\nWeb search and web fetch let Claude search the web and retrieve page content. They run server-side — just include the tool definitions and Claude handles queries, fetching, and result processing automatically.\n\n### Tool Definitions\n\n```json\n[\n { \"type\": \"web_search_20260209\", \"name\": \"web_search\" },\n { \"type\": \"web_fetch_20260209\", \"name\": \"web_fetch\" }\n]\n```\n\n### Dynamic Filtering (Opus 4.6 / Sonnet 4.6)\n\nThe `web_search_20260209` and `web_fetch_20260209` versions support **dynamic filtering** — Claude writes and executes code to filter search results before they reach the context window, improving accuracy and token efficiency. Dynamic filtering is built into these tool versions and activates automatically; you do not need to separately declare the `code_execution` tool or pass any beta header.\n\n```json\n{\n \"tools\": [\n { \"type\": \"web_search_20260209\", \"name\": \"web_search\" },\n { \"type\": \"web_fetch_20260209\", \"name\": \"web_fetch\" }\n ]\n}\n```\n\nWithout dynamic filtering, the previous `web_search_20250305` version is also available.\n\n> **Note:** Only include the standalone `code_execution` tool when your application needs code execution for its own purposes (data analysis, file processing, visualization) independent of web search. Including it alongside `_20260209` web tools creates a second execution environment that can confuse the model.\n\n---\n\n## Server-Side Tools: Programmatic Tool Calling\n\nProgrammatic tool calling lets Claude execute complex multi-tool workflows in code, keeping intermediate results out of the context window. Claude writes code that calls your tools directly, reducing token usage for multi-step operations.\n\nFor full documentation, use WebFetch:\n\n- URL: `https://platform.claude.com/docs/en/agents-and-tools/tool-use/programmatic-tool-calling`\n\n---\n\n## Server-Side Tools: Tool Search\n\nThe tool search tool lets Claude dynamically discover tools from large libraries without loading all definitions into the context window. Useful when you have many tools but only a few are relevant to any given query.\n\nFor full documentation, use WebFetch:\n\n- URL: `https://platform.claude.com/docs/en/agents-and-tools/tool-use/tool-search-tool`\n\n---\n\n## Tool Use Examples\n\nYou can provide sample tool calls directly in your tool definitions to demonstrate usage patterns and reduce parameter errors. This helps Claude understand how to correctly format tool inputs, especially for tools with complex schemas.\n\nFor full documentation, use WebFetch:\n\n- URL: `https://platform.claude.com/docs/en/agents-and-tools/tool-use/implement-tool-use`\n\n---\n\n## Server-Side Tools: Computer Use\n\nComputer use lets Claude interact with a desktop environment (screenshots, mouse, keyboard). 
It can be Anthropic-hosted (server-side, like code execution) or self-hosted (you provide the environment and execute actions client-side).\n\nFor full documentation, use WebFetch:\n\n- URL: `https://platform.claude.com/docs/en/agents-and-tools/computer-use/overview`\n\n---\n\n## Client-Side Tools: Memory\n\nThe memory tool enables Claude to store and retrieve information across conversations through a memory file directory. Claude can create, read, update, and delete files that persist between sessions.\n\n### Key Facts\n\n- Client-side tool — you control storage via your implementation\n- Supports commands: `view`, `create`, `str_replace`, `insert`, `delete`, `rename`\n- Operates on files in a `/memories` directory\n- The Python, TypeScript, and Java SDKs provide helper classes/functions for implementing the memory backend\n\n> **Security:** Never store API keys, passwords, tokens, or other secrets in memory files. Be cautious with personally identifiable information (PII) — check data privacy regulations (GDPR, CCPA) before persisting user data. The reference implementations have no built-in access control; in multi-user systems, implement per-user memory directories and authentication in your tool handlers.\n\nFor full implementation examples, use WebFetch:\n\n- Docs: `https://platform.claude.com/docs/en/agents-and-tools/tool-use/memory-tool.md`\n\n---\n\n## Structured Outputs\n\nStructured outputs constrain Claude's responses to follow a specific JSON schema, guaranteeing valid, parseable output. This is not a separate tool — it enhances the Messages API response format and/or tool parameter validation.\n\nTwo features are available:\n\n- **JSON outputs** (`output_config.format`): Control Claude's response format\n- **Strict tool use** (`strict: true`): Guarantee valid tool parameter schemas\n\n**Supported models:** Claude Opus 4.6, Claude Sonnet 4.6, and Claude Haiku 4.5. Legacy models (Claude Opus 4.5, Claude Opus 4.1) also support structured outputs.\n\n> **Recommended:** Use `client.messages.parse()` which automatically validates responses against your schema. When using `messages.create()` directly, use `output_config: {format: {...}}`. The `output_format` convenience parameter is also accepted by some SDK methods (e.g., `.parse()`), but `output_config.format` is the canonical API-level parameter.\n\n### JSON Schema Limitations\n\n**Supported:**\n\n- Basic types: object, array, string, integer, number, boolean, null\n- `enum`, `const`, `anyOf`, `allOf`, `$ref`/`$defs`\n- String formats: `date-time`, `time`, `date`, `duration`, `email`, `hostname`, `uri`, `ipv4`, `ipv6`, `uuid`\n- `additionalProperties: false` (required for all objects)\n\n**Not supported:**\n\n- Recursive schemas\n- Numerical constraints (`minimum`, `maximum`, `multipleOf`)\n- String constraints (`minLength`, `maxLength`)\n- Complex array constraints\n- `additionalProperties` set to anything other than `false`\n\nThe Python and TypeScript SDKs automatically handle unsupported constraints by removing them from the schema sent to the API and validating them client-side.\n\n### Important Notes\n\n- **First request latency**: New schemas incur a one-time compilation cost. Subsequent requests with the same schema use a 24-hour cache.\n- **Refusals**: If Claude refuses for safety reasons (`stop_reason: \"refusal\"`), the output may not match your schema.\n- **Token limits**: If `stop_reason: \"max_tokens\"`, output may be incomplete. 
Increase `max_tokens`.\n- **Incompatible with**: Citations (returns 400 error), message prefilling.\n- **Works with**: Batches API, streaming, token counting, extended thinking.\n\n---\n\n## Tips for Effective Tool Use\n\n1. **Provide detailed descriptions**: Claude relies heavily on descriptions to understand when and how to use tools\n2. **Use specific tool names**: `get_current_weather` is better than `weather`\n3. **Validate inputs**: Always validate tool inputs before execution\n4. **Handle errors gracefully**: Return informative error messages so Claude can adapt\n5. **Limit tool count**: Too many tools can confuse the model — keep the set focused\n6. **Test tool interactions**: Verify Claude uses tools correctly in various scenarios\n\nFor detailed tool use documentation, use WebFetch:\n\n- URL: `https://platform.claude.com/docs/en/agents-and-tools/tool-use/overview`\n" + } + ] + }, + { + "name": "typescript", + "node_type": "folder", + "children": [ + { + "name": "agent-sdk", + "node_type": "folder", + "children": [ + { + "name": "README.md", + "node_type": "file", + "content": "# Agent SDK — TypeScript\n\nThe Claude Agent SDK provides a higher-level interface for building AI agents with built-in tools, safety features, and agentic capabilities.\n\n## Installation\n\n```bash\nnpm install @anthropic-ai/claude-agent-sdk\n```\n\n---\n\n## Quick Start\n\n```typescript\nimport { query } from \"@anthropic-ai/claude-agent-sdk\";\n\nfor await (const message of query({\n prompt: \"Explain this codebase\",\n options: { allowedTools: [\"Read\", \"Glob\", \"Grep\"] },\n})) {\n if (\"result\" in message) {\n console.log(message.result);\n }\n}\n```\n\n---\n\n## Built-in Tools\n\n| Tool | Description |\n| --------- | ------------------------------------ |\n| Read | Read files in the workspace |\n| Write | Create new files |\n| Edit | Make precise edits to existing files |\n| Bash | Execute shell commands |\n| Glob | Find files by pattern |\n| Grep | Search files by content |\n| WebSearch | Search the web for information |\n| WebFetch | Fetch and analyze web pages |\n| AskUserQuestion | Ask user clarifying questions |\n| Agent | Spawn subagents |\n\n---\n\n## Permission System\n\n```typescript\nfor await (const message of query({\n prompt: \"Refactor the authentication module\",\n options: {\n allowedTools: [\"Read\", \"Edit\", \"Write\"],\n permissionMode: \"acceptEdits\",\n },\n})) {\n if (\"result\" in message) console.log(message.result);\n}\n```\n\nPermission modes:\n\n- `\"default\"`: Prompt for dangerous operations\n- `\"plan\"`: Planning only, no execution\n- `\"acceptEdits\"`: Auto-accept file edits\n- `\"dontAsk\"`: Don't prompt — **denies** anything not pre-approved (not an auto-approve mode)\n- `\"bypassPermissions\"`: Skip all prompts (requires `allowDangerouslySkipPermissions: true` in options)\n\n---\n\n## MCP (Model Context Protocol) Support\n\n```typescript\nfor await (const message of query({\n prompt: \"Open example.com and describe what you see\",\n options: {\n mcpServers: {\n playwright: { command: \"npx\", args: [\"@playwright/mcp@latest\"] },\n },\n },\n})) {\n if (\"result\" in message) console.log(message.result);\n}\n```\n\n### In-Process MCP Tools\n\nYou can define custom tools that run in-process using `tool()` and `createSdkMcpServer`:\n\n```typescript\nimport { query, tool, createSdkMcpServer } from \"@anthropic-ai/claude-agent-sdk\";\nimport { z } from \"zod\";\n\nconst myTool = tool(\"my-tool\", \"Description\", { input: z.string() }, async (args) => {\n return { 
content: [{ type: \"text\", text: \"result\" }] };\n});\n\nconst server = createSdkMcpServer({ name: \"my-server\", tools: [myTool] });\n\n// Pass to query\nfor await (const message of query({\n prompt: \"Use my-tool to do something\",\n options: { mcpServers: { myServer: server } },\n})) {\n if (\"result\" in message) console.log(message.result);\n}\n```\n\n---\n\n## Hooks\n\n```typescript\nimport { query, HookCallback } from \"@anthropic-ai/claude-agent-sdk\";\nimport { appendFileSync } from \"fs\";\n\nconst logFileChange: HookCallback = async (input) => {\n const filePath = (input as any).tool_input?.file_path ?? \"unknown\";\n appendFileSync(\n \"./audit.log\",\n `${new Date().toISOString()}: modified ${filePath}\\n`,\n );\n return {};\n};\n\nfor await (const message of query({\n prompt: \"Refactor utils.py to improve readability\",\n options: {\n allowedTools: [\"Read\", \"Edit\", \"Write\"],\n permissionMode: \"acceptEdits\",\n hooks: {\n PostToolUse: [{ matcher: \"Edit|Write\", hooks: [logFileChange] }],\n },\n },\n})) {\n if (\"result\" in message) console.log(message.result);\n}\n```\n\nHook event inputs for tool-lifecycle events (`PreToolUse`, `PostToolUse`, `PostToolUseFailure`) include `agent_id` and `agent_type` fields, allowing hooks to identify which agent (main or subagent) triggered the tool call.\n\nAvailable hook events: `PreToolUse`, `PostToolUse`, `PostToolUseFailure`, `Notification`, `UserPromptSubmit`, `SessionStart`, `SessionEnd`, `Stop`, `SubagentStart`, `SubagentStop`, `PreCompact`, `PermissionRequest`, `Setup`, `TeammateIdle`, `TaskCompleted`, `ConfigChange`, `Elicitation`, `ElicitationResult`, `WorktreeCreate`, `WorktreeRemove`, `InstructionsLoaded`\n\n---\n\n## Common Options\n\n`query()` takes a top-level `prompt` (string) and an `options` object:\n\n```typescript\nquery({ prompt: \"...\", options: { ... } })\n```\n\n| Option | Type | Description |\n| ----------------------------------- | ------ | -------------------------------------------------------------------------- |\n| `cwd` | string | Working directory for file operations |\n| `allowedTools` | array | Tools the agent can use (e.g., `[\"Read\", \"Edit\", \"Bash\"]`) |\n| `tools` | array \\| preset | Built-in tools to make available (`string[]` or `{type:'preset', preset:'claude_code'}`) |\n| `disallowedTools` | array | Tools to explicitly disallow |\n| `permissionMode` | string | How to handle permission prompts |\n| `allowDangerouslySkipPermissions` | bool | Must be `true` to use `permissionMode: \"bypassPermissions\"` |\n| `mcpServers` | object | MCP servers to connect to |\n| `hooks` | object | Hooks for customizing behavior |\n| `systemPrompt` | string \\| preset | Custom system prompt (`string` or `{type:'preset', preset:'claude_code', append?:string}`) |\n| `maxTurns` | number | Maximum agent turns before stopping |\n| `maxBudgetUsd` | number | Maximum budget in USD for the query |\n| `model` | string | Model ID (default: determined by CLI) |\n| `agents` | object | Subagent definitions (`Record`) |\n| `outputFormat` | object | Structured output schema |\n| `thinking` | object | Thinking/reasoning control |\n| `betas` | array | Beta features to enable (e.g., `[\"context-1m-2025-08-07\"]`) |\n| `settingSources` | array | Settings to load (e.g., `[\"project\"]`). 
Default: none (no CLAUDE.md files) |\n| `env` | object | Environment variables to set for the session |\n| `agentProgressSummaries` | bool | Enable periodic AI-generated progress summaries on `task_progress` events |\n\n---\n\n## Subagents\n\n```typescript\nfor await (const message of query({\n prompt: \"Use the code-reviewer agent to review this codebase\",\n options: {\n allowedTools: [\"Read\", \"Glob\", \"Grep\", \"Agent\"],\n agents: {\n \"code-reviewer\": {\n description: \"Expert code reviewer for quality and security reviews.\",\n prompt: \"Analyze code quality and suggest improvements.\",\n tools: [\"Read\", \"Glob\", \"Grep\"],\n // Optional: skills, mcpServers for subagent customization\n },\n },\n },\n})) {\n if (\"result\" in message) console.log(message.result);\n}\n```\n\n---\n\n## Message Types\n\n```typescript\nfor await (const message of query({\n prompt: \"Find TODO comments\",\n options: { allowedTools: [\"Read\", \"Glob\", \"Grep\"] },\n})) {\n if (\"result\" in message) {\n console.log(message.result);\n console.log(`Stop reason: ${message.stop_reason}`); // e.g., \"end_turn\", \"tool_use\", \"max_tokens\"\n } else if (message.type === \"system\" && message.subtype === \"init\") {\n const sessionId = message.session_id; // Capture for resuming later\n }\n}\n```\n\nTask-related system messages are also emitted for subagent operations:\n- `task_started` — emitted when a subagent task is registered\n- `task_progress` — real-time progress updates with cumulative usage metrics, tool counts, and duration (enable `agentProgressSummaries` option for periodic AI-generated summaries via the `summary` field)\n- `task_notification` — task completion notifications (includes `tool_use_id` for correlating with originating tool calls)\n\n---\n\n## Session History\n\nRetrieve past session data:\n\n```typescript\nimport { listSessions, getSessionMessages, getSessionInfo } from \"@anthropic-ai/claude-agent-sdk\";\n\n// List all past sessions (supports pagination via limit/offset)\nconst sessions = await listSessions({ limit: 20, offset: 0 });\nfor (const session of sessions) {\n console.log(`${session.sessionId}: ${session.cwd} (tag: ${session.tag})`);\n}\n\n// Get metadata for a single session\nconst sessionId = sessions[0]?.sessionId;\nconst info = await getSessionInfo(sessionId);\nconsole.log(info.tag, info.createdAt);\n\n// Get messages from a specific session (supports pagination via limit/offset)\nconst messages = await getSessionMessages(sessionId, { limit: 50, offset: 0 });\nfor (const msg of messages) {\n console.log(msg);\n}\n```\n\n### Session Mutations\n\nRename, tag, or fork sessions:\n\n```typescript\nimport { renameSession, tagSession, forkSession } from \"@anthropic-ai/claude-agent-sdk\";\n\n// Rename a session\nawait renameSession(sessionId, \"My refactoring session\");\n\n// Tag a session\nawait tagSession(sessionId, \"experiment\");\n\n// Clear a tag\nawait tagSession(sessionId, null);\n\n// Fork a session — branch a conversation from a specific point\nconst { sessionId: forkedId } = await forkSession(sessionId);\n```\n\n---\n\n## MCP Server Management\n\nManage MCP servers at runtime on a running query:\n\n```typescript\n// Reconnect a disconnected MCP server\nawait queryHandle.reconnectMcpServer(\"my-server\");\n\n// Toggle an MCP server on/off\nawait queryHandle.toggleMcpServer(\"my-server\", false); // (name, enabled) — both required\n\n// Get status of ALL configured MCP servers — returns an ARRAY\nconst statuses: McpServerStatus[] = await 
queryHandle.mcpServerStatus();\nfor (const s of statuses) {\n console.log(s.name, s.scope, s.tools.length, s.error);\n}\n```\n\n---\n\n## Best Practices\n\n1. **Always specify allowedTools** — Explicitly list which tools the agent can use\n2. **Set working directory** — Always specify `cwd` for file operations\n3. **Use appropriate permission modes** — Start with `\"default\"` and only escalate when needed\n4. **Handle all message types** — Check for `result` property to get agent output\n5. **Limit maxTurns** — Prevent runaway agents with reasonable limits\n" + }, + { + "name": "patterns.md", + "node_type": "file", + "content": "# Agent SDK Patterns — TypeScript\n\n## Basic Agent\n\n```typescript\nimport { query } from \"@anthropic-ai/claude-agent-sdk\";\n\nasync function main() {\n for await (const message of query({\n prompt: \"Explain what this repository does\",\n options: {\n cwd: \"/path/to/project\",\n allowedTools: [\"Read\", \"Glob\", \"Grep\"],\n },\n })) {\n if (\"result\" in message) {\n console.log(message.result);\n }\n }\n}\n\nmain();\n```\n\n---\n\n## Hooks\n\n### After Tool Use Hook\n\n```typescript\nimport { query, HookCallback } from \"@anthropic-ai/claude-agent-sdk\";\nimport { appendFileSync } from \"fs\";\n\nconst logFileChange: HookCallback = async (input) => {\n const filePath = (input as any).tool_input?.file_path ?? \"unknown\";\n appendFileSync(\n \"./audit.log\",\n `${new Date().toISOString()}: modified ${filePath}\\n`,\n );\n return {};\n};\n\nfor await (const message of query({\n prompt: \"Refactor utils.py to improve readability\",\n options: {\n allowedTools: [\"Read\", \"Edit\", \"Write\"],\n permissionMode: \"acceptEdits\",\n hooks: {\n PostToolUse: [{ matcher: \"Edit|Write\", hooks: [logFileChange] }],\n },\n },\n})) {\n if (\"result\" in message) console.log(message.result);\n}\n```\n\n---\n\n## Subagents\n\n```typescript\nimport { query } from \"@anthropic-ai/claude-agent-sdk\";\n\nfor await (const message of query({\n prompt: \"Use the code-reviewer agent to review this codebase\",\n options: {\n allowedTools: [\"Read\", \"Glob\", \"Grep\", \"Agent\"],\n agents: {\n \"code-reviewer\": {\n description: \"Expert code reviewer for quality and security reviews.\",\n prompt: \"Analyze code quality and suggest improvements.\",\n tools: [\"Read\", \"Glob\", \"Grep\"],\n },\n },\n },\n})) {\n if (\"result\" in message) console.log(message.result);\n}\n```\n\n---\n\n## MCP Server Integration\n\n### Browser Automation (Playwright)\n\n```typescript\nfor await (const message of query({\n prompt: \"Open example.com and describe what you see\",\n options: {\n mcpServers: {\n playwright: { command: \"npx\", args: [\"@playwright/mcp@latest\"] },\n },\n },\n})) {\n if (\"result\" in message) console.log(message.result);\n}\n```\n\n---\n\n## Session Resumption\n\n```typescript\nimport { query } from \"@anthropic-ai/claude-agent-sdk\";\n\nlet sessionId: string | undefined;\n\n// First query: capture the session ID\nfor await (const message of query({\n prompt: \"Read the authentication module\",\n options: { allowedTools: [\"Read\", \"Glob\"] },\n})) {\n if (message.type === \"system\" && message.subtype === \"init\") {\n sessionId = message.session_id;\n }\n}\n\n// Resume with full context from the first query\nfor await (const message of query({\n prompt: \"Now find all places that call it\",\n options: { resume: sessionId },\n})) {\n if (\"result\" in message) console.log(message.result);\n}\n```\n\n---\n\n## Session History\n\n```typescript\nimport { listSessions, 
getSessionMessages, getSessionInfo } from \"@anthropic-ai/claude-agent-sdk\";\n\nasync function main() {\n // List past sessions (supports pagination via limit/offset)\n const sessions = await listSessions();\n for (const session of sessions) {\n console.log(`Session ${session.sessionId} in ${session.cwd} (tag: ${session.tag})`);\n }\n\n // Get metadata for a single session\n if (sessions.length > 0) {\n const info = await getSessionInfo(sessions[0].sessionId);\n console.log(`Created: ${info.createdAt}, Tag: ${info.tag}`);\n }\n\n // Retrieve messages from the most recent session\n if (sessions.length > 0) {\n const messages = await getSessionMessages(sessions[0].sessionId, { limit: 50 });\n for (const msg of messages) {\n console.log(msg);\n }\n }\n}\n\nmain();\n```\n\n---\n\n## Session Mutations\n\n```typescript\nimport { renameSession, tagSession, forkSession } from \"@anthropic-ai/claude-agent-sdk\";\n\nasync function main() {\n const sessionId = \"your-session-id\";\n\n // Rename a session\n await renameSession(sessionId, \"Refactoring auth module\");\n\n // Tag a session for filtering\n await tagSession(sessionId, \"experiment-v2\");\n\n // Clear a tag\n await tagSession(sessionId, null);\n\n // Fork a conversation to branch from a point\n const { sessionId: forkedId } = await forkSession(sessionId);\n console.log(`Forked session: ${forkedId}`);\n}\n\nmain();\n```\n\n---\n\n## Custom System Prompt\n\n```typescript\nimport { query } from \"@anthropic-ai/claude-agent-sdk\";\n\nfor await (const message of query({\n prompt: \"Review this code\",\n options: {\n allowedTools: [\"Read\", \"Glob\", \"Grep\"],\n systemPrompt: `You are a senior code reviewer focused on:\n1. Security vulnerabilities\n2. Performance issues\n3. Code maintainability\n\nAlways provide specific line numbers and suggestions for improvement.`,\n },\n})) {\n if (\"result\" in message) console.log(message.result);\n}\n```\n" + } + ] + }, + { + "name": "claude-api", + "node_type": "folder", + "children": [ + { + "name": "README.md", + "node_type": "file", + "content": "# Claude API — TypeScript\n\n## Installation\n\n```bash\nnpm install @anthropic-ai/sdk\n```\n\n## Client Initialization\n\n```typescript\nimport Anthropic from \"@anthropic-ai/sdk\";\n\n// Default (uses ANTHROPIC_API_KEY env var)\nconst client = new Anthropic();\n\n// Explicit API key\nconst client = new Anthropic({ apiKey: \"your-api-key\" });\n```\n\n---\n\n## Basic Message Request\n\n```typescript\nconst response = await client.messages.create({\n model: \"claude-opus-4-6\",\n max_tokens: 16000,\n messages: [{ role: \"user\", content: \"What is the capital of France?\" }],\n});\n// response.content is ContentBlock[] — a discriminated union. Narrow by .type\n// before accessing .text (TypeScript will error on content[0].text without this).\nfor (const block of response.content) {\n if (block.type === \"text\") {\n console.log(block.text);\n }\n}\n```\n\n---\n\n## System Prompts\n\n```typescript\nconst response = await client.messages.create({\n model: \"claude-opus-4-6\",\n max_tokens: 16000,\n system:\n \"You are a helpful coding assistant. 
Always provide examples in Python.\",\n messages: [{ role: \"user\", content: \"How do I read a JSON file?\" }],\n});\n```\n\n---\n\n## Vision (Images)\n\n### URL\n\n```typescript\nconst response = await client.messages.create({\n model: \"claude-opus-4-6\",\n max_tokens: 16000,\n messages: [\n {\n role: \"user\",\n content: [\n {\n type: \"image\",\n source: { type: \"url\", url: \"https://example.com/image.png\" },\n },\n { type: \"text\", text: \"Describe this image\" },\n ],\n },\n ],\n});\n```\n\n### Base64\n\n```typescript\nimport fs from \"fs\";\n\nconst imageData = fs.readFileSync(\"image.png\").toString(\"base64\");\n\nconst response = await client.messages.create({\n model: \"claude-opus-4-6\",\n max_tokens: 16000,\n messages: [\n {\n role: \"user\",\n content: [\n {\n type: \"image\",\n source: { type: \"base64\", media_type: \"image/png\", data: imageData },\n },\n { type: \"text\", text: \"What's in this image?\" },\n ],\n },\n ],\n});\n```\n\n---\n\n## Prompt Caching\n\n**Caching is a prefix match** — any byte change anywhere in the prefix invalidates everything after it. For placement patterns, architectural guidance (frozen system prompt, deterministic tool order, where to put volatile content), and the silent-invalidator audit checklist, read `shared/prompt-caching.md`.\n\n### Automatic Caching (Recommended)\n\nUse top-level `cache_control` to automatically cache the last cacheable block in the request:\n\n```typescript\nconst response = await client.messages.create({\n model: \"claude-opus-4-6\",\n max_tokens: 16000,\n cache_control: { type: \"ephemeral\" }, // auto-caches the last cacheable block\n system: \"You are an expert on this large document...\",\n messages: [{ role: \"user\", content: \"Summarize the key points\" }],\n});\n```\n\n### Manual Cache Control\n\nFor fine-grained control, add `cache_control` to specific content blocks:\n\n```typescript\nconst response = await client.messages.create({\n model: \"claude-opus-4-6\",\n max_tokens: 16000,\n system: [\n {\n type: \"text\",\n text: \"You are an expert on this large document...\",\n cache_control: { type: \"ephemeral\" }, // default TTL is 5 minutes\n },\n ],\n messages: [{ role: \"user\", content: \"Summarize the key points\" }],\n});\n\n// With explicit TTL (time-to-live)\nconst response2 = await client.messages.create({\n model: \"claude-opus-4-6\",\n max_tokens: 16000,\n system: [\n {\n type: \"text\",\n text: \"You are an expert on this large document...\",\n cache_control: { type: \"ephemeral\", ttl: \"1h\" }, // 1 hour TTL\n },\n ],\n messages: [{ role: \"user\", content: \"Summarize the key points\" }],\n});\n```\n\n### Verifying Cache Hits\n\n```typescript\nconsole.log(response.usage.cache_creation_input_tokens); // tokens written to cache (~1.25x cost)\nconsole.log(response.usage.cache_read_input_tokens); // tokens served from cache (~0.1x cost)\nconsole.log(response.usage.input_tokens); // uncached tokens (full cost)\n```\n\nIf `cache_read_input_tokens` is zero across repeated identical-prefix requests, a silent invalidator is at work — `Date.now()` or a UUID in the system prompt, non-deterministic key ordering, or a varying tool set. See `shared/prompt-caching.md` for the full audit table.\n\n---\n\n## Extended Thinking\n\n> **Opus 4.6 and Sonnet 4.6:** Use adaptive thinking. 
`budget_tokens` is deprecated on both Opus 4.6 and Sonnet 4.6.\n> **Older models:** Use `thinking: {type: \"enabled\", budget_tokens: N}` (must be < `max_tokens`, min 1024).\n\n```typescript\n// Opus 4.6: adaptive thinking (recommended)\nconst response = await client.messages.create({\n model: \"claude-opus-4-6\",\n max_tokens: 16000,\n thinking: { type: \"adaptive\" },\n output_config: { effort: \"high\" }, // low | medium | high | max\n messages: [\n { role: \"user\", content: \"Solve this math problem step by step...\" },\n ],\n});\n\nfor (const block of response.content) {\n if (block.type === \"thinking\") {\n console.log(\"Thinking:\", block.thinking);\n } else if (block.type === \"text\") {\n console.log(\"Response:\", block.text);\n }\n}\n```\n\n---\n\n## Error Handling\n\nUse the SDK's typed exception classes — never check error messages with string matching:\n\n```typescript\nimport Anthropic from \"@anthropic-ai/sdk\";\n\ntry {\n const response = await client.messages.create({...});\n} catch (error) {\n if (error instanceof Anthropic.BadRequestError) {\n console.error(\"Bad request:\", error.message);\n } else if (error instanceof Anthropic.AuthenticationError) {\n console.error(\"Invalid API key\");\n } else if (error instanceof Anthropic.RateLimitError) {\n console.error(\"Rate limited - retry later\");\n } else if (error instanceof Anthropic.APIError) {\n console.error(`API error ${error.status}:`, error.message);\n }\n}\n```\n\nAll classes extend `Anthropic.APIError` with a typed `status` field. Check from most specific to least specific. See [shared/error-codes.md](../../shared/error-codes.md) for the full error code reference.\n\n---\n\n## Multi-Turn Conversations\n\nThe API is stateless — send the full conversation history each time. Use `Anthropic.MessageParam[]` to type the messages array:\n\n```typescript\nconst messages: Anthropic.MessageParam[] = [\n { role: \"user\", content: \"My name is Alice.\" },\n { role: \"assistant\", content: \"Hello Alice! Nice to meet you.\" },\n { role: \"user\", content: \"What's my name?\" },\n];\n\nconst response = await client.messages.create({\n model: \"claude-opus-4-6\",\n max_tokens: 16000,\n messages: messages,\n});\n```\n\n**Rules:**\n\n- Consecutive same-role messages are allowed — the API combines them into a single turn\n- First message must be `user`\n- Use SDK types (`Anthropic.MessageParam`, `Anthropic.Message`, `Anthropic.Tool`, etc.) for all API data structures — don't redefine equivalent interfaces\n\n---\n\n### Compaction (long conversations)\n\n> **Beta, Opus 4.6 and Sonnet 4.6.** When conversations approach the 200K context window, compaction automatically summarizes earlier context server-side. 
The API returns a `compaction` block; you must pass it back on subsequent requests — append `response.content`, not just the text.\n\n```typescript\nimport Anthropic from \"@anthropic-ai/sdk\";\n\nconst client = new Anthropic();\nconst messages: Anthropic.Beta.BetaMessageParam[] = [];\n\nasync function chat(userMessage: string): Promise<string> {\n  messages.push({ role: \"user\", content: userMessage });\n\n  const response = await client.beta.messages.create({\n    betas: [\"compact-2026-01-12\"],\n    model: \"claude-opus-4-6\",\n    max_tokens: 16000,\n    messages,\n    context_management: {\n      edits: [{ type: \"compact_20260112\" }],\n    },\n  });\n\n  // Append full content — compaction blocks must be preserved\n  messages.push({ role: \"assistant\", content: response.content });\n\n  const textBlock = response.content.find(\n    (b): b is Anthropic.Beta.BetaTextBlock => b.type === \"text\",\n  );\n  return textBlock?.text ?? \"\";\n}\n\n// Compaction triggers automatically when context grows large\nconsole.log(await chat(\"Help me build a Python web scraper\"));\nconsole.log(await chat(\"Add support for JavaScript-rendered pages\"));\nconsole.log(await chat(\"Now add rate limiting and error handling\"));\n```\n\n---\n\n## Stop Reasons\n\nThe `stop_reason` field in the response indicates why the model stopped generating:\n\n| Value | Meaning |\n| --------------- | --------------------------------------------------------------- |\n| `end_turn` | Claude finished its response naturally |\n| `max_tokens` | Hit the `max_tokens` limit — increase it or use streaming |\n| `stop_sequence` | Hit a custom stop sequence |\n| `tool_use` | Claude wants to call a tool — execute it and continue |\n| `pause_turn` | Model paused and can be resumed (agentic flows) |\n| `refusal` | Claude refused for safety reasons — output may not match schema |\n\n---\n\n## Cost Optimization Strategies\n\n### 1. Use Prompt Caching for Repeated Context\n\n```typescript\n// Automatic caching (simplest — caches the last cacheable block)\nconst response = await client.messages.create({\n  model: \"claude-opus-4-6\",\n  max_tokens: 16000,\n  cache_control: { type: \"ephemeral\" },\n  system: largeDocumentText, // e.g., 50KB of context\n  messages: [{ role: \"user\", content: \"Summarize the key points\" }],\n});\n\n// First request: full cost\n// Subsequent requests: ~90% cheaper for cached portion\n```\n\n### 2. 
Use Token Counting Before Requests\n\n```typescript\nconst countResponse = await client.messages.countTokens({\n  model: \"claude-opus-4-6\",\n  messages: messages,\n  system: system,\n});\n\nconst estimatedInputCost = countResponse.input_tokens * 0.000005; // $5/1M tokens\nconsole.log(`Estimated input cost: $${estimatedInputCost.toFixed(4)}`);\n```\n"
          },
          {
            "name": "batches.md",
            "node_type": "file",
            "content": "# Message Batches API — TypeScript\n\nThe Batches API (`POST /v1/messages/batches`) processes Messages API requests asynchronously at 50% of standard prices.\n\n## Key Facts\n\n- Up to 100,000 requests or 256 MB per batch\n- Most batches complete within 1 hour; maximum 24 hours\n- Results available for 29 days after creation\n- 50% cost reduction on all token usage\n- All Messages API features supported (vision, tools, caching, etc.)\n\n---\n\n## Create a Batch\n\n```typescript\nimport Anthropic from \"@anthropic-ai/sdk\";\n\nconst client = new Anthropic();\n\nconst messageBatch = await client.messages.batches.create({\n  requests: [\n    {\n      custom_id: \"request-1\",\n      params: {\n        model: \"claude-opus-4-6\",\n        max_tokens: 16000,\n        messages: [\n          { role: \"user\", content: \"Summarize climate change impacts\" },\n        ],\n      },\n    },\n    {\n      custom_id: \"request-2\",\n      params: {\n        model: \"claude-opus-4-6\",\n        max_tokens: 16000,\n        messages: [\n          { role: \"user\", content: \"Explain quantum computing basics\" },\n        ],\n      },\n    },\n  ],\n});\n\nconsole.log(`Batch ID: ${messageBatch.id}`);\nconsole.log(`Status: ${messageBatch.processing_status}`);\n```\n\n---\n\n## Poll for Completion\n\n```typescript\nlet batch;\nwhile (true) {\n  batch = await client.messages.batches.retrieve(messageBatch.id);\n  if (batch.processing_status === \"ended\") break;\n  console.log(\n    `Status: ${batch.processing_status}, processing: ${batch.request_counts.processing}`,\n  );\n  await new Promise((resolve) => setTimeout(resolve, 60_000));\n}\n\nconsole.log(\"Batch complete!\");\nconsole.log(`Succeeded: ${batch.request_counts.succeeded}`);\nconsole.log(`Errored: ${batch.request_counts.errored}`);\n```\n\n---\n\n## Retrieve Results\n\n```typescript\nfor await (const result of await client.messages.batches.results(\n  messageBatch.id,\n)) {\n  switch (result.result.type) {\n    case \"succeeded\": {\n      // Narrow the content block by type before accessing .text\n      const first = result.result.message.content[0];\n      if (first.type === \"text\") {\n        console.log(`[${result.custom_id}] ${first.text.slice(0, 100)}`);\n      }\n      break;\n    }\n    case \"errored\":\n      if (result.result.error.type === \"invalid_request\") {\n        console.log(`[${result.custom_id}] Validation error - fix and retry`);\n      } else {\n        console.log(`[${result.custom_id}] Server error - safe to retry`);\n      }\n      break;\n    case \"expired\":\n      console.log(`[${result.custom_id}] Expired - resubmit`);\n      break;\n  }\n}\n```\n\n---\n\n## Cancel a Batch\n\n```typescript\nconst cancelled = await client.messages.batches.cancel(messageBatch.id);\nconsole.log(`Status: ${cancelled.processing_status}`); // \"canceling\"\n```\n"
          },
          {
            "name": "files-api.md",
            "node_type": "file",
            "content": "# Files API — TypeScript\n\nThe Files API uploads files for use in Messages API requests. 
Reference files via `file_id` in content blocks, avoiding re-uploads across multiple API calls.\n\n**Beta:** Pass `betas: [\"files-api-2025-04-14\"]` in your API calls (the SDK sets the required header automatically).\n\n## Key Facts\n\n- Maximum file size: 500 MB\n- Total storage: 100 GB per organization\n- Files persist until deleted\n- File operations (upload, list, delete) are free; content used in messages is billed as input tokens\n- Not available on Amazon Bedrock or Google Vertex AI\n\n---\n\n## Upload a File\n\n```typescript\nimport Anthropic, { toFile } from \"@anthropic-ai/sdk\";\nimport fs from \"fs\";\n\nconst client = new Anthropic();\n\nconst uploaded = await client.beta.files.upload({\n  file: await toFile(fs.createReadStream(\"report.pdf\"), undefined, {\n    type: \"application/pdf\",\n  }),\n  betas: [\"files-api-2025-04-14\"],\n});\n\nconsole.log(`File ID: ${uploaded.id}`);\nconsole.log(`Size: ${uploaded.size_bytes} bytes`);\n```\n\n---\n\n## Use a File in Messages\n\n### PDF / Text Document\n\n```typescript\nconst response = await client.beta.messages.create({\n  model: \"claude-opus-4-6\",\n  max_tokens: 16000,\n  messages: [\n    {\n      role: \"user\",\n      content: [\n        { type: \"text\", text: \"Summarize the key findings in this report.\" },\n        {\n          type: \"document\",\n          source: { type: \"file\", file_id: uploaded.id },\n          title: \"Q4 Report\",\n          citations: { enabled: true },\n        },\n      ],\n    },\n  ],\n  betas: [\"files-api-2025-04-14\"],\n});\n\n// Narrow by .type before accessing .text\nconst firstBlock = response.content[0];\nif (firstBlock.type === \"text\") console.log(firstBlock.text);\n```\n\n---\n\n## Manage Files\n\n### List Files\n\n```typescript\nconst files = await client.beta.files.list({\n  betas: [\"files-api-2025-04-14\"],\n});\nfor (const f of files.data) {\n  console.log(`${f.id}: ${f.filename} (${f.size_bytes} bytes)`);\n}\n```\n\n### Delete a File\n\n```typescript\nawait client.beta.files.delete(\"file_011CNha8iCJcU1wXNR6q4V8w\", {\n  betas: [\"files-api-2025-04-14\"],\n});\n```\n\n### Download a File\n\n```typescript\nconst response = await client.beta.files.download(\n  \"file_011CNha8iCJcU1wXNR6q4V8w\",\n  { betas: [\"files-api-2025-04-14\"] },\n);\nconst content = Buffer.from(await response.arrayBuffer());\nawait fs.promises.writeFile(\"output.txt\", content);\n```\n"
          },
          {
            "name": "streaming.md",
            "node_type": "file",
            "content": "# Streaming — TypeScript\n\n## Quick Start\n\n```typescript\nconst stream = client.messages.stream({\n  model: \"claude-opus-4-6\",\n  max_tokens: 64000,\n  messages: [{ role: \"user\", content: \"Write a story\" }],\n});\n\nfor await (const event of stream) {\n  if (\n    event.type === \"content_block_delta\" &&\n    event.delta.type === \"text_delta\"\n  ) {\n    process.stdout.write(event.delta.text);\n  }\n}\n```\n\n---\n\n## Handling Different Content Types\n\n> **Opus 4.6:** Use `thinking: {type: \"adaptive\"}`. 
On older models, use `thinking: {type: \"enabled\", budget_tokens: N}` instead.\n\n```typescript\nconst stream = client.messages.stream({\n model: \"claude-opus-4-6\",\n max_tokens: 64000,\n thinking: { type: \"adaptive\" },\n messages: [{ role: \"user\", content: \"Analyze this problem\" }],\n});\n\nfor await (const event of stream) {\n switch (event.type) {\n case \"content_block_start\":\n switch (event.content_block.type) {\n case \"thinking\":\n console.log(\"\\n[Thinking...]\");\n break;\n case \"text\":\n console.log(\"\\n[Response:]\");\n break;\n }\n break;\n case \"content_block_delta\":\n switch (event.delta.type) {\n case \"thinking_delta\":\n process.stdout.write(event.delta.thinking);\n break;\n case \"text_delta\":\n process.stdout.write(event.delta.text);\n break;\n }\n break;\n }\n}\n```\n\n---\n\n## Streaming with Tool Use (Tool Runner)\n\nUse the tool runner with `stream: true`. The outer loop iterates over tool runner iterations (messages), the inner loop processes stream events:\n\n```typescript\nimport Anthropic from \"@anthropic-ai/sdk\";\nimport { betaZodTool } from \"@anthropic-ai/sdk/helpers/beta/zod\";\nimport { z } from \"zod\";\n\nconst client = new Anthropic();\n\nconst getWeather = betaZodTool({\n name: \"get_weather\",\n description: \"Get current weather for a location\",\n inputSchema: z.object({\n location: z.string().describe(\"City and state, e.g., San Francisco, CA\"),\n }),\n run: async ({ location }) => `72°F and sunny in ${location}`,\n});\n\nconst runner = client.beta.messages.toolRunner({\n model: \"claude-opus-4-6\",\n max_tokens: 64000,\n tools: [getWeather],\n messages: [\n { role: \"user\", content: \"What's the weather in Paris and London?\" },\n ],\n stream: true,\n});\n\n// Outer loop: each tool runner iteration\nfor await (const messageStream of runner) {\n // Inner loop: stream events for this iteration\n for await (const event of messageStream) {\n switch (event.type) {\n case \"content_block_delta\":\n switch (event.delta.type) {\n case \"text_delta\":\n process.stdout.write(event.delta.text);\n break;\n case \"input_json_delta\":\n // Tool input being streamed\n break;\n }\n break;\n }\n }\n}\n```\n\n---\n\n## Getting the Final Message\n\n```typescript\nconst stream = client.messages.stream({\n model: \"claude-opus-4-6\",\n max_tokens: 64000,\n messages: [{ role: \"user\", content: \"Hello\" }],\n});\n\nfor await (const event of stream) {\n // Process events...\n}\n\nconst finalMessage = await stream.finalMessage();\nconsole.log(`Tokens used: ${finalMessage.usage.output_tokens}`);\n```\n\n---\n\n## Stream Event Types\n\n| Event Type | Description | When it fires |\n| --------------------- | --------------------------- | --------------------------------- |\n| `message_start` | Contains message metadata | Once at the beginning |\n| `content_block_start` | New content block beginning | When a text/tool_use block starts |\n| `content_block_delta` | Incremental content update | For each token/chunk |\n| `content_block_stop` | Content block complete | When a block finishes |\n| `message_delta` | Message-level updates | Contains `stop_reason`, usage |\n| `message_stop` | Message complete | Once at the end |\n\n## Best Practices\n\n1. **Always flush output** — Use `process.stdout.write()` for immediate display\n2. **Handle partial responses** — If the stream is interrupted, you may have incomplete content\n3. **Track token usage** — The `message_delta` event contains usage information\n4. 
**Use `finalMessage()`** — Get the complete `Anthropic.Message` object even when streaming. Don't wrap `.on()` events in `new Promise()` — `finalMessage()` handles all completion/error/abort states internally\n5. **Buffer for web UIs** — Consider buffering a few tokens before rendering to avoid excessive DOM updates\n6. **Use `stream.on(\"text\", ...)` for deltas** — The `text` event provides just the delta string, simpler than manually filtering `content_block_delta` events\n7. **For agentic loops with streaming** — See the [Streaming Manual Loop](./tool-use.md#streaming-manual-loop) section in tool-use.md for combining `stream()` + `finalMessage()` with a tool-use loop\n\n## Raw SSE Format\n\nIf using raw HTTP (not SDKs), the stream returns Server-Sent Events:\n\n```\nevent: message_start\ndata: {\"type\":\"message_start\",\"message\":{\"id\":\"msg_...\",\"type\":\"message\",...}}\n\nevent: content_block_start\ndata: {\"type\":\"content_block_start\",\"index\":0,\"content_block\":{\"type\":\"text\",\"text\":\"\"}}\n\nevent: content_block_delta\ndata: {\"type\":\"content_block_delta\",\"index\":0,\"delta\":{\"type\":\"text_delta\",\"text\":\"Hello\"}}\n\nevent: content_block_stop\ndata: {\"type\":\"content_block_stop\",\"index\":0}\n\nevent: message_delta\ndata: {\"type\":\"message_delta\",\"delta\":{\"stop_reason\":\"end_turn\"},\"usage\":{\"output_tokens\":12}}\n\nevent: message_stop\ndata: {\"type\":\"message_stop\"}\n```\n" + }, + { + "name": "tool-use.md", + "node_type": "file", + "content": "# Tool Use — TypeScript\n\nFor conceptual overview (tool definitions, tool choice, tips), see [shared/tool-use-concepts.md](../../shared/tool-use-concepts.md).\n\n## Tool Runner (Recommended)\n\n**Beta:** The tool runner is in beta in the TypeScript SDK.\n\nUse `betaZodTool` with Zod schemas to define tools with a `run` function, then pass them to `client.beta.messages.toolRunner()`:\n\n```typescript\nimport Anthropic from \"@anthropic-ai/sdk\";\nimport { betaZodTool } from \"@anthropic-ai/sdk/helpers/beta/zod\";\nimport { z } from \"zod\";\n\nconst client = new Anthropic();\n\nconst getWeather = betaZodTool({\n name: \"get_weather\",\n description: \"Get current weather for a location\",\n inputSchema: z.object({\n location: z.string().describe(\"City and state, e.g., San Francisco, CA\"),\n unit: z.enum([\"celsius\", \"fahrenheit\"]).optional(),\n }),\n run: async (input) => {\n // Your implementation here\n return `72°F and sunny in ${input.location}`;\n },\n});\n\n// The tool runner handles the agentic loop and returns the final message\nconst finalMessage = await client.beta.messages.toolRunner({\n model: \"claude-opus-4-6\",\n max_tokens: 16000,\n tools: [getWeather],\n messages: [{ role: \"user\", content: \"What's the weather in Paris?\" }],\n});\n\nconsole.log(finalMessage.content);\n```\n\n**Key benefits of the tool runner:**\n\n- No manual loop — the SDK handles calling tools and feeding results back\n- Type-safe tool inputs via Zod schemas\n- Tool schemas are generated automatically from Zod definitions\n- Iteration stops automatically when Claude has no more tool calls\n\n---\n\n## Manual Agentic Loop\n\nUse this when you need fine-grained control (custom logging, conditional tool execution, streaming individual iterations, human-in-the-loop approval):\n\n```typescript\nimport Anthropic from \"@anthropic-ai/sdk\";\n\nconst client = new Anthropic();\nconst tools: Anthropic.Tool[] = [...]; // Your tool definitions\nlet messages: Anthropic.MessageParam[] = [{ role: \"user\", content: 
userInput }];\n\nwhile (true) {\n const response = await client.messages.create({\n model: \"claude-opus-4-6\",\n max_tokens: 16000,\n tools: tools,\n messages: messages,\n });\n\n if (response.stop_reason === \"end_turn\") break;\n\n // Server-side tool hit iteration limit; append assistant turn and re-send to continue\n if (response.stop_reason === \"pause_turn\") {\n messages.push({ role: \"assistant\", content: response.content });\n continue;\n }\n\n const toolUseBlocks = response.content.filter(\n (b): b is Anthropic.ToolUseBlock => b.type === \"tool_use\",\n );\n\n messages.push({ role: \"assistant\", content: response.content });\n\n const toolResults: Anthropic.ToolResultBlockParam[] = [];\n for (const tool of toolUseBlocks) {\n const result = await executeTool(tool.name, tool.input);\n toolResults.push({\n type: \"tool_result\",\n tool_use_id: tool.id,\n content: result,\n });\n }\n\n messages.push({ role: \"user\", content: toolResults });\n}\n```\n\n### Streaming Manual Loop\n\nUse `client.messages.stream()` + `finalMessage()` instead of `.create()` when you need streaming within a manual loop. Text deltas are streamed on each iteration; `finalMessage()` collects the complete `Message` so you can inspect `stop_reason` and extract tool-use blocks:\n\n```typescript\nimport Anthropic from \"@anthropic-ai/sdk\";\n\nconst client = new Anthropic();\nconst tools: Anthropic.Tool[] = [...];\nlet messages: Anthropic.MessageParam[] = [{ role: \"user\", content: userInput }];\n\nwhile (true) {\n const stream = client.messages.stream({\n model: \"claude-opus-4-6\",\n max_tokens: 64000,\n tools,\n messages,\n });\n\n // Stream text deltas on each iteration\n stream.on(\"text\", (delta) => {\n process.stdout.write(delta);\n });\n\n // finalMessage() resolves with the complete Message — no need to\n // manually wire up .on(\"message\") / .on(\"error\") / .on(\"abort\")\n const message = await stream.finalMessage();\n\n if (message.stop_reason === \"end_turn\") break;\n\n // Server-side tool hit iteration limit; append assistant turn and re-send to continue\n if (message.stop_reason === \"pause_turn\") {\n messages.push({ role: \"assistant\", content: message.content });\n continue;\n }\n\n const toolUseBlocks = message.content.filter(\n (b): b is Anthropic.ToolUseBlock => b.type === \"tool_use\",\n );\n\n messages.push({ role: \"assistant\", content: message.content });\n\n const toolResults: Anthropic.ToolResultBlockParam[] = [];\n for (const tool of toolUseBlocks) {\n const result = await executeTool(tool.name, tool.input);\n toolResults.push({\n type: \"tool_result\",\n tool_use_id: tool.id,\n content: result,\n });\n }\n\n messages.push({ role: \"user\", content: toolResults });\n}\n```\n\n> **Important:** Don't wrap `.on()` events in `new Promise()` to collect the final message — use `stream.finalMessage()` instead. The SDK handles all error/abort/completion states internally.\n\n> **Error handling in the loop:** Use the SDK's typed exceptions (e.g., `Anthropic.RateLimitError`, `Anthropic.APIError`) — see [Error Handling](./README.md#error-handling) for examples. Don't check error messages with string matching.\n\n> **SDK types:** Use `Anthropic.MessageParam`, `Anthropic.Tool`, `Anthropic.ToolUseBlock`, `Anthropic.ToolResultBlockParam`, `Anthropic.Message`, etc. for all API-related data structures. 
Don't redefine equivalent interfaces.\n\n---\n\n## Handling Tool Results\n\n```typescript\nconst response = await client.messages.create({\n model: \"claude-opus-4-6\",\n max_tokens: 16000,\n tools: tools,\n messages: [{ role: \"user\", content: \"What's the weather in Paris?\" }],\n});\n\nfor (const block of response.content) {\n if (block.type === \"tool_use\") {\n const result = await executeTool(block.name, block.input);\n\n // Assumes one tool_use block: every tool_use in a turn needs a matching tool_result in the next user message\n const followup = await client.messages.create({\n model: \"claude-opus-4-6\",\n max_tokens: 16000,\n tools: tools,\n messages: [\n { role: \"user\", content: \"What's the weather in Paris?\" },\n { role: \"assistant\", content: response.content },\n {\n role: \"user\",\n content: [\n { type: \"tool_result\", tool_use_id: block.id, content: result },\n ],\n },\n ],\n });\n }\n}\n```\n\n---\n\n## Tool Choice\n\n```typescript\nconst response = await client.messages.create({\n model: \"claude-opus-4-6\",\n max_tokens: 16000,\n tools: tools,\n tool_choice: { type: \"tool\", name: \"get_weather\" },\n messages: [{ role: \"user\", content: \"What's the weather in Paris?\" }],\n});\n```\n\n---\n\n## Server-Side Tools\n\nVersion-suffixed `type` literals; `name` is fixed per interface. Pass plain object literals — the `ToolUnion` type is satisfied structurally. **The `name`/`type` pair must match the interface**: mixing `str_replace_based_edit_tool` (20250728 name) with `text_editor_20250124` (which expects `str_replace_editor`) is a TS2322.\n\n**Don't type-annotate as `Tool[]`** — `Tool` is just the custom-tool variant. Let structural typing infer from the `tools` param, or annotate as `Anthropic.Messages.ToolUnion[]` if you must:\n\n```typescript\n// ✓ let inference work — no annotation\nconst response = await client.messages.create({\n model: \"claude-opus-4-6\",\n max_tokens: 16000,\n tools: [\n { type: \"text_editor_20250728\", name: \"str_replace_based_edit_tool\" },\n { type: \"bash_20250124\", name: \"bash\" },\n { type: \"web_search_20260209\", name: \"web_search\" },\n { type: \"code_execution_20260120\", name: \"code_execution\" },\n ],\n messages: [{ role: \"user\", content: \"...\" }],\n});\n\n// ✗ this is a TS2322 — Tool is the CUSTOM tool variant only\n// const tools: Anthropic.Tool[] = [{ type: \"text_editor_20250728\", ... 
}]\n```\n\n| Interface | `name` | `type` |\n|---|---|---|\n| `ToolTextEditor20250124` | `str_replace_editor` | `text_editor_20250124` |\n| `ToolTextEditor20250429` | `str_replace_based_edit_tool` | `text_editor_20250429` |\n| `ToolTextEditor20250728` | `str_replace_based_edit_tool` | `text_editor_20250728` |\n| `ToolBash20250124` | `bash` | `bash_20250124` |\n| `WebSearchTool20260209` | `web_search` | `web_search_20260209` |\n| `WebFetchTool20260209` | `web_fetch` | `web_fetch_20260209` |\n| `CodeExecutionTool20260120` | `code_execution` | `code_execution_20260120` |\n\n**Don't mix beta and non-beta types**: if you call `client.beta.messages.create()`, the response `content` is `BetaContentBlock[]` — you cannot pass that to a non-beta `ContentBlockParam[]` without narrowing each element.\n\n---\n\n\n## Code Execution\n\n### Basic Usage\n\n```typescript\nimport Anthropic from \"@anthropic-ai/sdk\";\n\nconst client = new Anthropic();\n\nconst response = await client.messages.create({\n model: \"claude-opus-4-6\",\n max_tokens: 16000,\n messages: [\n {\n role: \"user\",\n content:\n \"Calculate the mean and standard deviation of [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\",\n },\n ],\n tools: [{ type: \"code_execution_20260120\", name: \"code_execution\" }],\n});\n```\n\n### Reading Local Files (ESM note)\n\n`__dirname` doesn't exist in ES modules. For script-relative paths use `import.meta.url`:\n\n```typescript\nimport { readFileSync } from \"fs\";\nimport { fileURLToPath } from \"url\";\nimport { dirname, join } from \"path\";\n\nconst __dirname = dirname(fileURLToPath(import.meta.url));\nconst pdfBytes = readFileSync(join(__dirname, \"sample.pdf\"));\n```\n\nOr use a CWD-relative path if the script runs from a known directory: `readFileSync(\"./sample.pdf\")`.\n\n### Upload Files for Analysis\n\n```typescript\nimport Anthropic, { toFile } from \"@anthropic-ai/sdk\";\nimport { createReadStream } from \"fs\";\n\nconst client = new Anthropic();\n\n// 1. Upload a file\nconst uploaded = await client.beta.files.upload({\n file: await toFile(createReadStream(\"sales_data.csv\"), undefined, {\n type: \"text/csv\",\n }),\n betas: [\"files-api-2025-04-14\"],\n});\n\n// 2. Pass to code execution\n// Code execution is GA; Files API is still beta (pass via RequestOptions)\nconst response = await client.messages.create(\n {\n model: \"claude-opus-4-6\",\n max_tokens: 16000,\n messages: [\n {\n role: \"user\",\n content: [\n {\n type: \"text\",\n text: \"Analyze this sales data. 
Show trends and create a visualization.\",\n },\n { type: \"container_upload\", file_id: uploaded.id },\n ],\n },\n ],\n tools: [{ type: \"code_execution_20260120\", name: \"code_execution\" }],\n },\n { headers: { \"anthropic-beta\": \"files-api-2025-04-14\" } },\n);\n```\n\n### Retrieve Generated Files\n\n```typescript\nimport path from \"path\";\nimport fs from \"fs\";\n\nconst OUTPUT_DIR = \"./claude_outputs\";\nawait fs.promises.mkdir(OUTPUT_DIR, { recursive: true });\n\nfor (const block of response.content) {\n if (block.type === \"bash_code_execution_tool_result\") {\n const result = block.content;\n if (result.type === \"bash_code_execution_result\" && result.content) {\n for (const fileRef of result.content) {\n if (fileRef.type === \"bash_code_execution_output\") {\n const metadata = await client.beta.files.retrieveMetadata(\n fileRef.file_id,\n );\n const downloadResponse = await client.beta.files.download(fileRef.file_id);\n const fileBytes = Buffer.from(await downloadResponse.arrayBuffer());\n const safeName = path.basename(metadata.filename);\n if (!safeName || safeName === \".\" || safeName === \"..\") {\n console.warn(`Skipping invalid filename: ${metadata.filename}`);\n continue;\n }\n const outputPath = path.join(OUTPUT_DIR, safeName);\n await fs.promises.writeFile(outputPath, fileBytes);\n console.log(`Saved: ${outputPath}`);\n }\n }\n }\n }\n}\n```\n\n### Container Reuse\n\n```typescript\n// First request: set up environment\nconst response1 = await client.messages.create({\n model: \"claude-opus-4-6\",\n max_tokens: 16000,\n messages: [\n {\n role: \"user\",\n content: \"Install tabulate and create data.json with sample user data\",\n },\n ],\n tools: [{ type: \"code_execution_20260120\", name: \"code_execution\" }],\n});\n\n// Reuse container\n// container is nullable — set only when using server-side code execution\nconst containerId = response1.container!.id;\n\nconst response2 = await client.messages.create({\n container: containerId,\n model: \"claude-opus-4-6\",\n max_tokens: 16000,\n messages: [\n {\n role: \"user\",\n content: \"Read data.json and display as a formatted table\",\n },\n ],\n tools: [{ type: \"code_execution_20260120\", name: \"code_execution\" }],\n});\n```\n\n---\n\n## Memory Tool\n\n### Basic Usage\n\n```typescript\nconst response = await client.messages.create({\n model: \"claude-opus-4-6\",\n max_tokens: 16000,\n messages: [\n {\n role: \"user\",\n content: \"Remember that my preferred language is TypeScript.\",\n },\n ],\n tools: [{ type: \"memory_20250818\", name: \"memory\" }],\n});\n```\n\n### SDK Memory Helper\n\nUse `betaMemoryTool` with a `MemoryToolHandlers` implementation:\n\n```typescript\nimport {\n betaMemoryTool,\n type MemoryToolHandlers,\n} from \"@anthropic-ai/sdk/helpers/beta/memory\";\n\nconst handlers: MemoryToolHandlers = {\n async view(command) { ... },\n async create(command) { ... },\n async str_replace(command) { ... },\n async insert(command) { ... },\n async delete(command) { ... },\n async rename(command) { ... 
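\n // Bodies elided: implement each command against your own storage (filesystem, database, etc.)\n 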
},\n};\n\nconst memory = betaMemoryTool(handlers);\n\nconst runner = client.beta.messages.toolRunner({\n model: \"claude-opus-4-6\",\n max_tokens: 16000,\n tools: [memory],\n messages: [{ role: \"user\", content: \"Remember my preferences\" }],\n});\n\nfor await (const message of runner) {\n console.log(message);\n}\n```\n\nFor full implementation examples, use WebFetch:\n\n- `https://github.com/anthropics/anthropic-sdk-typescript/blob/main/examples/tools-helpers-memory.ts`\n\n---\n\n## Structured Outputs\n\n### JSON Outputs (Zod — Recommended)\n\n```typescript\nimport Anthropic from \"@anthropic-ai/sdk\";\nimport { z } from \"zod\";\nimport { zodOutputFormat } from \"@anthropic-ai/sdk/helpers/zod\";\n\nconst ContactInfoSchema = z.object({\n name: z.string(),\n email: z.string(),\n plan: z.string(),\n interests: z.array(z.string()),\n demo_requested: z.boolean(),\n});\n\nconst client = new Anthropic();\n\nconst response = await client.messages.parse({\n model: \"claude-opus-4-6\",\n max_tokens: 16000,\n messages: [\n {\n role: \"user\",\n content:\n \"Extract: Jane Doe (jane@co.com) wants Enterprise, interested in API and SDKs, wants a demo.\",\n },\n ],\n output_config: {\n format: zodOutputFormat(ContactInfoSchema),\n },\n});\n\n// parsed_output is null if parsing failed — assert or guard\nconsole.log(response.parsed_output!.name); // \"Jane Doe\"\n```\n\n### Strict Tool Use\n\n```typescript\nconst response = await client.messages.create({\n model: \"claude-opus-4-6\",\n max_tokens: 16000,\n messages: [\n {\n role: \"user\",\n content: \"Book a flight to Tokyo for 2 passengers on March 15\",\n },\n ],\n tools: [\n {\n name: \"book_flight\",\n description: \"Book a flight to a destination\",\n strict: true,\n input_schema: {\n type: \"object\",\n properties: {\n destination: { type: \"string\" },\n date: { type: \"string\", format: \"date\" },\n passengers: {\n type: \"integer\",\n enum: [1, 2, 3, 4, 5, 6, 7, 8],\n },\n },\n required: [\"destination\", \"date\", \"passengers\"],\n additionalProperties: false,\n },\n },\n ],\n});\n```\n" + } + ] + } + ] + } +] + +export default children diff --git a/web/app/components/workflow/skill/start-tab/templates/skills/docx.ts b/web/app/components/workflow/skill/start-tab/templates/skills/docx.ts index 57673ed82e..47d37e89ed 100644 --- a/web/app/components/workflow/skill/start-tab/templates/skills/docx.ts +++ b/web/app/components/workflow/skill/start-tab/templates/skills/docx.ts @@ -6,7 +6,7 @@ const children: SkillTemplateNode[] = [ { "name": "SKILL.md", "node_type": "file", - "content": "---\nname: docx\ndescription: \"Use this skill whenever the user wants to create, read, edit, or manipulate Word documents (.docx files). Triggers include: any mention of \\\"Word doc\\\", \\\"word document\\\", \\\".docx\\\", or requests to produce professional documents with formatting like tables of contents, headings, page numbers, or letterheads. Also use when extracting or reorganizing content from .docx files, inserting or replacing images in documents, performing find-and-replace in Word files, working with tracked changes or comments, or converting content into a polished Word document. If the user asks for a \\\"report\\\", \\\"memo\\\", \\\"letter\\\", \\\"template\\\", or similar deliverable as a Word or .docx file, use this skill. Do NOT use for PDFs, spreadsheets, Google Docs, or general coding tasks unrelated to document generation.\"\nlicense: Proprietary. 
LICENSE.txt has complete terms\n---\n\n# DOCX creation, editing, and analysis\n\n## Overview\n\nA .docx file is a ZIP archive containing XML files.\n\n## Quick Reference\n\n| Task | Approach |\n|------|----------|\n| Read/analyze content | `pandoc` or unpack for raw XML |\n| Create new document | Use `docx-js` - see Creating New Documents below |\n| Edit existing document | Unpack → edit XML → repack - see Editing Existing Documents below |\n\n### Converting .doc to .docx\n\nLegacy `.doc` files must be converted before editing:\n\n```bash\npython scripts/office/soffice.py --headless --convert-to docx document.doc\n```\n\n### Reading Content\n\n```bash\n# Text extraction with tracked changes\npandoc --track-changes=all document.docx -o output.md\n\n# Raw XML access\npython scripts/office/unpack.py document.docx unpacked/\n```\n\n### Converting to Images\n\n```bash\npython scripts/office/soffice.py --headless --convert-to pdf document.docx\npdftoppm -jpeg -r 150 document.pdf page\n```\n\n### Accepting Tracked Changes\n\nTo produce a clean document with all tracked changes accepted (requires LibreOffice):\n\n```bash\npython scripts/accept_changes.py input.docx output.docx\n```\n\n---\n\n## Creating New Documents\n\nGenerate .docx files with JavaScript, then validate. Install: `npm install -g docx`\n\n### Setup\n```javascript\nconst { Document, Packer, Paragraph, TextRun, Table, TableRow, TableCell, ImageRun,\n Header, Footer, AlignmentType, PageOrientation, LevelFormat, ExternalHyperlink,\n TableOfContents, HeadingLevel, BorderStyle, WidthType, ShadingType,\n VerticalAlign, PageNumber, PageBreak } = require('docx');\n\nconst doc = new Document({ sections: [{ children: [/* content */] }] });\nPacker.toBuffer(doc).then(buffer => fs.writeFileSync(\"doc.docx\", buffer));\n```\n\n### Validation\nAfter creating the file, validate it. If validation fails, unpack, fix the XML, and repack.\n```bash\npython scripts/office/validate.py doc.docx\n```\n\n### Page Size\n\n```javascript\n// CRITICAL: docx-js defaults to A4, not US Letter\n// Always set page size explicitly for consistent results\nsections: [{\n properties: {\n page: {\n size: {\n width: 12240, // 8.5 inches in DXA\n height: 15840 // 11 inches in DXA\n },\n margin: { top: 1440, right: 1440, bottom: 1440, left: 1440 } // 1 inch margins\n }\n },\n children: [/* content */]\n}]\n```\n\n**Common page sizes (DXA units, 1440 DXA = 1 inch):**\n\n| Paper | Width | Height | Content Width (1\" margins) |\n|-------|-------|--------|---------------------------|\n| US Letter | 12,240 | 15,840 | 9,360 |\n| A4 (default) | 11,906 | 16,838 | 9,026 |\n\n**Landscape orientation:** docx-js swaps width/height internally, so pass portrait dimensions and let it handle the swap:\n```javascript\nsize: {\n width: 12240, // Pass SHORT edge as width\n height: 15840, // Pass LONG edge as height\n orientation: PageOrientation.LANDSCAPE // docx-js swaps them in the XML\n},\n// Content width = 15840 - left margin - right margin (uses the long edge)\n```\n\n### Styles (Override Built-in Headings)\n\nUse Arial as the default font (universally supported). 
Keep titles black for readability.\n\n```javascript\nconst doc = new Document({\n styles: {\n default: { document: { run: { font: \"Arial\", size: 24 } } }, // 12pt default\n paragraphStyles: [\n // IMPORTANT: Use exact IDs to override built-in styles\n { id: \"Heading1\", name: \"Heading 1\", basedOn: \"Normal\", next: \"Normal\", quickFormat: true,\n run: { size: 32, bold: true, font: \"Arial\" },\n paragraph: { spacing: { before: 240, after: 240 }, outlineLevel: 0 } }, // outlineLevel required for TOC\n { id: \"Heading2\", name: \"Heading 2\", basedOn: \"Normal\", next: \"Normal\", quickFormat: true,\n run: { size: 28, bold: true, font: \"Arial\" },\n paragraph: { spacing: { before: 180, after: 180 }, outlineLevel: 1 } },\n ]\n },\n sections: [{\n children: [\n new Paragraph({ heading: HeadingLevel.HEADING_1, children: [new TextRun(\"Title\")] }),\n ]\n }]\n});\n```\n\n### Lists (NEVER use unicode bullets)\n\n```javascript\n// ❌ WRONG - never manually insert bullet characters\nnew Paragraph({ children: [new TextRun(\"• Item\")] }) // BAD\nnew Paragraph({ children: [new TextRun(\"\\u2022 Item\")] }) // BAD\n\n// ✅ CORRECT - use numbering config with LevelFormat.BULLET\nconst doc = new Document({\n numbering: {\n config: [\n { reference: \"bullets\",\n levels: [{ level: 0, format: LevelFormat.BULLET, text: \"•\", alignment: AlignmentType.LEFT,\n style: { paragraph: { indent: { left: 720, hanging: 360 } } } }] },\n { reference: \"numbers\",\n levels: [{ level: 0, format: LevelFormat.DECIMAL, text: \"%1.\", alignment: AlignmentType.LEFT,\n style: { paragraph: { indent: { left: 720, hanging: 360 } } } }] },\n ]\n },\n sections: [{\n children: [\n new Paragraph({ numbering: { reference: \"bullets\", level: 0 },\n children: [new TextRun(\"Bullet item\")] }),\n new Paragraph({ numbering: { reference: \"numbers\", level: 0 },\n children: [new TextRun(\"Numbered item\")] }),\n ]\n }]\n});\n\n// ⚠️ Each reference creates INDEPENDENT numbering\n// Same reference = continues (1,2,3 then 4,5,6)\n// Different reference = restarts (1,2,3 then 1,2,3)\n```\n\n### Tables\n\n**CRITICAL: Tables need dual widths** - set both `columnWidths` on the table AND `width` on each cell. 
Without both, tables render incorrectly on some platforms.\n\n```javascript\n// CRITICAL: Always set table width for consistent rendering\n// CRITICAL: Use ShadingType.CLEAR (not SOLID) to prevent black backgrounds\nconst border = { style: BorderStyle.SINGLE, size: 1, color: \"CCCCCC\" };\nconst borders = { top: border, bottom: border, left: border, right: border };\n\nnew Table({\n width: { size: 9360, type: WidthType.DXA }, // Always use DXA (percentages break in Google Docs)\n columnWidths: [4680, 4680], // Must sum to table width (DXA: 1440 = 1 inch)\n rows: [\n new TableRow({\n children: [\n new TableCell({\n borders,\n width: { size: 4680, type: WidthType.DXA }, // Also set on each cell\n shading: { fill: \"D5E8F0\", type: ShadingType.CLEAR }, // CLEAR not SOLID\n margins: { top: 80, bottom: 80, left: 120, right: 120 }, // Cell padding (internal, not added to width)\n children: [new Paragraph({ children: [new TextRun(\"Cell\")] })]\n })\n ]\n })\n ]\n})\n```\n\n**Table width calculation:**\n\nAlways use `WidthType.DXA` — `WidthType.PERCENTAGE` breaks in Google Docs.\n\n```javascript\n// Table width = sum of columnWidths = content width\n// US Letter with 1\" margins: 12240 - 2880 = 9360 DXA\nwidth: { size: 9360, type: WidthType.DXA },\ncolumnWidths: [7000, 2360] // Must sum to table width\n```\n\n**Width rules:**\n- **Always use `WidthType.DXA`** — never `WidthType.PERCENTAGE` (incompatible with Google Docs)\n- Table width must equal the sum of `columnWidths`\n- Cell `width` must match corresponding `columnWidth`\n- Cell `margins` are internal padding - they reduce content area, not add to cell width\n- For full-width tables: use content width (page width minus left and right margins)\n\n### Images\n\n```javascript\n// CRITICAL: type parameter is REQUIRED\nnew Paragraph({\n children: [new ImageRun({\n type: \"png\", // Required: png, jpg, jpeg, gif, bmp, svg\n data: fs.readFileSync(\"image.png\"),\n transformation: { width: 200, height: 150 },\n altText: { title: \"Title\", description: \"Desc\", name: \"Name\" } // All three required\n })]\n})\n```\n\n### Page Breaks\n\n```javascript\n// CRITICAL: PageBreak must be inside a Paragraph\nnew Paragraph({ children: [new PageBreak()] })\n\n// Or use pageBreakBefore\nnew Paragraph({ pageBreakBefore: true, children: [new TextRun(\"New page\")] })\n```\n\n### Table of Contents\n\n```javascript\n// CRITICAL: Headings must use HeadingLevel ONLY - no custom styles\nnew TableOfContents(\"Table of Contents\", { hyperlink: true, headingStyleRange: \"1-3\" })\n```\n\n### Headers/Footers\n\n```javascript\nsections: [{\n properties: {\n page: { margin: { top: 1440, right: 1440, bottom: 1440, left: 1440 } } // 1440 = 1 inch\n },\n headers: {\n default: new Header({ children: [new Paragraph({ children: [new TextRun(\"Header\")] })] })\n },\n footers: {\n default: new Footer({ children: [new Paragraph({\n children: [new TextRun(\"Page \"), new TextRun({ children: [PageNumber.CURRENT] })]\n })] })\n },\n children: [/* content */]\n}]\n```\n\n### Critical Rules for docx-js\n\n- **Set page size explicitly** - docx-js defaults to A4; use US Letter (12240 x 15840 DXA) for US documents\n- **Landscape: pass portrait dimensions** - docx-js swaps width/height internally; pass short edge as `width`, long edge as `height`, and set `orientation: PageOrientation.LANDSCAPE`\n- **Never use `\\n`** - use separate Paragraph elements\n- **Never use unicode bullets** - use `LevelFormat.BULLET` with numbering config\n- **PageBreak must be in Paragraph** - standalone 
creates invalid XML\n- **ImageRun requires `type`** - always specify png/jpg/etc\n- **Always set table `width` with DXA** - never use `WidthType.PERCENTAGE` (breaks in Google Docs)\n- **Tables need dual widths** - `columnWidths` array AND cell `width`, both must match\n- **Table width = sum of columnWidths** - for DXA, ensure they add up exactly\n- **Always add cell margins** - use `margins: { top: 80, bottom: 80, left: 120, right: 120 }` for readable padding\n- **Use `ShadingType.CLEAR`** - never SOLID for table shading\n- **TOC requires HeadingLevel only** - no custom styles on heading paragraphs\n- **Override built-in styles** - use exact IDs: \"Heading1\", \"Heading2\", etc.\n- **Include `outlineLevel`** - required for TOC (0 for H1, 1 for H2, etc.)\n\n---\n\n## Editing Existing Documents\n\n**Follow all 3 steps in order.**\n\n### Step 1: Unpack\n```bash\npython scripts/office/unpack.py document.docx unpacked/\n```\nExtracts XML, pretty-prints, merges adjacent runs, and converts smart quotes to XML entities (`“` etc.) so they survive editing. Use `--merge-runs false` to skip run merging.\n\n### Step 2: Edit XML\n\nEdit files in `unpacked/word/`. See XML Reference below for patterns.\n\n**Use \"Claude\" as the author** for tracked changes and comments, unless the user explicitly requests use of a different name.\n\n**Use the Edit tool directly for string replacement. Do not write Python scripts.** Scripts introduce unnecessary complexity. The Edit tool shows exactly what is being replaced.\n\n**CRITICAL: Use smart quotes for new content.** When adding text with apostrophes or quotes, use XML entities to produce smart quotes:\n```xml\n\nHere’s a quote: “Hello”\n```\n| Entity | Character |\n|--------|-----------|\n| `‘` | ‘ (left single) |\n| `’` | ’ (right single / apostrophe) |\n| `“` | “ (left double) |\n| `”` | ” (right double) |\n\n**Adding comments:** Use `comment.py` to handle boilerplate across multiple XML files (text must be pre-escaped XML):\n```bash\npython scripts/comment.py unpacked/ 0 \"Comment text with & and ’\"\npython scripts/comment.py unpacked/ 1 \"Reply text\" --parent 0 # reply to comment 0\npython scripts/comment.py unpacked/ 0 \"Text\" --author \"Custom Author\" # custom author name\n```\nThen add markers to document.xml (see Comments in XML Reference).\n\n### Step 3: Pack\n```bash\npython scripts/office/pack.py unpacked/ output.docx --original document.docx\n```\nValidates with auto-repair, condenses XML, and creates DOCX. Use `--validate false` to skip.\n\n**Auto-repair will fix:**\n- `durableId` >= 0x7FFFFFFF (regenerates valid ID)\n- Missing `xml:space=\"preserve\"` on `` with whitespace\n\n**Auto-repair won't fix:**\n- Malformed XML, invalid element nesting, missing relationships, schema violations\n\n### Common Pitfalls\n\n- **Replace entire `` elements**: When adding tracked changes, replace the whole `...` block with `......` as siblings. 
Don't inject tracked change tags inside a run.\n- **Preserve `` formatting**: Copy the original run's `` block into your tracked change runs to maintain bold, font size, etc.\n\n---\n\n## XML Reference\n\n### Schema Compliance\n\n- **Element order in ``**: ``, ``, ``, ``, ``, `` last\n- **Whitespace**: Add `xml:space=\"preserve\"` to `` with leading/trailing spaces\n- **RSIDs**: Must be 8-digit hex (e.g., `00AB1234`)\n\n### Tracked Changes\n\n**Insertion:**\n```xml\n\n inserted text\n\n```\n\n**Deletion:**\n```xml\n\n deleted text\n\n```\n\n**Inside ``**: Use `` instead of ``, and `` instead of ``.\n\n**Minimal edits** - only mark what changes:\n```xml\n\nThe term is \n\n 30\n\n\n 60\n\n days.\n```\n\n**Deleting entire paragraphs/list items** - when removing ALL content from a paragraph, also mark the paragraph mark as deleted so it merges with the next paragraph. Add `` inside ``:\n```xml\n\n \n ... \n \n \n \n \n \n Entire paragraph content being deleted...\n \n\n```\nWithout the `` in ``, accepting changes leaves an empty paragraph/list item.\n\n**Rejecting another author's insertion** - nest deletion inside their insertion:\n```xml\n\n \n their inserted text\n \n\n```\n\n**Restoring another author's deletion** - add insertion after (don't modify their deletion):\n```xml\n\n deleted text\n\n\n deleted text\n\n```\n\n### Comments\n\nAfter running `comment.py` (see Step 2), add markers to document.xml. For replies, use `--parent` flag and nest markers inside the parent's.\n\n**CRITICAL: `` and `` are siblings of ``, never inside ``.**\n\n```xml\n\n\n\n deleted\n\n more text\n\n\n\n\n\n \n text\n \n\n\n\n```\n\n### Images\n\n1. Add image file to `word/media/`\n2. Add relationship to `word/_rels/document.xml.rels`:\n```xml\n\n```\n3. Add content type to `[Content_Types].xml`:\n```xml\n\n```\n4. Reference in document.xml:\n```xml\n\n \n \n \n \n \n \n \n \n \n \n\n```\n\n---\n\n## Dependencies\n\n- **pandoc**: Text extraction\n- **docx**: `npm install -g docx` (new documents)\n- **LibreOffice**: PDF conversion (auto-configured for sandboxed environments via `scripts/office/soffice.py`)\n- **Poppler**: `pdftoppm` for images\n" + "content": "---\nname: docx\ndescription: \"Use this skill whenever the user wants to create, read, edit, or manipulate Word documents (.docx files). Triggers include: any mention of 'Word doc', 'word document', '.docx', or requests to produce professional documents with formatting like tables of contents, headings, page numbers, or letterheads. Also use when extracting or reorganizing content from .docx files, inserting or replacing images in documents, performing find-and-replace in Word files, working with tracked changes or comments, or converting content into a polished Word document. If the user asks for a 'report', 'memo', 'letter', 'template', or similar deliverable as a Word or .docx file, use this skill. Do NOT use for PDFs, spreadsheets, Google Docs, or general coding tasks unrelated to document generation.\"\nlicense: Proprietary. 
LICENSE.txt has complete terms\n---\n\n# DOCX creation, editing, and analysis\n\n## Overview\n\nA .docx file is a ZIP archive containing XML files.\n\n## Quick Reference\n\n| Task | Approach |\n|------|----------|\n| Read/analyze content | `pandoc` or unpack for raw XML |\n| Create new document | Use `docx-js` - see Creating New Documents below |\n| Edit existing document | Unpack → edit XML → repack - see Editing Existing Documents below |\n\n### Converting .doc to .docx\n\nLegacy `.doc` files must be converted before editing:\n\n```bash\npython scripts/office/soffice.py --headless --convert-to docx document.doc\n```\n\n### Reading Content\n\n```bash\n# Text extraction with tracked changes\npandoc --track-changes=all document.docx -o output.md\n\n# Raw XML access\npython scripts/office/unpack.py document.docx unpacked/\n```\n\n### Converting to Images\n\n```bash\npython scripts/office/soffice.py --headless --convert-to pdf document.docx\npdftoppm -jpeg -r 150 document.pdf page\n```\n\n### Accepting Tracked Changes\n\nTo produce a clean document with all tracked changes accepted (requires LibreOffice):\n\n```bash\npython scripts/accept_changes.py input.docx output.docx\n```\n\n---\n\n## Creating New Documents\n\nGenerate .docx files with JavaScript, then validate. Install: `npm install -g docx`\n\n### Setup\n```javascript\nconst { Document, Packer, Paragraph, TextRun, Table, TableRow, TableCell, ImageRun,\n Header, Footer, AlignmentType, PageOrientation, LevelFormat, ExternalHyperlink,\n InternalHyperlink, Bookmark, FootnoteReferenceRun, PositionalTab,\n PositionalTabAlignment, PositionalTabRelativeTo, PositionalTabLeader,\n TabStopType, TabStopPosition, Column, SectionType,\n TableOfContents, HeadingLevel, BorderStyle, WidthType, ShadingType,\n VerticalAlign, PageNumber, PageBreak } = require('docx');\n\nconst doc = new Document({ sections: [{ children: [/* content */] }] });\nPacker.toBuffer(doc).then(buffer => fs.writeFileSync(\"doc.docx\", buffer));\n```\n\n### Validation\nAfter creating the file, validate it. If validation fails, unpack, fix the XML, and repack.\n```bash\npython scripts/office/validate.py doc.docx\n```\n\n### Page Size\n\n```javascript\n// CRITICAL: docx-js defaults to A4, not US Letter\n// Always set page size explicitly for consistent results\nsections: [{\n properties: {\n page: {\n size: {\n width: 12240, // 8.5 inches in DXA\n height: 15840 // 11 inches in DXA\n },\n margin: { top: 1440, right: 1440, bottom: 1440, left: 1440 } // 1 inch margins\n }\n },\n children: [/* content */]\n}]\n```\n\n**Common page sizes (DXA units, 1440 DXA = 1 inch):**\n\n| Paper | Width | Height | Content Width (1\" margins) |\n|-------|-------|--------|---------------------------|\n| US Letter | 12,240 | 15,840 | 9,360 |\n| A4 (default) | 11,906 | 16,838 | 9,026 |\n\n**Landscape orientation:** docx-js swaps width/height internally, so pass portrait dimensions and let it handle the swap:\n```javascript\nsize: {\n width: 12240, // Pass SHORT edge as width\n height: 15840, // Pass LONG edge as height\n orientation: PageOrientation.LANDSCAPE // docx-js swaps them in the XML\n},\n// Content width = 15840 - left margin - right margin (uses the long edge)\n```\n\n### Styles (Override Built-in Headings)\n\nUse Arial as the default font (universally supported). 
Keep titles black for readability.\n\n```javascript\nconst doc = new Document({\n styles: {\n default: { document: { run: { font: \"Arial\", size: 24 } } }, // 12pt default\n paragraphStyles: [\n // IMPORTANT: Use exact IDs to override built-in styles\n { id: \"Heading1\", name: \"Heading 1\", basedOn: \"Normal\", next: \"Normal\", quickFormat: true,\n run: { size: 32, bold: true, font: \"Arial\" },\n paragraph: { spacing: { before: 240, after: 240 }, outlineLevel: 0 } }, // outlineLevel required for TOC\n { id: \"Heading2\", name: \"Heading 2\", basedOn: \"Normal\", next: \"Normal\", quickFormat: true,\n run: { size: 28, bold: true, font: \"Arial\" },\n paragraph: { spacing: { before: 180, after: 180 }, outlineLevel: 1 } },\n ]\n },\n sections: [{\n children: [\n new Paragraph({ heading: HeadingLevel.HEADING_1, children: [new TextRun(\"Title\")] }),\n ]\n }]\n});\n```\n\n### Lists (NEVER use unicode bullets)\n\n```javascript\n// ❌ WRONG - never manually insert bullet characters\nnew Paragraph({ children: [new TextRun(\"• Item\")] }) // BAD\nnew Paragraph({ children: [new TextRun(\"\\u2022 Item\")] }) // BAD\n\n// ✅ CORRECT - use numbering config with LevelFormat.BULLET\nconst doc = new Document({\n numbering: {\n config: [\n { reference: \"bullets\",\n levels: [{ level: 0, format: LevelFormat.BULLET, text: \"•\", alignment: AlignmentType.LEFT,\n style: { paragraph: { indent: { left: 720, hanging: 360 } } } }] },\n { reference: \"numbers\",\n levels: [{ level: 0, format: LevelFormat.DECIMAL, text: \"%1.\", alignment: AlignmentType.LEFT,\n style: { paragraph: { indent: { left: 720, hanging: 360 } } } }] },\n ]\n },\n sections: [{\n children: [\n new Paragraph({ numbering: { reference: \"bullets\", level: 0 },\n children: [new TextRun(\"Bullet item\")] }),\n new Paragraph({ numbering: { reference: \"numbers\", level: 0 },\n children: [new TextRun(\"Numbered item\")] }),\n ]\n }]\n});\n\n// ⚠️ Each reference creates INDEPENDENT numbering\n// Same reference = continues (1,2,3 then 4,5,6)\n// Different reference = restarts (1,2,3 then 1,2,3)\n```\n\n### Tables\n\n**CRITICAL: Tables need dual widths** - set both `columnWidths` on the table AND `width` on each cell. 
Without both, tables render incorrectly on some platforms.\n\n```javascript\n// CRITICAL: Always set table width for consistent rendering\n// CRITICAL: Use ShadingType.CLEAR (not SOLID) to prevent black backgrounds\nconst border = { style: BorderStyle.SINGLE, size: 1, color: \"CCCCCC\" };\nconst borders = { top: border, bottom: border, left: border, right: border };\n\nnew Table({\n width: { size: 9360, type: WidthType.DXA }, // Always use DXA (percentages break in Google Docs)\n columnWidths: [4680, 4680], // Must sum to table width (DXA: 1440 = 1 inch)\n rows: [\n new TableRow({\n children: [\n new TableCell({\n borders,\n width: { size: 4680, type: WidthType.DXA }, // Also set on each cell\n shading: { fill: \"D5E8F0\", type: ShadingType.CLEAR }, // CLEAR not SOLID\n margins: { top: 80, bottom: 80, left: 120, right: 120 }, // Cell padding (internal, not added to width)\n children: [new Paragraph({ children: [new TextRun(\"Cell\")] })]\n })\n ]\n })\n ]\n})\n```\n\n**Table width calculation:**\n\nAlways use `WidthType.DXA` — `WidthType.PERCENTAGE` breaks in Google Docs.\n\n```javascript\n// Table width = sum of columnWidths = content width\n// US Letter with 1\" margins: 12240 - 2880 = 9360 DXA\nwidth: { size: 9360, type: WidthType.DXA },\ncolumnWidths: [7000, 2360] // Must sum to table width\n```\n\n**Width rules:**\n- **Always use `WidthType.DXA`** — never `WidthType.PERCENTAGE` (incompatible with Google Docs)\n- Table width must equal the sum of `columnWidths`\n- Cell `width` must match corresponding `columnWidth`\n- Cell `margins` are internal padding - they reduce content area, not add to cell width\n- For full-width tables: use content width (page width minus left and right margins)\n\n### Images\n\n```javascript\n// CRITICAL: type parameter is REQUIRED\nnew Paragraph({\n children: [new ImageRun({\n type: \"png\", // Required: png, jpg, jpeg, gif, bmp, svg\n data: fs.readFileSync(\"image.png\"),\n transformation: { width: 200, height: 150 },\n altText: { title: \"Title\", description: \"Desc\", name: \"Name\" } // All three required\n })]\n})\n```\n\n### Page Breaks\n\n```javascript\n// CRITICAL: PageBreak must be inside a Paragraph\nnew Paragraph({ children: [new PageBreak()] })\n\n// Or use pageBreakBefore\nnew Paragraph({ pageBreakBefore: true, children: [new TextRun(\"New page\")] })\n```\n\n### Hyperlinks\n\n```javascript\n// External link\nnew Paragraph({\n children: [new ExternalHyperlink({\n children: [new TextRun({ text: \"Click here\", style: \"Hyperlink\" })],\n link: \"https://example.com\",\n })]\n})\n\n// Internal link (bookmark + reference)\n// 1. Create bookmark at destination\nnew Paragraph({ heading: HeadingLevel.HEADING_1, children: [\n new Bookmark({ id: \"chapter1\", children: [new TextRun(\"Chapter 1\")] }),\n]})\n// 2. 
Link to it\nnew Paragraph({ children: [new InternalHyperlink({\n children: [new TextRun({ text: \"See Chapter 1\", style: \"Hyperlink\" })],\n anchor: \"chapter1\",\n})]})\n```\n\n### Footnotes\n\n```javascript\nconst doc = new Document({\n footnotes: {\n 1: { children: [new Paragraph(\"Source: Annual Report 2024\")] },\n 2: { children: [new Paragraph(\"See appendix for methodology\")] },\n },\n sections: [{\n children: [new Paragraph({\n children: [\n new TextRun(\"Revenue grew 15%\"),\n new FootnoteReferenceRun(1),\n new TextRun(\" using adjusted metrics\"),\n new FootnoteReferenceRun(2),\n ],\n })]\n }]\n});\n```\n\n### Tab Stops\n\n```javascript\n// Right-align text on same line (e.g., date opposite a title)\nnew Paragraph({\n children: [\n new TextRun(\"Company Name\"),\n new TextRun(\"\\tJanuary 2025\"),\n ],\n tabStops: [{ type: TabStopType.RIGHT, position: TabStopPosition.MAX }],\n})\n\n// Dot leader (e.g., TOC-style)\nnew Paragraph({\n children: [\n new TextRun(\"Introduction\"),\n new TextRun({ children: [\n new PositionalTab({\n alignment: PositionalTabAlignment.RIGHT,\n relativeTo: PositionalTabRelativeTo.MARGIN,\n leader: PositionalTabLeader.DOT,\n }),\n \"3\",\n ]}),\n ],\n})\n```\n\n### Multi-Column Layouts\n\n```javascript\n// Equal-width columns\nsections: [{\n properties: {\n column: {\n count: 2, // number of columns\n space: 720, // gap between columns in DXA (720 = 0.5 inch)\n equalWidth: true,\n separate: true, // vertical line between columns\n },\n },\n children: [/* content flows naturally across columns */]\n}]\n\n// Custom-width columns (equalWidth must be false)\nsections: [{\n properties: {\n column: {\n equalWidth: false,\n children: [\n new Column({ width: 5400, space: 720 }),\n new Column({ width: 3240 }),\n ],\n },\n },\n children: [/* content */]\n}]\n```\n\nForce a column break with a new section using `type: SectionType.NEXT_COLUMN`.\n\n### Table of Contents\n\n```javascript\n// CRITICAL: Headings must use HeadingLevel ONLY - no custom styles\nnew TableOfContents(\"Table of Contents\", { hyperlink: true, headingStyleRange: \"1-3\" })\n```\n\n### Headers/Footers\n\n```javascript\nsections: [{\n properties: {\n page: { margin: { top: 1440, right: 1440, bottom: 1440, left: 1440 } } // 1440 = 1 inch\n },\n headers: {\n default: new Header({ children: [new Paragraph({ children: [new TextRun(\"Header\")] })] })\n },\n footers: {\n default: new Footer({ children: [new Paragraph({\n children: [new TextRun(\"Page \"), new TextRun({ children: [PageNumber.CURRENT] })]\n })] })\n },\n children: [/* content */]\n}]\n```\n\n### Critical Rules for docx-js\n\n- **Set page size explicitly** - docx-js defaults to A4; use US Letter (12240 x 15840 DXA) for US documents\n- **Landscape: pass portrait dimensions** - docx-js swaps width/height internally; pass short edge as `width`, long edge as `height`, and set `orientation: PageOrientation.LANDSCAPE`\n- **Never use `\\n`** - use separate Paragraph elements\n- **Never use unicode bullets** - use `LevelFormat.BULLET` with numbering config\n- **PageBreak must be in Paragraph** - standalone creates invalid XML\n- **ImageRun requires `type`** - always specify png/jpg/etc\n- **Always set table `width` with DXA** - never use `WidthType.PERCENTAGE` (breaks in Google Docs)\n- **Tables need dual widths** - `columnWidths` array AND cell `width`, both must match\n- **Table width = sum of columnWidths** - for DXA, ensure they add up exactly\n- **Always add cell margins** - use `margins: { top: 80, bottom: 80, left: 120, right: 120 }` for 
readable padding\n- **Use `ShadingType.CLEAR`** - never SOLID for table shading\n- **Never use tables as dividers/rules** - cells have minimum height and render as empty boxes (including in headers/footers); use `border: { bottom: { style: BorderStyle.SINGLE, size: 6, color: \"2E75B6\", space: 1 } }` on a Paragraph instead. For two-column footers, use tab stops (see Tab Stops section), not tables\n- **TOC requires HeadingLevel only** - no custom styles on heading paragraphs\n- **Override built-in styles** - use exact IDs: \"Heading1\", \"Heading2\", etc.\n- **Include `outlineLevel`** - required for TOC (0 for H1, 1 for H2, etc.)\n\n---\n\n## Editing Existing Documents\n\n**Follow all 3 steps in order.**\n\n### Step 1: Unpack\n```bash\npython scripts/office/unpack.py document.docx unpacked/\n```\nExtracts XML, pretty-prints, merges adjacent runs, and converts smart quotes to XML entities (`“` etc.) so they survive editing. Use `--merge-runs false` to skip run merging.\n\n### Step 2: Edit XML\n\nEdit files in `unpacked/word/`. See XML Reference below for patterns.\n\n**Use \"Claude\" as the author** for tracked changes and comments, unless the user explicitly requests use of a different name.\n\n**Use the Edit tool directly for string replacement. Do not write Python scripts.** Scripts introduce unnecessary complexity. The Edit tool shows exactly what is being replaced.\n\n**CRITICAL: Use smart quotes for new content.** When adding text with apostrophes or quotes, use XML entities to produce smart quotes:\n```xml\n\nHere’s a quote: “Hello”\n```\n| Entity | Character |\n|--------|-----------|\n| `‘` | ‘ (left single) |\n| `’` | ’ (right single / apostrophe) |\n| `“` | “ (left double) |\n| `”` | ” (right double) |\n\n**Adding comments:** Use `comment.py` to handle boilerplate across multiple XML files (text must be pre-escaped XML):\n```bash\npython scripts/comment.py unpacked/ 0 \"Comment text with & and ’\"\npython scripts/comment.py unpacked/ 1 \"Reply text\" --parent 0 # reply to comment 0\npython scripts/comment.py unpacked/ 0 \"Text\" --author \"Custom Author\" # custom author name\n```\nThen add markers to document.xml (see Comments in XML Reference).\n\n### Step 3: Pack\n```bash\npython scripts/office/pack.py unpacked/ output.docx --original document.docx\n```\nValidates with auto-repair, condenses XML, and creates DOCX. Use `--validate false` to skip.\n\n**Auto-repair will fix:**\n- `durableId` >= 0x7FFFFFFF (regenerates valid ID)\n- Missing `xml:space=\"preserve\"` on `` with whitespace\n\n**Auto-repair won't fix:**\n- Malformed XML, invalid element nesting, missing relationships, schema violations\n\n### Common Pitfalls\n\n- **Replace entire `` elements**: When adding tracked changes, replace the whole `...` block with `......` as siblings. 
Don't inject tracked change tags inside a run.\n- **Preserve `` formatting**: Copy the original run's `` block into your tracked change runs to maintain bold, font size, etc.\n\n---\n\n## XML Reference\n\n### Schema Compliance\n\n- **Element order in ``**: ``, ``, ``, ``, ``, `` last\n- **Whitespace**: Add `xml:space=\"preserve\"` to `` with leading/trailing spaces\n- **RSIDs**: Must be 8-digit hex (e.g., `00AB1234`)\n\n### Tracked Changes\n\n**Insertion:**\n```xml\n\n inserted text\n\n```\n\n**Deletion:**\n```xml\n\n deleted text\n\n```\n\n**Inside ``**: Use `` instead of ``, and `` instead of ``.\n\n**Minimal edits** - only mark what changes:\n```xml\n\nThe term is \n\n 30\n\n\n 60\n\n days.\n```\n\n**Deleting entire paragraphs/list items** - when removing ALL content from a paragraph, also mark the paragraph mark as deleted so it merges with the next paragraph. Add `` inside ``:\n```xml\n\n \n ... \n \n \n \n \n \n Entire paragraph content being deleted...\n \n\n```\nWithout the `` in ``, accepting changes leaves an empty paragraph/list item.\n\n**Rejecting another author's insertion** - nest deletion inside their insertion:\n```xml\n\n \n their inserted text\n \n\n```\n\n**Restoring another author's deletion** - add insertion after (don't modify their deletion):\n```xml\n\n deleted text\n\n\n deleted text\n\n```\n\n### Comments\n\nAfter running `comment.py` (see Step 2), add markers to document.xml. For replies, use `--parent` flag and nest markers inside the parent's.\n\n**CRITICAL: `` and `` are siblings of ``, never inside ``.**\n\n```xml\n\n\n\n deleted\n\n more text\n\n\n\n\n\n \n text\n \n\n\n\n```\n\n### Images\n\n1. Add image file to `word/media/`\n2. Add relationship to `word/_rels/document.xml.rels`:\n```xml\n\n```\n3. Add content type to `[Content_Types].xml`:\n```xml\n\n```\n4. Reference in document.xml:\n```xml\n\n \n \n \n \n \n \n \n \n \n \n\n```\n\n---\n\n## Dependencies\n\n- **pandoc**: Text extraction\n- **docx**: `npm install -g docx` (new documents)\n- **LibreOffice**: PDF conversion (auto-configured for sandboxed environments via `scripts/office/soffice.py`)\n- **Poppler**: `pdftoppm` for images\n" }, { "name": "scripts", diff --git a/web/app/components/workflow/skill/start-tab/templates/skills/skill-creator.ts b/web/app/components/workflow/skill/start-tab/templates/skills/skill-creator.ts index 842b707d2f..f7bb4d7414 100644 --- a/web/app/components/workflow/skill/start-tab/templates/skills/skill-creator.ts +++ b/web/app/components/workflow/skill/start-tab/templates/skills/skill-creator.ts @@ -6,21 +6,64 @@ const children: SkillTemplateNode[] = [ { "name": "SKILL.md", "node_type": "file", - "content": "---\nname: skill-creator\ndescription: Guide for creating effective skills. This skill should be used when users want to create a new skill (or update an existing skill) that extends Claude's capabilities with specialized knowledge, workflows, or tool integrations.\nlicense: Complete terms in LICENSE.txt\n---\n\n# Skill Creator\n\nThis skill provides guidance for creating effective skills.\n\n## About Skills\n\nSkills are modular, self-contained packages that extend Claude's capabilities by providing\nspecialized knowledge, workflows, and tools. Think of them as \"onboarding guides\" for specific\ndomains or tasks—they transform Claude from a general-purpose agent into a specialized agent\nequipped with procedural knowledge that no model can fully possess.\n\n### What Skills Provide\n\n1. Specialized workflows - Multi-step procedures for specific domains\n2. 
Tool integrations - Instructions for working with specific file formats or APIs\n3. Domain expertise - Company-specific knowledge, schemas, business logic\n4. Bundled resources - Scripts, references, and assets for complex and repetitive tasks\n\n## Core Principles\n\n### Concise is Key\n\nThe context window is a public good. Skills share the context window with everything else Claude needs: system prompt, conversation history, other Skills' metadata, and the actual user request.\n\n**Default assumption: Claude is already very smart.** Only add context Claude doesn't already have. Challenge each piece of information: \"Does Claude really need this explanation?\" and \"Does this paragraph justify its token cost?\"\n\nPrefer concise examples over verbose explanations.\n\n### Set Appropriate Degrees of Freedom\n\nMatch the level of specificity to the task's fragility and variability:\n\n**High freedom (text-based instructions)**: Use when multiple approaches are valid, decisions depend on context, or heuristics guide the approach.\n\n**Medium freedom (pseudocode or scripts with parameters)**: Use when a preferred pattern exists, some variation is acceptable, or configuration affects behavior.\n\n**Low freedom (specific scripts, few parameters)**: Use when operations are fragile and error-prone, consistency is critical, or a specific sequence must be followed.\n\nThink of Claude as exploring a path: a narrow bridge with cliffs needs specific guardrails (low freedom), while an open field allows many routes (high freedom).\n\n### Anatomy of a Skill\n\nEvery skill consists of a required SKILL.md file and optional bundled resources:\n\n```\nskill-name/\n├── SKILL.md (required)\n│ ├── YAML frontmatter metadata (required)\n│ │ ├── name: (required)\n│ │ └── description: (required)\n│ └── Markdown instructions (required)\n└── Bundled Resources (optional)\n ├── scripts/ - Executable code (Python/Bash/etc.)\n ├── references/ - Documentation intended to be loaded into context as needed\n └── assets/ - Files used in output (templates, icons, fonts, etc.)\n```\n\n#### SKILL.md (required)\n\nEvery SKILL.md consists of:\n\n- **Frontmatter** (YAML): Contains `name` and `description` fields. These are the only fields that Claude reads to determine when the skill gets used, thus it is very important to be clear and comprehensive in describing what the skill is, and when it should be used.\n- **Body** (Markdown): Instructions and guidance for using the skill. Only loaded AFTER the skill triggers (if at all).\n\n#### Bundled Resources (optional)\n\n##### Scripts (`scripts/`)\n\nExecutable code (Python/Bash/etc.) 
for tasks that require deterministic reliability or are repeatedly rewritten.\n\n- **When to include**: When the same code is being rewritten repeatedly or deterministic reliability is needed\n- **Example**: `scripts/rotate_pdf.py` for PDF rotation tasks\n- **Benefits**: Token efficient, deterministic, may be executed without loading into context\n- **Note**: Scripts may still need to be read by Claude for patching or environment-specific adjustments\n\n##### References (`references/`)\n\nDocumentation and reference material intended to be loaded as needed into context to inform Claude's process and thinking.\n\n- **When to include**: For documentation that Claude should reference while working\n- **Examples**: `references/finance.md` for financial schemas, `references/mnda.md` for company NDA template, `references/policies.md` for company policies, `references/api_docs.md` for API specifications\n- **Use cases**: Database schemas, API documentation, domain knowledge, company policies, detailed workflow guides\n- **Benefits**: Keeps SKILL.md lean, loaded only when Claude determines it's needed\n- **Best practice**: If files are large (>10k words), include grep search patterns in SKILL.md\n- **Avoid duplication**: Information should live in either SKILL.md or references files, not both. Prefer references files for detailed information unless it's truly core to the skill—this keeps SKILL.md lean while making information discoverable without hogging the context window. Keep only essential procedural instructions and workflow guidance in SKILL.md; move detailed reference material, schemas, and examples to references files.\n\n##### Assets (`assets/`)\n\nFiles not intended to be loaded into context, but rather used within the output Claude produces.\n\n- **When to include**: When the skill needs files that will be used in the final output\n- **Examples**: `assets/logo.png` for brand assets, `assets/slides.pptx` for PowerPoint templates, `assets/frontend-template/` for HTML/React boilerplate, `assets/font.ttf` for typography\n- **Use cases**: Templates, images, icons, boilerplate code, fonts, sample documents that get copied or modified\n- **Benefits**: Separates output resources from documentation, enables Claude to use files without loading them into context\n\n#### What to Not Include in a Skill\n\nA skill should only contain essential files that directly support its functionality. Do NOT create extraneous documentation or auxiliary files, including:\n\n- README.md\n- INSTALLATION_GUIDE.md\n- QUICK_REFERENCE.md\n- CHANGELOG.md\n- etc.\n\nThe skill should only contain the information needed for an AI agent to do the job at hand. It should not contain auxilary context about the process that went into creating it, setup and testing procedures, user-facing documentation, etc. Creating additional documentation files just adds clutter and confusion.\n\n### Progressive Disclosure Design Principle\n\nSkills use a three-level loading system to manage context efficiently:\n\n1. **Metadata (name + description)** - Always in context (~100 words)\n2. **SKILL.md body** - When skill triggers (<5k words)\n3. **Bundled resources** - As needed by Claude (Unlimited because scripts can be executed without reading into context window)\n\n#### Progressive Disclosure Patterns\n\nKeep SKILL.md body to the essentials and under 500 lines to minimize context bloat. Split content into separate files when approaching this limit. 
When splitting out content into other files, it is very important to reference them from SKILL.md and describe clearly when to read them, to ensure the reader of the skill knows they exist and when to use them.\n\n**Key principle:** When a skill supports multiple variations, frameworks, or options, keep only the core workflow and selection guidance in SKILL.md. Move variant-specific details (patterns, examples, configuration) into separate reference files.\n\n**Pattern 1: High-level guide with references**\n\n```markdown\n# PDF Processing\n\n## Quick start\n\nExtract text with pdfplumber:\n[code example]\n\n## Advanced features\n\n- **Form filling**: See [FORMS.md](FORMS.md) for complete guide\n- **API reference**: See [REFERENCE.md](REFERENCE.md) for all methods\n- **Examples**: See [EXAMPLES.md](EXAMPLES.md) for common patterns\n```\n\nClaude loads FORMS.md, REFERENCE.md, or EXAMPLES.md only when needed.\n\n**Pattern 2: Domain-specific organization**\n\nFor Skills with multiple domains, organize content by domain to avoid loading irrelevant context:\n\n```\nbigquery-skill/\n├── SKILL.md (overview and navigation)\n└── reference/\n ├── finance.md (revenue, billing metrics)\n ├── sales.md (opportunities, pipeline)\n ├── product.md (API usage, features)\n └── marketing.md (campaigns, attribution)\n```\n\nWhen a user asks about sales metrics, Claude only reads sales.md.\n\nSimilarly, for skills supporting multiple frameworks or variants, organize by variant:\n\n```\ncloud-deploy/\n├── SKILL.md (workflow + provider selection)\n└── references/\n ├── aws.md (AWS deployment patterns)\n ├── gcp.md (GCP deployment patterns)\n └── azure.md (Azure deployment patterns)\n```\n\nWhen the user chooses AWS, Claude only reads aws.md.\n\n**Pattern 3: Conditional details**\n\nShow basic content, link to advanced content:\n\n```markdown\n# DOCX Processing\n\n## Creating documents\n\nUse docx-js for new documents. See [DOCX-JS.md](DOCX-JS.md).\n\n## Editing documents\n\nFor simple edits, modify the XML directly.\n\n**For tracked changes**: See [REDLINING.md](REDLINING.md)\n**For OOXML details**: See [OOXML.md](OOXML.md)\n```\n\nClaude reads REDLINING.md or OOXML.md only when the user needs those features.\n\n**Important guidelines:**\n\n- **Avoid deeply nested references** - Keep references one level deep from SKILL.md. All reference files should link directly from SKILL.md.\n- **Structure longer reference files** - For files longer than 100 lines, include a table of contents at the top so Claude can see the full scope when previewing.\n\n## Skill Creation Process\n\nSkill creation involves these steps:\n\n1. Understand the skill with concrete examples\n2. Plan reusable skill contents (scripts, references, assets)\n3. Initialize the skill (run init_skill.py)\n4. Edit the skill (implement resources and write SKILL.md)\n5. Package the skill (run package_skill.py)\n6. Iterate based on real usage\n\nFollow these steps in order, skipping only if there is a clear reason why they are not applicable.\n\n### Step 1: Understanding the Skill with Concrete Examples\n\nSkip this step only when the skill's usage patterns are already clearly understood. It remains valuable even when working with an existing skill.\n\nTo create an effective skill, clearly understand concrete examples of how the skill will be used. 
This understanding can come from either direct user examples or generated examples that are validated with user feedback.\n\nFor example, when building an image-editor skill, relevant questions include:\n\n- \"What functionality should the image-editor skill support? Editing, rotating, anything else?\"\n- \"Can you give some examples of how this skill would be used?\"\n- \"I can imagine users asking for things like 'Remove the red-eye from this image' or 'Rotate this image'. Are there other ways you imagine this skill being used?\"\n- \"What would a user say that should trigger this skill?\"\n\nTo avoid overwhelming users, avoid asking too many questions in a single message. Start with the most important questions and follow up as needed for better effectiveness.\n\nConclude this step when there is a clear sense of the functionality the skill should support.\n\n### Step 2: Planning the Reusable Skill Contents\n\nTo turn concrete examples into an effective skill, analyze each example by:\n\n1. Considering how to execute on the example from scratch\n2. Identifying what scripts, references, and assets would be helpful when executing these workflows repeatedly\n\nExample: When building a `pdf-editor` skill to handle queries like \"Help me rotate this PDF,\" the analysis shows:\n\n1. Rotating a PDF requires re-writing the same code each time\n2. A `scripts/rotate_pdf.py` script would be helpful to store in the skill\n\nExample: When designing a `frontend-webapp-builder` skill for queries like \"Build me a todo app\" or \"Build me a dashboard to track my steps,\" the analysis shows:\n\n1. Writing a frontend webapp requires the same boilerplate HTML/React each time\n2. An `assets/hello-world/` template containing the boilerplate HTML/React project files would be helpful to store in the skill\n\nExample: When building a `big-query` skill to handle queries like \"How many users have logged in today?\" the analysis shows:\n\n1. Querying BigQuery requires re-discovering the table schemas and relationships each time\n2. A `references/schema.md` file documenting the table schemas would be helpful to store in the skill\n\nTo establish the skill's contents, analyze each concrete example to create a list of the reusable resources to include: scripts, references, and assets.\n\n### Step 3: Initializing the Skill\n\nAt this point, it is time to actually create the skill.\n\nSkip this step only if the skill being developed already exists, and iteration or packaging is needed. In this case, continue to the next step.\n\nWhen creating a new skill from scratch, always run the `init_skill.py` script. The script conveniently generates a new template skill directory that automatically includes everything a skill requires, making the skill creation process much more efficient and reliable.\n\nUsage:\n\n```bash\nscripts/init_skill.py --path \n```\n\nThe script:\n\n- Creates the skill directory at the specified path\n- Generates a SKILL.md template with proper frontmatter and TODO placeholders\n- Creates example resource directories: `scripts/`, `references/`, and `assets/`\n- Adds example files in each directory that can be customized or deleted\n\nAfter initialization, customize or remove the generated SKILL.md and example files as needed.\n\n### Step 4: Edit the Skill\n\nWhen editing the (newly-generated or existing) skill, remember that the skill is being created for another instance of Claude to use. Include information that would be beneficial and non-obvious to Claude. 
Consider what procedural knowledge, domain-specific details, or reusable assets would help another Claude instance execute these tasks more effectively.\n\n#### Learn Proven Design Patterns\n\nConsult these helpful guides based on your skill's needs:\n\n- **Multi-step processes**: See references/workflows.md for sequential workflows and conditional logic\n- **Specific output formats or quality standards**: See references/output-patterns.md for template and example patterns\n\nThese files contain established best practices for effective skill design.\n\n#### Start with Reusable Skill Contents\n\nTo begin implementation, start with the reusable resources identified above: `scripts/`, `references/`, and `assets/` files. Note that this step may require user input. For example, when implementing a `brand-guidelines` skill, the user may need to provide brand assets or templates to store in `assets/`, or documentation to store in `references/`.\n\nAdded scripts must be tested by actually running them to ensure there are no bugs and that the output matches what is expected. If there are many similar scripts, only a representative sample needs to be tested to ensure confidence that they all work while balancing time to completion.\n\nAny example files and directories not needed for the skill should be deleted. The initialization script creates example files in `scripts/`, `references/`, and `assets/` to demonstrate structure, but most skills won't need all of them.\n\n#### Update SKILL.md\n\n**Writing Guidelines:** Always use imperative/infinitive form.\n\n##### Frontmatter\n\nWrite the YAML frontmatter with `name` and `description`:\n\n- `name`: The skill name\n- `description`: This is the primary triggering mechanism for your skill, and helps Claude understand when to use the skill.\n - Include both what the Skill does and specific triggers/contexts for when to use it.\n - Include all \"when to use\" information here - Not in the body. The body is only loaded after triggering, so \"When to Use This Skill\" sections in the body are not helpful to Claude.\n - Example description for a `docx` skill: \"Comprehensive document creation, editing, and analysis with support for tracked changes, comments, formatting preservation, and text extraction. Use when Claude needs to work with professional documents (.docx files) for: (1) Creating new documents, (2) Modifying or editing content, (3) Working with tracked changes, (4) Adding comments, or any other document tasks\"\n\nDo not include any other fields in YAML frontmatter.\n\n##### Body\n\nWrite instructions for using the skill and its bundled resources.\n\n### Step 5: Packaging a Skill\n\nOnce development of the skill is complete, it must be packaged into a distributable .skill file that gets shared with the user. The packaging process automatically validates the skill first to ensure it meets all requirements:\n\n```bash\nscripts/package_skill.py \n```\n\nOptional output directory specification:\n\n```bash\nscripts/package_skill.py ./dist\n```\n\nThe packaging script will:\n\n1. **Validate** the skill automatically, checking:\n\n - YAML frontmatter format and required fields\n - Skill naming conventions and directory structure\n - Description completeness and quality\n - File organization and resource references\n\n2. **Package** the skill if validation passes, creating a .skill file named after the skill (e.g., `my-skill.skill`) that includes all files and maintains the proper directory structure for distribution. 
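To make that concrete, here is a minimal sketch of what the packaging step amounts to (illustrative only, not the bundled script):\n\n```python\nimport zipfile\nfrom pathlib import Path\n\ndef package_skill(skill_dir, out_dir='.'):\n    # Illustrative: zip the skill directory into <skill-name>.skill,\n    # keeping paths relative to the parent so the skill folder\n    # (with SKILL.md inside) sits at the archive root.\n    skill = Path(skill_dir).resolve()\n    out = Path(out_dir) / (skill.name + '.skill')\n    with zipfile.ZipFile(out, 'w', zipfile.ZIP_DEFLATED) as zf:\n        for f in sorted(skill.rglob('*')):\n            if f.is_file():\n                zf.write(f, f.relative_to(skill.parent))\n    return out\n```\n\n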
The .skill file is a zip file with a .skill extension.\n\nIf validation fails, the script will report the errors and exit without creating a package. Fix any validation errors and run the packaging command again.\n\n### Step 6: Iterate\n\nAfter testing the skill, users may request improvements. Often this happens right after using the skill, with fresh context of how the skill performed.\n\n**Iteration workflow:**\n\n1. Use the skill on real tasks\n2. Notice struggles or inefficiencies\n3. Identify how SKILL.md or bundled resources should be updated\n4. Implement changes and test again\n" + "content": "---\nname: skill-creator\ndescription: Create new skills, modify and improve existing skills, and measure skill performance. Use when users want to create a skill from scratch, edit, or optimize an existing skill, run evals to test a skill, benchmark skill performance with variance analysis, or optimize a skill's description for better triggering accuracy.\n---\n\n# Skill Creator\n\nA skill for creating new skills and iteratively improving them.\n\nAt a high level, the process of creating a skill goes like this:\n\n- Decide what you want the skill to do and roughly how it should do it\n- Write a draft of the skill\n- Create a few test prompts and run claude-with-access-to-the-skill on them\n- Help the user evaluate the results both qualitatively and quantitatively\n - While the runs happen in the background, draft some quantitative evals if there aren't any (if there are some, you can either use as is or modify if you feel something needs to change about them). Then explain them to the user (or if they already existed, explain the ones that already exist)\n - Use the `eval-viewer/generate_review.py` script to show the user the results for them to look at, and also let them look at the quantitative metrics\n- Rewrite the skill based on feedback from the user's evaluation of the results (and also if there are any glaring flaws that become apparent from the quantitative benchmarks)\n- Repeat until you're satisfied\n- Expand the test set and try again at larger scale\n\nYour job when using this skill is to figure out where the user is in this process and then jump in and help them progress through these stages. So for instance, maybe they're like \"I want to make a skill for X\". You can help narrow down what they mean, write a draft, write the test cases, figure out how they want to evaluate, run all the prompts, and repeat.\n\nOn the other hand, maybe they already have a draft of the skill. In this case you can go straight to the eval/iterate part of the loop.\n\nOf course, you should always be flexible and if the user is like \"I don't need to run a bunch of evaluations, just vibe with me\", you can do that instead.\n\nThen after the skill is done (but again, the order is flexible), you can also run the skill description improver, which we have a whole separate script for, to optimize the triggering of the skill.\n\nCool? Cool.\n\n## Communicating with the user\n\nThe skill creator is liable to be used by people across a wide range of familiarity with coding jargon. If you haven't heard (and how could you, it's only very recently that it started), there's a trend now where the power of Claude is inspiring plumbers to open up their terminals, parents and grandparents to google \"how to install npm\". On the other hand, the bulk of users are probably fairly computer-literate.\n\nSo please pay attention to context cues to understand how to phrase your communication! 
In the default case, just to give you some idea:\n\n- \"evaluation\" and \"benchmark\" are borderline, but OK\n- for \"JSON\" and \"assertion\" you want to see serious cues from the user that they know what those things are before using them without explaining them\n\nIt's OK to briefly explain terms if you're in doubt, and feel free to clarify terms with a short definition if you're unsure if the user will get it.\n\n---\n\n## Creating a skill\n\n### Capture Intent\n\nStart by understanding the user's intent. The current conversation might already contain a workflow the user wants to capture (e.g., they say \"turn this into a skill\"). If so, extract answers from the conversation history first — the tools used, the sequence of steps, corrections the user made, input/output formats observed. The user may need to fill the gaps, and should confirm before proceeding to the next step.\n\n1. What should this skill enable Claude to do?\n2. When should this skill trigger? (what user phrases/contexts)\n3. What's the expected output format?\n4. Should we set up test cases to verify the skill works? Skills with objectively verifiable outputs (file transforms, data extraction, code generation, fixed workflow steps) benefit from test cases. Skills with subjective outputs (writing style, art) often don't need them. Suggest the appropriate default based on the skill type, but let the user decide.\n\n### Interview and Research\n\nProactively ask questions about edge cases, input/output formats, example files, success criteria, and dependencies. Wait to write test prompts until you've got this part ironed out.\n\nCheck available MCPs - if useful for research (searching docs, finding similar skills, looking up best practices), research in parallel via subagents if available, otherwise inline. Come prepared with context to reduce burden on the user.\n\n### Write the SKILL.md\n\nBased on the user interview, fill in these components:\n\n- **name**: Skill identifier\n- **description**: When to trigger, what it does. This is the primary triggering mechanism - include both what the skill does AND specific contexts for when to use it. All \"when to use\" info goes here, not in the body. Note: currently Claude has a tendency to \"undertrigger\" skills -- to not use them when they'd be useful. To combat this, please make the skill descriptions a little bit \"pushy\". So for instance, instead of \"How to build a simple fast dashboard to display internal Anthropic data.\", you might write \"How to build a simple fast dashboard to display internal Anthropic data. Make sure to use this skill whenever the user mentions dashboards, data visualization, internal metrics, or wants to display any kind of company data, even if they don't explicitly ask for a 'dashboard.'\"\n- **compatibility**: Required tools, dependencies (optional, rarely needed)\n- **the rest of the skill :)**\n\n### Skill Writing Guide\n\n#### Anatomy of a Skill\n\n```\nskill-name/\n├── SKILL.md (required)\n│ ├── YAML frontmatter (name, description required)\n│ └── Markdown instructions\n└── Bundled Resources (optional)\n ├── scripts/ - Executable code for deterministic/repetitive tasks\n ├── references/ - Docs loaded into context as needed\n └── assets/ - Files used in output (templates, icons, fonts)\n```\n\n#### Progressive Disclosure\n\nSkills use a three-level loading system:\n1. **Metadata** (name + description) - Always in context (~100 words)\n2. **SKILL.md body** - In context whenever skill triggers (<500 lines ideal)\n3. 
**Bundled resources** - As needed (unlimited, scripts can execute without loading)\n\nThese word counts are approximate and you can feel free to go longer if needed.\n\n**Key patterns:**\n- Keep SKILL.md under 500 lines; if you're approaching this limit, add an additional layer of hierarchy along with clear pointers about where the model using the skill should go next to follow up.\n- Reference files clearly from SKILL.md with guidance on when to read them\n- For large reference files (>300 lines), include a table of contents\n\n**Domain organization**: When a skill supports multiple domains/frameworks, organize by variant:\n```\ncloud-deploy/\n├── SKILL.md (workflow + selection)\n└── references/\n ├── aws.md\n ├── gcp.md\n └── azure.md\n```\nClaude reads only the relevant reference file.\n\n#### Principle of Lack of Surprise\n\nThis goes without saying, but skills must not contain malware, exploit code, or any content that could compromise system security. A skill's contents should not surprise the user in their intent if described. Don't go along with requests to create misleading skills or skills designed to facilitate unauthorized access, data exfiltration, or other malicious activities. Things like a \"roleplay as an XYZ\" are OK though.\n\n#### Writing Patterns\n\nPrefer using the imperative form in instructions.\n\n**Defining output formats** - You can do it like this:\n```markdown\n## Report structure\nALWAYS use this exact template:\n# [Title]\n## Executive summary\n## Key findings\n## Recommendations\n```\n\n**Examples pattern** - It's useful to include examples. You can format them like this (but if \"Input\" and \"Output\" are in the examples you might want to deviate a little):\n```markdown\n## Commit message format\n**Example 1:**\nInput: Added user authentication with JWT tokens\nOutput: feat(auth): implement JWT-based authentication\n```\n\n### Writing Style\n\nTry to explain to the model why things are important in lieu of heavy-handed musty MUSTs. Use theory of mind and try to make the skill general and not super-narrow to specific examples. Start by writing a draft and then look at it with fresh eyes and improve it.\n\n### Test Cases\n\nAfter writing the skill draft, come up with 2-3 realistic test prompts — the kind of thing a real user would actually say. Share them with the user: [you don't have to use this exact language] \"Here are a few test cases I'd like to try. Do these look right, or do you want to add more?\" Then run them.\n\nSave test cases to `evals/evals.json`. Don't write assertions yet — just the prompts. You'll draft assertions in the next step while the runs are in progress.\n\n```json\n{\n \"skill_name\": \"example-skill\",\n \"evals\": [\n {\n \"id\": 1,\n \"prompt\": \"User's task prompt\",\n \"expected_output\": \"Description of expected result\",\n \"files\": []\n }\n ]\n}\n```\n\nSee `references/schemas.md` for the full schema (including the `assertions` field, which you'll add later).\n\n## Running and evaluating test cases\n\nThis section is one continuous sequence — don't stop partway through. Do NOT use `/skill-test` or any other testing skill.\n\nPut results in `<skill-name>-workspace/` as a sibling to the skill directory. Within the workspace, organize results by iteration (`iteration-1/`, `iteration-2/`, etc.) and within that, each test case gets a directory (`eval-0/`, `eval-1/`, etc.). 
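For instance, after a couple of iterations the workspace might look roughly like this (an illustrative layout; actual eval names come from your test cases):\n\n```\nmy-skill-workspace/\n├── iteration-1/\n│   ├── eval-0/\n│   │   ├── eval_metadata.json\n│   │   ├── with_skill/\n│   │   │   ├── outputs/\n│   │   │   ├── timing.json\n│   │   │   └── grading.json\n│   │   └── without_skill/\n│   │       └── ...\n│   ├── benchmark.json\n│   └── benchmark.md\n└── iteration-2/\n    └── ...\n```\n\n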
Don't create all of this upfront — just create directories as you go.\n\n### Step 1: Spawn all runs (with-skill AND baseline) in the same turn\n\nFor each test case, spawn two subagents in the same turn — one with the skill, one without. This is important: don't spawn the with-skill runs first and then come back for baselines later. Launch everything at once so it all finishes around the same time.\n\n**With-skill run:**\n\n```\nExecute this task:\n- Skill path: <skill-path>\n- Task: <the user's task prompt>\n- Input files: <input files, if any>\n- Save outputs to: <workspace>/iteration-<N>/eval-<id>/with_skill/outputs/\n- Outputs to save: <expected output files>\n```\n\n**Baseline run** (same prompt, but the baseline depends on context):\n- **Creating a new skill**: no skill at all. Same prompt, no skill path, save to `without_skill/outputs/`.\n- **Improving an existing skill**: the old version. Before editing, snapshot the skill (`cp -r <skill-path> <workspace>/skill-snapshot/`), then point the baseline subagent at the snapshot. Save to `old_skill/outputs/`.\n\nWrite an `eval_metadata.json` for each test case (assertions can be empty for now). Give each eval a descriptive name based on what it's testing — not just \"eval-0\". Use this name for the directory too. If this iteration uses new or modified eval prompts, create these files for each new eval directory — don't assume they carry over from previous iterations.\n\n```json\n{\n \"eval_id\": 0,\n \"eval_name\": \"descriptive-name-here\",\n \"prompt\": \"The user's task prompt\",\n \"assertions\": []\n}\n```\n\n### Step 2: While runs are in progress, draft assertions\n\nDon't just wait for the runs to finish — you can use this time productively. Draft quantitative assertions for each test case and explain them to the user. If assertions already exist in `evals/evals.json`, review them and explain what they check.\n\nGood assertions are objectively verifiable and have descriptive names — they should read clearly in the benchmark viewer so someone glancing at the results immediately understands what each one checks. Subjective skills (writing style, design quality) are better evaluated qualitatively — don't force assertions onto things that need human judgment.\n\nUpdate the `eval_metadata.json` files and `evals/evals.json` with the assertions once drafted. Also explain to the user what they'll see in the viewer — both the qualitative outputs and the quantitative benchmark.\n\n### Step 3: As runs complete, capture timing data\n\nWhen each subagent task completes, you receive a notification containing `total_tokens` and `duration_ms`. Save this data immediately to `timing.json` in the run directory:\n\n```json\n{\n \"total_tokens\": 84852,\n \"duration_ms\": 23332,\n \"total_duration_seconds\": 23.3\n}\n```\n\nThis is the only opportunity to capture this data — it comes through the task notification and isn't persisted elsewhere. Process each notification as it arrives rather than trying to batch them.\n\n### Step 4: Grade, aggregate, and launch the viewer\n\nOnce all runs are done:\n\n1. **Grade each run** — spawn a grader subagent (or grade inline) that reads `agents/grader.md` and evaluates each assertion against the outputs. Save results to `grading.json` in each run directory. The grading.json expectations array must use the fields `text`, `passed`, and `evidence` (not `name`/`met`/`details` or other variants) — the viewer depends on these exact field names. For assertions that can be checked programmatically, write and run a script rather than eyeballing it — scripts are faster, more reliable, and can be reused across iterations.\n\n2. 
**Aggregate into benchmark** — run the aggregation script from the skill-creator directory:\n ```bash\n python -m scripts.aggregate_benchmark <workspace>/iteration-N --skill-name <skill-name>\n ```\n This produces `benchmark.json` and `benchmark.md` with pass_rate, time, and tokens for each configuration, with mean ± stddev and the delta. If generating benchmark.json manually, see `references/schemas.md` for the exact schema the viewer expects; put each with_skill version before its baseline counterpart.\n\n3. **Do an analyst pass** — read the benchmark data and surface patterns the aggregate stats might hide. See `agents/analyzer.md` (the \"Analyzing Benchmark Results\" section) for what to look for — things like assertions that always pass regardless of skill (non-discriminating), high-variance evals (possibly flaky), and time/token tradeoffs.\n\n4. **Launch the viewer** with both qualitative outputs and quantitative data:\n ```bash\n nohup python <skill-creator-dir>/eval-viewer/generate_review.py \\\n <workspace>/iteration-N \\\n --skill-name \"my-skill\" \\\n --benchmark <workspace>/iteration-N/benchmark.json \\\n > /dev/null 2>&1 &\n VIEWER_PID=$!\n ```\n For iteration 2+, also pass `--previous-workspace <workspace>/iteration-<N-1>`.\n\n **Cowork / headless environments:** If `webbrowser.open()` is not available or the environment has no display, use `--static <output.html>` to write a standalone HTML file instead of starting a server. Feedback will be downloaded as a `feedback.json` file when the user clicks \"Submit All Reviews\". After download, copy `feedback.json` into the workspace directory for the next iteration to pick up.\n\nNote: please use generate_review.py to create the viewer; there's no need to write custom HTML.\n\n5. **Tell the user** something like: \"I've opened the results in your browser. There are two tabs — 'Outputs' lets you click through each test case and leave feedback, 'Benchmark' shows the quantitative comparison. When you're done, come back here and let me know.\"\n\n### What the user sees in the viewer\n\nThe \"Outputs\" tab shows one test case at a time:\n- **Prompt**: the task that was given\n- **Output**: the files the skill produced, rendered inline where possible\n- **Previous Output** (iteration 2+): collapsed section showing last iteration's output\n- **Formal Grades** (if grading was run): collapsed section showing assertion pass/fail\n- **Feedback**: a textbox that auto-saves as they type\n- **Previous Feedback** (iteration 2+): their comments from last time, shown below the textbox\n\nThe \"Benchmark\" tab shows the stats summary: pass rates, timing, and token usage for each configuration, with per-eval breakdowns and analyst observations.\n\nNavigation is via prev/next buttons or arrow keys. When done, they click \"Submit All Reviews\" which saves all feedback to `feedback.json`.\n\n### Step 5: Read the feedback\n\nWhen the user tells you they're done, read `feedback.json`:\n\n```json\n{\n \"reviews\": [\n {\"run_id\": \"eval-0-with_skill\", \"feedback\": \"the chart is missing axis labels\", \"timestamp\": \"...\"},\n {\"run_id\": \"eval-1-with_skill\", \"feedback\": \"\", \"timestamp\": \"...\"},\n {\"run_id\": \"eval-2-with_skill\", \"feedback\": \"perfect, love this\", \"timestamp\": \"...\"}\n ],\n \"status\": \"complete\"\n}\n```\n\nEmpty feedback means the user thought it was fine. Focus your improvements on the test cases where the user had specific complaints.\n\nKill the viewer server when you're done with it:\n\n```bash\nkill $VIEWER_PID 2>/dev/null\n```\n\n---\n\n## Improving the skill\n\nThis is the heart of the loop. 
You've run the test cases, the user has reviewed the results, and now you need to make the skill better based on their feedback.\n\n### How to think about improvements\n\n1. **Generalize from the feedback.** The big picture thing that's happening here is that we're trying to create skills that can be used a million times (maybe literally, maybe even more who knows) across many different prompts. Here you and the user are iterating on only a few examples over and over again because it helps move faster. The user knows these examples in and out and it's quick for them to assess new outputs. But if the skill you and the user are codeveloping works only for those examples, it's useless. Rather than put in fiddly overfitty changes, or oppressively constrictive MUSTs, if there's some stubborn issue, you might try branching out and using different metaphors, or recommending different patterns of working. It's relatively cheap to try and maybe you'll land on something great.\n\n2. **Keep the prompt lean.** Remove things that aren't pulling their weight. Make sure to read the transcripts, not just the final outputs — if it looks like the skill is making the model waste a bunch of time doing things that are unproductive, you can try getting rid of the parts of the skill that are making it do that and seeing what happens.\n\n3. **Explain the why.** Try hard to explain the **why** behind everything you're asking the model to do. Today's LLMs are *smart*. They have good theory of mind and when given a good harness can go beyond rote instructions and really make things happen. Even if the feedback from the user is terse or frustrated, try to actually understand the task and why the user is writing what they wrote, and what they actually wrote, and then transmit this understanding into the instructions. If you find yourself writing ALWAYS or NEVER in all caps, or using super rigid structures, that's a yellow flag — if possible, reframe and explain the reasoning so that the model understands why the thing you're asking for is important. That's a more humane, powerful, and effective approach.\n\n4. **Look for repeated work across test cases.** Read the transcripts from the test runs and notice if the subagents all independently wrote similar helper scripts or took the same multi-step approach to something. If all 3 test cases resulted in the subagent writing a `create_docx.py` or a `build_chart.py`, that's a strong signal the skill should bundle that script. Write it once, put it in `scripts/`, and tell the skill to use it. This saves every future invocation from reinventing the wheel.\n\nThis task is pretty important (we are trying to create billions a year in economic value here!) and your thinking time is not the blocker; take your time and really mull things over. I'd suggest writing a draft revision and then looking at it anew and making improvements. Really do your best to get into the head of the user and understand what they want and need.\n\n### The iteration loop\n\nAfter improving the skill:\n\n1. Apply your improvements to the skill\n2. Rerun all test cases into a new `iteration-/` directory, including baseline runs. If you're creating a new skill, the baseline is always `without_skill` (no skill) — that stays the same across iterations. If you're improving an existing skill, use your judgment on what makes sense as the baseline: the original version the user came in with, or the previous iteration.\n3. Launch the reviewer with `--previous-workspace` pointing at the previous iteration\n4. 
Wait for the user to review and tell you they're done\n5. Read the new feedback, improve again, repeat\n\nKeep going until:\n- The user says they're happy\n- The feedback is all empty (everything looks good)\n- You're not making meaningful progress\n\n---\n\n## Advanced: Blind comparison\n\nFor situations where you want a more rigorous comparison between two versions of a skill (e.g., the user asks \"is the new version actually better?\"), there's a blind comparison system. Read `agents/comparator.md` and `agents/analyzer.md` for the details. The basic idea is: give two outputs to an independent agent without telling it which is which, and let it judge quality. Then analyze why the winner won.\n\nThis is optional, requires subagents, and most users won't need it. The human review loop is usually sufficient.\n\n---\n\n## Description Optimization\n\nThe description field in SKILL.md frontmatter is the primary mechanism that determines whether Claude invokes a skill. After creating or improving a skill, offer to optimize the description for better triggering accuracy.\n\n### Step 1: Generate trigger eval queries\n\nCreate 20 eval queries — a mix of should-trigger and should-not-trigger. Save as JSON:\n\n```json\n[\n {\"query\": \"the user prompt\", \"should_trigger\": true},\n {\"query\": \"another prompt\", \"should_trigger\": false}\n]\n```\n\nThe queries must be realistic and something a Claude Code or Claude.ai user would actually type. Not abstract requests, but requests that are concrete and specific and have a good amount of detail. For instance, file paths, personal context about the user's job or situation, column names and values, company names, URLs. A little bit of backstory. Some might be in lowercase or contain abbreviations or typos or casual speech. Use a mix of different lengths, and focus on edge cases rather than making them clear-cut (the user will get a chance to sign off on them).\n\nBad: `\"Format this data\"`, `\"Extract text from PDF\"`, `\"Create a chart\"`\n\nGood: `\"ok so my boss just sent me this xlsx file (its in my downloads, called something like 'Q4 sales final FINAL v2.xlsx') and she wants me to add a column that shows the profit margin as a percentage. The revenue is in column C and costs are in column D i think\"`\n\nFor the **should-trigger** queries (8-10), think about coverage. You want different phrasings of the same intent — some formal, some casual. Include cases where the user doesn't explicitly name the skill or file type but clearly needs it. Throw in some uncommon use cases and cases where this skill competes with another but should win.\n\nFor the **should-not-trigger** queries (8-10), the most valuable ones are the near-misses — queries that share keywords or concepts with the skill but actually need something different. Think adjacent domains, ambiguous phrasing where a naive keyword match would trigger but shouldn't, and cases where the query touches on something the skill does but in a context where another tool is more appropriate.\n\nThe key thing to avoid: don't make should-not-trigger queries obviously irrelevant. \"Write a fibonacci function\" as a negative test for a PDF skill is too easy — it doesn't test anything. The negative cases should be genuinely tricky.\n\n### Step 2: Review with user\n\nPresent the eval set to the user for review using the HTML template:\n\n1. Read the template from `assets/eval_review.html`\n2. 
Replace the placeholders:\n - `__EVAL_DATA_PLACEHOLDER__` → the JSON array of eval items (no quotes around it — it's a JS variable assignment)\n - `__SKILL_NAME_PLACEHOLDER__` → the skill's name\n - `__SKILL_DESCRIPTION_PLACEHOLDER__` → the skill's current description\n3. Write to a temp file (e.g., `/tmp/eval_review_<skill-name>.html`) and open it: `open /tmp/eval_review_<skill-name>.html`\n4. The user can edit queries, toggle should-trigger, add/remove entries, then click \"Export Eval Set\"\n5. The file downloads to `~/Downloads/eval_set.json` — check the Downloads folder for the most recent version in case there are multiple (e.g., `eval_set (1).json`)\n\nThis step matters — bad eval queries lead to bad descriptions.\n\n### Step 3: Run the optimization loop\n\nTell the user: \"This will take some time — I'll run the optimization loop in the background and check on it periodically.\"\n\nSave the eval set to the workspace, then run in the background:\n\n```bash\npython -m scripts.run_loop \\\n --eval-set <path-to-eval-set.json> \\\n --skill-path <skill-path> \\\n --model <model-id> \\\n --max-iterations 5 \\\n --verbose\n```\n\nUse the model ID from your system prompt (the one powering the current session) so the triggering test matches what the user actually experiences.\n\nWhile it runs, periodically tail the output to give the user updates on which iteration it's on and what the scores look like.\n\nThis handles the full optimization loop automatically. It splits the eval set into 60% train and 40% held-out test, evaluates the current description (running each query 3 times to get a reliable trigger rate), then calls Claude to propose improvements based on what failed. It re-evaluates each new description on both train and test, iterating up to 5 times. When it's done, it opens an HTML report in the browser showing the results per iteration and returns JSON with `best_description` — selected by test score rather than train score to avoid overfitting.\n\n### How skill triggering works\n\nUnderstanding the triggering mechanism helps design better eval queries. Skills appear in Claude's `available_skills` list with their name + description, and Claude decides whether to consult a skill based on that description. The important thing to know is that Claude only consults skills for tasks it can't easily handle on its own — simple, one-step queries like \"read this PDF\" may not trigger a skill even if the description matches perfectly, because Claude can handle them directly with basic tools. Complex, multi-step, or specialized queries reliably trigger skills when the description matches.\n\nThis means your eval queries should be substantive enough that Claude would actually benefit from consulting a skill. Simple queries like \"read file X\" are poor test cases — they won't trigger skills regardless of description quality.\n\n### Step 4: Apply the result\n\nTake `best_description` from the JSON output and update the skill's SKILL.md frontmatter. Show the user before/after and report the scores.\n\n---\n\n### Package and Present (only if `present_files` tool is available)\n\nCheck whether you have access to the `present_files` tool. If you don't, skip this step. 
If you do, package the skill and present the .skill file to the user:\n\n```bash\npython -m scripts.package_skill \n```\n\nAfter packaging, direct the user to the resulting `.skill` file path so they can install it.\n\n---\n\n## Claude.ai-specific instructions\n\nIn Claude.ai, the core workflow is the same (draft → test → review → improve → repeat), but because Claude.ai doesn't have subagents, some mechanics change. Here's what to adapt:\n\n**Running test cases**: No subagents means no parallel execution. For each test case, read the skill's SKILL.md, then follow its instructions to accomplish the test prompt yourself. Do them one at a time. This is less rigorous than independent subagents (you wrote the skill and you're also running it, so you have full context), but it's a useful sanity check — and the human review step compensates. Skip the baseline runs — just use the skill to complete the task as requested.\n\n**Reviewing results**: If you can't open a browser (e.g., Claude.ai's VM has no display, or you're on a remote server), skip the browser reviewer entirely. Instead, present results directly in the conversation. For each test case, show the prompt and the output. If the output is a file the user needs to see (like a .docx or .xlsx), save it to the filesystem and tell them where it is so they can download and inspect it. Ask for feedback inline: \"How does this look? Anything you'd change?\"\n\n**Benchmarking**: Skip the quantitative benchmarking — it relies on baseline comparisons which aren't meaningful without subagents. Focus on qualitative feedback from the user.\n\n**The iteration loop**: Same as before — improve the skill, rerun the test cases, ask for feedback — just without the browser reviewer in the middle. You can still organize results into iteration directories on the filesystem if you have one.\n\n**Description optimization**: This section requires the `claude` CLI tool (specifically `claude -p`) which is only available in Claude Code. Skip it if you're on Claude.ai.\n\n**Blind comparison**: Requires subagents. Skip it.\n\n**Packaging**: The `package_skill.py` script works anywhere with Python and a filesystem. On Claude.ai, you can run it and the user can download the resulting `.skill` file.\n\n**Updating an existing skill**: The user might be asking you to update an existing skill, not create a new one. In this case:\n- **Preserve the original name.** Note the skill's directory name and `name` frontmatter field -- use them unchanged. E.g., if the installed skill is `research-helper`, output `research-helper.skill` (not `research-helper-v2`).\n- **Copy to a writeable location before editing.** The installed skill path may be read-only. Copy to `/tmp/skill-name/`, edit there, and package from the copy.\n- **If packaging manually, stage in `/tmp/` first**, then copy to the output directory -- direct writes may fail due to permissions.\n\n---\n\n## Cowork-Specific Instructions\n\nIf you're in Cowork, the main things to know are:\n\n- You have subagents, so the main workflow (spawn test cases in parallel, run baselines, grade, etc.) all works. (However, if you run into severe problems with timeouts, it's OK to run the test prompts in series rather than parallel.)\n- You don't have a browser or display, so when generating the eval viewer, use `--static ` to write a standalone HTML file instead of starting a server. 
Then proffer a link that the user can click to open the HTML in their browser.\n- For whatever reason, the Cowork setup seems to disincline Claude from generating the eval viewer after running the tests, so just to reiterate: whether you're in Cowork or in Claude Code, after running tests, you should always generate the eval viewer for the human to look at examples before revising the skill yourself and trying to make corrections, using `generate_review.py` (not writing your own boutique html code). Sorry in advance but I'm gonna go all caps here: GENERATE THE EVAL VIEWER *BEFORE* evaluating the outputs yourself. You want to get them in front of the human ASAP!\n- Feedback works differently: since there's no running server, the viewer's \"Submit All Reviews\" button will download `feedback.json` as a file. You can then read it from there (you may have to request access first).\n- Packaging works — `package_skill.py` just needs Python and a filesystem.\n- Description optimization (`run_loop.py` / `run_eval.py`) should work in Cowork just fine since it uses `claude -p` via subprocess, not a browser, but please save it until you've fully finished making the skill and the user agrees it's in good shape.\n- **Updating an existing skill**: The user might be asking you to update an existing skill, not create a new one. Follow the update guidance in the claude.ai section above.\n\n---\n\n## Reference files\n\nThe agents/ directory contains instructions for specialized subagents. Read them when you need to spawn the relevant subagent.\n\n- `agents/grader.md` — How to evaluate assertions against outputs\n- `agents/comparator.md` — How to do blind A/B comparison between two outputs\n- `agents/analyzer.md` — How to analyze why one version beat another\n\nThe references/ directory has additional documentation:\n- `references/schemas.md` — JSON structures for evals.json, grading.json, etc.\n\n---\n\nRepeating one more time the core loop here for emphasis:\n\n- Figure out what the skill is about\n- Draft or edit the skill\n- Run claude-with-access-to-the-skill on test prompts\n- With the user, evaluate the outputs:\n - Create benchmark.json and run `eval-viewer/generate_review.py` to help the user review them\n - Run quantitative evals\n- Repeat until you and the user are satisfied\n- Package the final skill and return it to the user.\n\nPlease add steps to your TodoList, if you have such a thing, to make sure you don't forget. If you're in Cowork, please specifically put \"Create evals JSON and run `eval-viewer/generate_review.py` so human can review test cases\" in your TodoList to make sure it happens.\n\nGood luck!\n" + }, + { + "name": "agents", + "node_type": "folder", + "children": [ + { + "name": "analyzer.md", + "node_type": "file", + "content": "# Post-hoc Analyzer Agent\n\nAnalyze blind comparison results to understand WHY the winner won and generate improvement suggestions.\n\n## Role\n\nAfter the blind comparator determines a winner, the Post-hoc Analyzer \"unblinds\" the results by examining the skills and transcripts. 
The goal is to extract actionable insights: what made the winner better, and how can the loser be improved?\n\n## Inputs\n\nYou receive these parameters in your prompt:\n\n- **winner**: \"A\" or \"B\" (from blind comparison)\n- **winner_skill_path**: Path to the skill that produced the winning output\n- **winner_transcript_path**: Path to the execution transcript for the winner\n- **loser_skill_path**: Path to the skill that produced the losing output\n- **loser_transcript_path**: Path to the execution transcript for the loser\n- **comparison_result_path**: Path to the blind comparator's output JSON\n- **output_path**: Where to save the analysis results\n\n## Process\n\n### Step 1: Read Comparison Result\n\n1. Read the blind comparator's output at comparison_result_path\n2. Note the winning side (A or B), the reasoning, and any scores\n3. Understand what the comparator valued in the winning output\n\n### Step 2: Read Both Skills\n\n1. Read the winner skill's SKILL.md and key referenced files\n2. Read the loser skill's SKILL.md and key referenced files\n3. Identify structural differences:\n - Instructions clarity and specificity\n - Script/tool usage patterns\n - Example coverage\n - Edge case handling\n\n### Step 3: Read Both Transcripts\n\n1. Read the winner's transcript\n2. Read the loser's transcript\n3. Compare execution patterns:\n - How closely did each follow their skill's instructions?\n - What tools were used differently?\n - Where did the loser diverge from optimal behavior?\n - Did either encounter errors or make recovery attempts?\n\n### Step 4: Analyze Instruction Following\n\nFor each transcript, evaluate:\n- Did the agent follow the skill's explicit instructions?\n- Did the agent use the skill's provided tools/scripts?\n- Were there missed opportunities to leverage skill content?\n- Did the agent add unnecessary steps not in the skill?\n\nScore instruction following 1-10 and note specific issues.\n\n### Step 5: Identify Winner Strengths\n\nDetermine what made the winner better:\n- Clearer instructions that led to better behavior?\n- Better scripts/tools that produced better output?\n- More comprehensive examples that guided edge cases?\n- Better error handling guidance?\n\nBe specific. Quote from skills/transcripts where relevant.\n\n### Step 6: Identify Loser Weaknesses\n\nDetermine what held the loser back:\n- Ambiguous instructions that led to suboptimal choices?\n- Missing tools/scripts that forced workarounds?\n- Gaps in edge case coverage?\n- Poor error handling that caused failures?\n\n### Step 7: Generate Improvement Suggestions\n\nBased on the analysis, produce actionable suggestions for improving the loser skill:\n- Specific instruction changes to make\n- Tools/scripts to add or modify\n- Examples to include\n- Edge cases to address\n\nPrioritize by impact. 
Focus on changes that would have changed the outcome.\n\n### Step 8: Write Analysis Results\n\nSave structured analysis to `{output_path}`.\n\n## Output Format\n\nWrite a JSON file with this structure:\n\n```json\n{\n \"comparison_summary\": {\n \"winner\": \"A\",\n \"winner_skill\": \"path/to/winner/skill\",\n \"loser_skill\": \"path/to/loser/skill\",\n \"comparator_reasoning\": \"Brief summary of why comparator chose winner\"\n },\n \"winner_strengths\": [\n \"Clear step-by-step instructions for handling multi-page documents\",\n \"Included validation script that caught formatting errors\",\n \"Explicit guidance on fallback behavior when OCR fails\"\n ],\n \"loser_weaknesses\": [\n \"Vague instruction 'process the document appropriately' led to inconsistent behavior\",\n \"No script for validation, agent had to improvise and made errors\",\n \"No guidance on OCR failure, agent gave up instead of trying alternatives\"\n ],\n \"instruction_following\": {\n \"winner\": {\n \"score\": 9,\n \"issues\": [\n \"Minor: skipped optional logging step\"\n ]\n },\n \"loser\": {\n \"score\": 6,\n \"issues\": [\n \"Did not use the skill's formatting template\",\n \"Invented own approach instead of following step 3\",\n \"Missed the 'always validate output' instruction\"\n ]\n }\n },\n \"improvement_suggestions\": [\n {\n \"priority\": \"high\",\n \"category\": \"instructions\",\n \"suggestion\": \"Replace 'process the document appropriately' with explicit steps: 1) Extract text, 2) Identify sections, 3) Format per template\",\n \"expected_impact\": \"Would eliminate ambiguity that caused inconsistent behavior\"\n },\n {\n \"priority\": \"high\",\n \"category\": \"tools\",\n \"suggestion\": \"Add validate_output.py script similar to winner skill's validation approach\",\n \"expected_impact\": \"Would catch formatting errors before final output\"\n },\n {\n \"priority\": \"medium\",\n \"category\": \"error_handling\",\n \"suggestion\": \"Add fallback instructions: 'If OCR fails, try: 1) different resolution, 2) image preprocessing, 3) manual extraction'\",\n \"expected_impact\": \"Would prevent early failure on difficult documents\"\n }\n ],\n \"transcript_insights\": {\n \"winner_execution_pattern\": \"Read skill -> Followed 5-step process -> Used validation script -> Fixed 2 issues -> Produced output\",\n \"loser_execution_pattern\": \"Read skill -> Unclear on approach -> Tried 3 different methods -> No validation -> Output had errors\"\n }\n}\n```\n\n## Guidelines\n\n- **Be specific**: Quote from skills and transcripts, don't just say \"instructions were unclear\"\n- **Be actionable**: Suggestions should be concrete changes, not vague advice\n- **Focus on skill improvements**: The goal is to improve the losing skill, not critique the agent\n- **Prioritize by impact**: Which changes would most likely have changed the outcome?\n- **Consider causation**: Did the skill weakness actually cause the worse output, or is it incidental?\n- **Stay objective**: Analyze what happened, don't editorialize\n- **Think about generalization**: Would this improvement help on other evals too?\n\n## Categories for Suggestions\n\nUse these categories to organize improvement suggestions:\n\n| Category | Description |\n|----------|-------------|\n| `instructions` | Changes to the skill's prose instructions |\n| `tools` | Scripts, templates, or utilities to add/modify |\n| `examples` | Example inputs/outputs to include |\n| `error_handling` | Guidance for handling failures |\n| `structure` | Reorganization of skill content 
|\n| `references` | External docs or resources to add |\n\n## Priority Levels\n\n- **high**: Would likely change the outcome of this comparison\n- **medium**: Would improve quality but may not change win/loss\n- **low**: Nice to have, marginal improvement\n\n---\n\n# Analyzing Benchmark Results\n\nWhen analyzing benchmark results, the analyzer's purpose is to **surface patterns and anomalies** across multiple runs, not suggest skill improvements.\n\n## Role\n\nReview all benchmark run results and generate freeform notes that help the user understand skill performance. Focus on patterns that wouldn't be visible from aggregate metrics alone.\n\n## Inputs\n\nYou receive these parameters in your prompt:\n\n- **benchmark_data_path**: Path to the in-progress benchmark.json with all run results\n- **skill_path**: Path to the skill being benchmarked\n- **output_path**: Where to save the notes (as JSON array of strings)\n\n## Process\n\n### Step 1: Read Benchmark Data\n\n1. Read the benchmark.json containing all run results\n2. Note the configurations tested (with_skill, without_skill)\n3. Understand the run_summary aggregates already calculated\n\n### Step 2: Analyze Per-Assertion Patterns\n\nFor each expectation across all runs:\n- Does it **always pass** in both configurations? (may not differentiate skill value)\n- Does it **always fail** in both configurations? (may be broken or beyond capability)\n- Does it **always pass with skill but fail without**? (skill clearly adds value here)\n- Does it **always fail with skill but pass without**? (skill may be hurting)\n- Is it **highly variable**? (flaky expectation or non-deterministic behavior)\n\n### Step 3: Analyze Cross-Eval Patterns\n\nLook for patterns across evals:\n- Are certain eval types consistently harder/easier?\n- Do some evals show high variance while others are stable?\n- Are there surprising results that contradict expectations?\n\n### Step 4: Analyze Metrics Patterns\n\nLook at time_seconds, tokens, tool_calls:\n- Does the skill significantly increase execution time?\n- Is there high variance in resource usage?\n- Are there outlier runs that skew the aggregates?\n\n### Step 5: Generate Notes\n\nWrite freeform observations as a list of strings. 
Each note should:\n- State a specific observation\n- Be grounded in the data (not speculation)\n- Help the user understand something the aggregate metrics don't show\n\nExamples:\n- \"Assertion 'Output is a PDF file' passes 100% in both configurations - may not differentiate skill value\"\n- \"Eval 3 shows high variance (50% ± 40%) - run 2 had an unusual failure that may be flaky\"\n- \"Without-skill runs consistently fail on table extraction expectations (0% pass rate)\"\n- \"Skill adds 13s average execution time but improves pass rate by 50%\"\n- \"Token usage is 80% higher with skill, primarily due to script output parsing\"\n- \"All 3 without-skill runs for eval 1 produced empty output\"\n\n### Step 6: Write Notes\n\nSave notes to `{output_path}` as a JSON array of strings:\n\n```json\n[\n \"Assertion 'Output is a PDF file' passes 100% in both configurations - may not differentiate skill value\",\n \"Eval 3 shows high variance (50% ± 40%) - run 2 had an unusual failure\",\n \"Without-skill runs consistently fail on table extraction expectations\",\n \"Skill adds 13s average execution time but improves pass rate by 50%\"\n]\n```\n\n## Guidelines\n\n**DO:**\n- Report what you observe in the data\n- Be specific about which evals, expectations, or runs you're referring to\n- Note patterns that aggregate metrics would hide\n- Provide context that helps interpret the numbers\n\n**DO NOT:**\n- Suggest improvements to the skill (that's for the improvement step, not benchmarking)\n- Make subjective quality judgments (\"the output was good/bad\")\n- Speculate about causes without evidence\n- Repeat information already in the run_summary aggregates\n" + }, + { + "name": "comparator.md", + "node_type": "file", + "content": "# Blind Comparator Agent\n\nCompare two outputs WITHOUT knowing which skill produced them.\n\n## Role\n\nThe Blind Comparator judges which output better accomplishes the eval task. You receive two outputs labeled A and B, but you do NOT know which skill produced which. This prevents bias toward a particular skill or approach.\n\nYour judgment is based purely on output quality and task completion.\n\n## Inputs\n\nYou receive these parameters in your prompt:\n\n- **output_a_path**: Path to the first output file or directory\n- **output_b_path**: Path to the second output file or directory\n- **eval_prompt**: The original task/prompt that was executed\n- **expectations**: List of expectations to check (optional - may be empty)\n\n## Process\n\n### Step 1: Read Both Outputs\n\n1. Examine output A (file or directory)\n2. Examine output B (file or directory)\n3. Note the type, structure, and content of each\n4. If outputs are directories, examine all relevant files inside\n\n### Step 2: Understand the Task\n\n1. Read the eval_prompt carefully\n2. 
Identify what the task requires:\n - What should be produced?\n - What qualities matter (accuracy, completeness, format)?\n - What would distinguish a good output from a poor one?\n\n### Step 3: Generate Evaluation Rubric\n\nBased on the task, generate a rubric with two dimensions:\n\n**Content Rubric** (what the output contains):\n| Criterion | 1 (Poor) | 3 (Acceptable) | 5 (Excellent) |\n|-----------|----------|----------------|---------------|\n| Correctness | Major errors | Minor errors | Fully correct |\n| Completeness | Missing key elements | Mostly complete | All elements present |\n| Accuracy | Significant inaccuracies | Minor inaccuracies | Accurate throughout |\n\n**Structure Rubric** (how the output is organized):\n| Criterion | 1 (Poor) | 3 (Acceptable) | 5 (Excellent) |\n|-----------|----------|----------------|---------------|\n| Organization | Disorganized | Reasonably organized | Clear, logical structure |\n| Formatting | Inconsistent/broken | Mostly consistent | Professional, polished |\n| Usability | Difficult to use | Usable with effort | Easy to use |\n\nAdapt criteria to the specific task. For example:\n- PDF form → \"Field alignment\", \"Text readability\", \"Data placement\"\n- Document → \"Section structure\", \"Heading hierarchy\", \"Paragraph flow\"\n- Data output → \"Schema correctness\", \"Data types\", \"Completeness\"\n\n### Step 4: Evaluate Each Output Against the Rubric\n\nFor each output (A and B):\n\n1. **Score each criterion** on the rubric (1-5 scale)\n2. **Calculate dimension totals**: Content score, Structure score\n3. **Calculate overall score**: Average of dimension scores, scaled to 1-10\n\n### Step 5: Check Assertions (if provided)\n\nIf expectations are provided:\n\n1. Check each expectation against output A\n2. Check each expectation against output B\n3. Count pass rates for each output\n4. Use expectation scores as secondary evidence (not the primary decision factor)\n\n### Step 6: Determine the Winner\n\nCompare A and B based on (in priority order):\n\n1. **Primary**: Overall rubric score (content + structure)\n2. **Secondary**: Assertion pass rates (if applicable)\n3. **Tiebreaker**: If truly equal, declare a TIE\n\nBe decisive - ties should be rare. One output is usually better, even if marginally.\n\n### Step 7: Write Comparison Results\n\nSave results to a JSON file at the path specified (or `comparison.json` if not specified).\n\n## Output Format\n\nWrite a JSON file with this structure:\n\n```json\n{\n \"winner\": \"A\",\n \"reasoning\": \"Output A provides a complete solution with proper formatting and all required fields. 
Output B is missing the date field and has formatting inconsistencies.\",\n \"rubric\": {\n \"A\": {\n \"content\": {\n \"correctness\": 5,\n \"completeness\": 5,\n \"accuracy\": 4\n },\n \"structure\": {\n \"organization\": 4,\n \"formatting\": 5,\n \"usability\": 4\n },\n \"content_score\": 4.7,\n \"structure_score\": 4.3,\n \"overall_score\": 9.0\n },\n \"B\": {\n \"content\": {\n \"correctness\": 3,\n \"completeness\": 2,\n \"accuracy\": 3\n },\n \"structure\": {\n \"organization\": 3,\n \"formatting\": 2,\n \"usability\": 3\n },\n \"content_score\": 2.7,\n \"structure_score\": 2.7,\n \"overall_score\": 5.4\n }\n },\n \"output_quality\": {\n \"A\": {\n \"score\": 9,\n \"strengths\": [\"Complete solution\", \"Well-formatted\", \"All fields present\"],\n \"weaknesses\": [\"Minor style inconsistency in header\"]\n },\n \"B\": {\n \"score\": 5,\n \"strengths\": [\"Readable output\", \"Correct basic structure\"],\n \"weaknesses\": [\"Missing date field\", \"Formatting inconsistencies\", \"Partial data extraction\"]\n }\n },\n \"expectation_results\": {\n \"A\": {\n \"passed\": 4,\n \"total\": 5,\n \"pass_rate\": 0.80,\n \"details\": [\n {\"text\": \"Output includes name\", \"passed\": true},\n {\"text\": \"Output includes date\", \"passed\": true},\n {\"text\": \"Format is PDF\", \"passed\": true},\n {\"text\": \"Contains signature\", \"passed\": false},\n {\"text\": \"Readable text\", \"passed\": true}\n ]\n },\n \"B\": {\n \"passed\": 3,\n \"total\": 5,\n \"pass_rate\": 0.60,\n \"details\": [\n {\"text\": \"Output includes name\", \"passed\": true},\n {\"text\": \"Output includes date\", \"passed\": false},\n {\"text\": \"Format is PDF\", \"passed\": true},\n {\"text\": \"Contains signature\", \"passed\": false},\n {\"text\": \"Readable text\", \"passed\": true}\n ]\n }\n }\n}\n```\n\nIf no expectations were provided, omit the `expectation_results` field entirely.\n\n## Field Descriptions\n\n- **winner**: \"A\", \"B\", or \"TIE\"\n- **reasoning**: Clear explanation of why the winner was chosen (or why it's a tie)\n- **rubric**: Structured rubric evaluation for each output\n - **content**: Scores for content criteria (correctness, completeness, accuracy)\n - **structure**: Scores for structure criteria (organization, formatting, usability)\n - **content_score**: Average of content criteria (1-5)\n - **structure_score**: Average of structure criteria (1-5)\n - **overall_score**: Combined score scaled to 1-10\n- **output_quality**: Summary quality assessment\n - **score**: 1-10 rating (should match rubric overall_score)\n - **strengths**: List of positive aspects\n - **weaknesses**: List of issues or shortcomings\n- **expectation_results**: (Only if expectations provided)\n - **passed**: Number of expectations that passed\n - **total**: Total number of expectations\n - **pass_rate**: Fraction passed (0.0 to 1.0)\n - **details**: Individual expectation results\n\n## Guidelines\n\n- **Stay blind**: DO NOT try to infer which skill produced which output. 
Judge purely on output quality.\n- **Be specific**: Cite specific examples when explaining strengths and weaknesses.\n- **Be decisive**: Choose a winner unless outputs are genuinely equivalent.\n- **Output quality first**: Assertion scores are secondary to overall task completion.\n- **Be objective**: Don't favor outputs based on style preferences; focus on correctness and completeness.\n- **Explain your reasoning**: The reasoning field should make it clear why you chose the winner.\n- **Handle edge cases**: If both outputs fail, pick the one that fails less badly. If both are excellent, pick the one that's marginally better.\n" + }, + { + "name": "grader.md", + "node_type": "file", + "content": "# Grader Agent\n\nEvaluate expectations against an execution transcript and outputs.\n\n## Role\n\nThe Grader reviews a transcript and output files, then determines whether each expectation passes or fails. Provide clear evidence for each judgment.\n\nYou have two jobs: grade the outputs, and critique the evals themselves. A passing grade on a weak assertion is worse than useless — it creates false confidence. When you notice an assertion that's trivially satisfied, or an important outcome that no assertion checks, say so.\n\n## Inputs\n\nYou receive these parameters in your prompt:\n\n- **expectations**: List of expectations to evaluate (strings)\n- **transcript_path**: Path to the execution transcript (markdown file)\n- **outputs_dir**: Directory containing output files from execution\n\n## Process\n\n### Step 1: Read the Transcript\n\n1. Read the transcript file completely\n2. Note the eval prompt, execution steps, and final result\n3. Identify any issues or errors documented\n\n### Step 2: Examine Output Files\n\n1. List files in outputs_dir\n2. Read/examine each file relevant to the expectations. If outputs aren't plain text, use the inspection tools provided in your prompt — don't rely solely on what the transcript says the executor produced.\n3. Note contents, structure, and quality\n\n### Step 3: Evaluate Each Assertion\n\nFor each expectation:\n\n1. **Search for evidence** in the transcript and outputs\n2. **Determine verdict**:\n - **PASS**: Clear evidence the expectation is true AND the evidence reflects genuine task completion, not just surface-level compliance\n - **FAIL**: No evidence, or evidence contradicts the expectation, or the evidence is superficial (e.g., correct filename but empty/wrong content)\n3. **Cite the evidence**: Quote the specific text or describe what you found\n\n### Step 4: Extract and Verify Claims\n\nBeyond the predefined expectations, extract implicit claims from the outputs and verify them:\n\n1. **Extract claims** from the transcript and outputs:\n - Factual statements (\"The form has 12 fields\")\n - Process claims (\"Used pypdf to fill the form\")\n - Quality claims (\"All fields were filled correctly\")\n\n2. **Verify each claim**:\n - **Factual claims**: Can be checked against the outputs or external sources\n - **Process claims**: Can be verified from the transcript\n - **Quality claims**: Evaluate whether the claim is justified\n\n3. **Flag unverifiable claims**: Note claims that cannot be verified with available information\n\nThis catches issues that predefined expectations might miss.\n\n### Step 5: Read User Notes\n\nIf `{outputs_dir}/user_notes.md` exists:\n1. Read it and note any uncertainties or issues flagged by the executor\n2. Include relevant concerns in the grading output\n3. 
These may reveal problems even when expectations pass\n\n### Step 6: Critique the Evals\n\nAfter grading, consider whether the evals themselves could be improved. Only surface suggestions when there's a clear gap.\n\nGood suggestions test meaningful outcomes — assertions that are hard to satisfy without actually doing the work correctly. Think about what makes an assertion *discriminating*: it passes when the skill genuinely succeeds and fails when it doesn't.\n\nSuggestions worth raising:\n- An assertion that passed but would also pass for a clearly wrong output (e.g., checking filename existence but not file content)\n- An important outcome you observed — good or bad — that no assertion covers at all\n- An assertion that can't actually be verified from the available outputs\n\nKeep the bar high. The goal is to flag things the eval author would say \"good catch\" about, not to nitpick every assertion.\n\n### Step 7: Write Grading Results\n\nSave results to `{outputs_dir}/../grading.json` (sibling to outputs_dir).\n\n## Grading Criteria\n\n**PASS when**:\n- The transcript or outputs clearly demonstrate the expectation is true\n- Specific evidence can be cited\n- The evidence reflects genuine substance, not just surface compliance (e.g., a file exists AND contains correct content, not just the right filename)\n\n**FAIL when**:\n- No evidence found for the expectation\n- Evidence contradicts the expectation\n- The expectation cannot be verified from available information\n- The evidence is superficial — the assertion is technically satisfied but the underlying task outcome is wrong or incomplete\n- The output appears to meet the assertion by coincidence rather than by actually doing the work\n\n**When uncertain**: The burden of proof to pass is on the expectation.\n\n### Step 8: Read Executor Metrics and Timing\n\n1. If `{outputs_dir}/metrics.json` exists, read it and include in grading output\n2. If `{outputs_dir}/../timing.json` exists, read it and include timing data\n\n## Output Format\n\nWrite a JSON file with this structure:\n\n```json\n{\n \"expectations\": [\n {\n \"text\": \"The output includes the name 'John Smith'\",\n \"passed\": true,\n \"evidence\": \"Found in transcript Step 3: 'Extracted names: John Smith, Sarah Johnson'\"\n },\n {\n \"text\": \"The spreadsheet has a SUM formula in cell B10\",\n \"passed\": false,\n \"evidence\": \"No spreadsheet was created. 
The output was a text file.\"\n },\n {\n \"text\": \"The assistant used the skill's OCR script\",\n \"passed\": true,\n \"evidence\": \"Transcript Step 2 shows: 'Tool: Bash - python ocr_script.py image.png'\"\n }\n ],\n \"summary\": {\n \"passed\": 2,\n \"failed\": 1,\n \"total\": 3,\n \"pass_rate\": 0.67\n },\n \"execution_metrics\": {\n \"tool_calls\": {\n \"Read\": 5,\n \"Write\": 2,\n \"Bash\": 8\n },\n \"total_tool_calls\": 15,\n \"total_steps\": 6,\n \"errors_encountered\": 0,\n \"output_chars\": 12450,\n \"transcript_chars\": 3200\n },\n \"timing\": {\n \"executor_duration_seconds\": 165.0,\n \"grader_duration_seconds\": 26.0,\n \"total_duration_seconds\": 191.0\n },\n \"claims\": [\n {\n \"claim\": \"The form has 12 fillable fields\",\n \"type\": \"factual\",\n \"verified\": true,\n \"evidence\": \"Counted 12 fields in field_info.json\"\n },\n {\n \"claim\": \"All required fields were populated\",\n \"type\": \"quality\",\n \"verified\": false,\n \"evidence\": \"Reference section was left blank despite data being available\"\n }\n ],\n \"user_notes_summary\": {\n \"uncertainties\": [\"Used 2023 data, may be stale\"],\n \"needs_review\": [],\n \"workarounds\": [\"Fell back to text overlay for non-fillable fields\"]\n },\n \"eval_feedback\": {\n \"suggestions\": [\n {\n \"assertion\": \"The output includes the name 'John Smith'\",\n \"reason\": \"A hallucinated document that mentions the name would also pass — consider checking it appears as the primary contact with matching phone and email from the input\"\n },\n {\n \"reason\": \"No assertion checks whether the extracted phone numbers match the input — I observed incorrect numbers in the output that went uncaught\"\n }\n ],\n \"overall\": \"Assertions check presence but not correctness. Consider adding content verification.\"\n }\n}\n```\n\n## Field Descriptions\n\n- **expectations**: Array of graded expectations\n - **text**: The original expectation text\n - **passed**: Boolean - true if expectation passes\n - **evidence**: Specific quote or description supporting the verdict\n- **summary**: Aggregate statistics\n - **passed**: Count of passed expectations\n - **failed**: Count of failed expectations\n - **total**: Total expectations evaluated\n - **pass_rate**: Fraction passed (0.0 to 1.0)\n- **execution_metrics**: Copied from executor's metrics.json (if available)\n - **output_chars**: Total character count of output files (proxy for tokens)\n - **transcript_chars**: Character count of transcript\n- **timing**: Wall clock timing from timing.json (if available)\n - **executor_duration_seconds**: Time spent in executor subagent\n - **total_duration_seconds**: Total elapsed time for the run\n- **claims**: Extracted and verified claims from the output\n - **claim**: The statement being verified\n - **type**: \"factual\", \"process\", or \"quality\"\n - **verified**: Boolean - whether the claim holds\n - **evidence**: Supporting or contradicting evidence\n- **user_notes_summary**: Issues flagged by the executor\n - **uncertainties**: Things the executor wasn't sure about\n - **needs_review**: Items requiring human attention\n - **workarounds**: Places where the skill didn't work as expected\n- **eval_feedback**: Improvement suggestions for the evals (only when warranted)\n - **suggestions**: List of concrete suggestions, each with a `reason` and optionally an `assertion` it relates to\n - **overall**: Brief assessment — can be \"No suggestions, evals look solid\" if nothing to flag\n\n## Guidelines\n\n- **Be objective**: Base 
verdicts on evidence, not assumptions\n- **Be specific**: Quote the exact text that supports your verdict\n- **Be thorough**: Check both transcript and output files\n- **Be consistent**: Apply the same standard to each expectation\n- **Explain failures**: Make it clear why evidence was insufficient\n- **No partial credit**: Each expectation is pass or fail, not partial\n" + } + ] + }, + { + "name": "assets", + "node_type": "folder", + "children": [ + { + "name": "eval_review.html", + "node_type": "file", + "content": "\n\n\n \n \n Eval Set Review - __SKILL_NAME_PLACEHOLDER__\n \n \n \n \n\n\n

<!-- eval_review.html markup lost in extraction. Recoverable structure: a header "Eval Set Review: __SKILL_NAME_PLACEHOLDER__"; a "Current description: __SKILL_DESCRIPTION_PLACEHOLDER__" block; and a review table with columns Query | Should Trigger | Actions. -->
\n\n \n\n\n" + } + ] + }, + { + "name": "eval-viewer", + "node_type": "folder", + "children": [ + { + "name": "generate_review.py", + "node_type": "file", + "content": "#!/usr/bin/env python3\n\"\"\"Generate and serve a review page for eval results.\n\nReads the workspace directory, discovers runs (directories with outputs/),\nembeds all output data into a self-contained HTML page, and serves it via\na tiny HTTP server. Feedback auto-saves to feedback.json in the workspace.\n\nUsage:\n python generate_review.py [--port PORT] [--skill-name NAME]\n python generate_review.py --previous-feedback /path/to/old/feedback.json\n\nNo dependencies beyond the Python stdlib are required.\n\"\"\"\n\nimport argparse\nimport base64\nimport json\nimport mimetypes\nimport os\nimport re\nimport signal\nimport subprocess\nimport sys\nimport time\nimport webbrowser\nfrom functools import partial\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\nfrom pathlib import Path\n\n# Files to exclude from output listings\nMETADATA_FILES = {\"transcript.md\", \"user_notes.md\", \"metrics.json\"}\n\n# Extensions we render as inline text\nTEXT_EXTENSIONS = {\n \".txt\", \".md\", \".json\", \".csv\", \".py\", \".js\", \".ts\", \".tsx\", \".jsx\",\n \".yaml\", \".yml\", \".xml\", \".html\", \".css\", \".sh\", \".rb\", \".go\", \".rs\",\n \".java\", \".c\", \".cpp\", \".h\", \".hpp\", \".sql\", \".r\", \".toml\",\n}\n\n# Extensions we render as inline images\nIMAGE_EXTENSIONS = {\".png\", \".jpg\", \".jpeg\", \".gif\", \".svg\", \".webp\"}\n\n# MIME type overrides for common types\nMIME_OVERRIDES = {\n \".svg\": \"image/svg+xml\",\n \".xlsx\": \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\",\n \".docx\": \"application/vnd.openxmlformats-officedocument.wordprocessingml.document\",\n \".pptx\": \"application/vnd.openxmlformats-officedocument.presentationml.presentation\",\n}\n\n\ndef get_mime_type(path: Path) -> str:\n ext = path.suffix.lower()\n if ext in MIME_OVERRIDES:\n return MIME_OVERRIDES[ext]\n mime, _ = mimetypes.guess_type(str(path))\n return mime or \"application/octet-stream\"\n\n\ndef find_runs(workspace: Path) -> list[dict]:\n \"\"\"Recursively find directories that contain an outputs/ subdirectory.\"\"\"\n runs: list[dict] = []\n _find_runs_recursive(workspace, workspace, runs)\n runs.sort(key=lambda r: (r.get(\"eval_id\", float(\"inf\")), r[\"id\"]))\n return runs\n\n\ndef _find_runs_recursive(root: Path, current: Path, runs: list[dict]) -> None:\n if not current.is_dir():\n return\n\n outputs_dir = current / \"outputs\"\n if outputs_dir.is_dir():\n run = build_run(root, current)\n if run:\n runs.append(run)\n return\n\n skip = {\"node_modules\", \".git\", \"__pycache__\", \"skill\", \"inputs\"}\n for child in sorted(current.iterdir()):\n if child.is_dir() and child.name not in skip:\n _find_runs_recursive(root, child, runs)\n\n\ndef build_run(root: Path, run_dir: Path) -> dict | None:\n \"\"\"Build a run dict with prompt, outputs, and grading data.\"\"\"\n prompt = \"\"\n eval_id = None\n\n # Try eval_metadata.json\n for candidate in [run_dir / \"eval_metadata.json\", run_dir.parent / \"eval_metadata.json\"]:\n if candidate.exists():\n try:\n metadata = json.loads(candidate.read_text())\n prompt = metadata.get(\"prompt\", \"\")\n eval_id = metadata.get(\"eval_id\")\n except (json.JSONDecodeError, OSError):\n pass\n if prompt:\n break\n\n # Fall back to transcript.md\n if not prompt:\n for candidate in [run_dir / \"transcript.md\", run_dir / \"outputs\" / \"transcript.md\"]:\n if 
candidate.exists():\n try:\n text = candidate.read_text()\n match = re.search(r\"## Eval Prompt\\n\\n([\\s\\S]*?)(?=\\n##|$)\", text)\n if match:\n prompt = match.group(1).strip()\n except OSError:\n pass\n if prompt:\n break\n\n if not prompt:\n prompt = \"(No prompt found)\"\n\n run_id = str(run_dir.relative_to(root)).replace(\"/\", \"-\").replace(\"\\\\\", \"-\")\n\n # Collect output files\n outputs_dir = run_dir / \"outputs\"\n output_files: list[dict] = []\n if outputs_dir.is_dir():\n for f in sorted(outputs_dir.iterdir()):\n if f.is_file() and f.name not in METADATA_FILES:\n output_files.append(embed_file(f))\n\n # Load grading if present\n grading = None\n for candidate in [run_dir / \"grading.json\", run_dir.parent / \"grading.json\"]:\n if candidate.exists():\n try:\n grading = json.loads(candidate.read_text())\n except (json.JSONDecodeError, OSError):\n pass\n if grading:\n break\n\n return {\n \"id\": run_id,\n \"prompt\": prompt,\n \"eval_id\": eval_id,\n \"outputs\": output_files,\n \"grading\": grading,\n }\n\n\ndef embed_file(path: Path) -> dict:\n \"\"\"Read a file and return an embedded representation.\"\"\"\n ext = path.suffix.lower()\n mime = get_mime_type(path)\n\n if ext in TEXT_EXTENSIONS:\n try:\n content = path.read_text(errors=\"replace\")\n except OSError:\n content = \"(Error reading file)\"\n return {\n \"name\": path.name,\n \"type\": \"text\",\n \"content\": content,\n }\n elif ext in IMAGE_EXTENSIONS:\n try:\n raw = path.read_bytes()\n b64 = base64.b64encode(raw).decode(\"ascii\")\n except OSError:\n return {\"name\": path.name, \"type\": \"error\", \"content\": \"(Error reading file)\"}\n return {\n \"name\": path.name,\n \"type\": \"image\",\n \"mime\": mime,\n \"data_uri\": f\"data:{mime};base64,{b64}\",\n }\n elif ext == \".pdf\":\n try:\n raw = path.read_bytes()\n b64 = base64.b64encode(raw).decode(\"ascii\")\n except OSError:\n return {\"name\": path.name, \"type\": \"error\", \"content\": \"(Error reading file)\"}\n return {\n \"name\": path.name,\n \"type\": \"pdf\",\n \"data_uri\": f\"data:{mime};base64,{b64}\",\n }\n elif ext == \".xlsx\":\n try:\n raw = path.read_bytes()\n b64 = base64.b64encode(raw).decode(\"ascii\")\n except OSError:\n return {\"name\": path.name, \"type\": \"error\", \"content\": \"(Error reading file)\"}\n return {\n \"name\": path.name,\n \"type\": \"xlsx\",\n \"data_b64\": b64,\n }\n else:\n # Binary / unknown — base64 download link\n try:\n raw = path.read_bytes()\n b64 = base64.b64encode(raw).decode(\"ascii\")\n except OSError:\n return {\"name\": path.name, \"type\": \"error\", \"content\": \"(Error reading file)\"}\n return {\n \"name\": path.name,\n \"type\": \"binary\",\n \"mime\": mime,\n \"data_uri\": f\"data:{mime};base64,{b64}\",\n }\n\n\ndef load_previous_iteration(workspace: Path) -> dict[str, dict]:\n \"\"\"Load previous iteration's feedback and outputs.\n\n Returns a map of run_id -> {\"feedback\": str, \"outputs\": list[dict]}.\n \"\"\"\n result: dict[str, dict] = {}\n\n # Load feedback\n feedback_map: dict[str, str] = {}\n feedback_path = workspace / \"feedback.json\"\n if feedback_path.exists():\n try:\n data = json.loads(feedback_path.read_text())\n feedback_map = {\n r[\"run_id\"]: r[\"feedback\"]\n for r in data.get(\"reviews\", [])\n if r.get(\"feedback\", \"\").strip()\n }\n except (json.JSONDecodeError, OSError, KeyError):\n pass\n\n # Load runs (to get outputs)\n prev_runs = find_runs(workspace)\n for run in prev_runs:\n result[run[\"id\"]] = {\n \"feedback\": feedback_map.get(run[\"id\"], \"\"),\n 
\"outputs\": run.get(\"outputs\", []),\n }\n\n # Also add feedback for run_ids that had feedback but no matching run\n for run_id, fb in feedback_map.items():\n if run_id not in result:\n result[run_id] = {\"feedback\": fb, \"outputs\": []}\n\n return result\n\n\ndef generate_html(\n runs: list[dict],\n skill_name: str,\n previous: dict[str, dict] | None = None,\n benchmark: dict | None = None,\n) -> str:\n \"\"\"Generate the complete standalone HTML page with embedded data.\"\"\"\n template_path = Path(__file__).parent / \"viewer.html\"\n template = template_path.read_text()\n\n # Build previous_feedback and previous_outputs maps for the template\n previous_feedback: dict[str, str] = {}\n previous_outputs: dict[str, list[dict]] = {}\n if previous:\n for run_id, data in previous.items():\n if data.get(\"feedback\"):\n previous_feedback[run_id] = data[\"feedback\"]\n if data.get(\"outputs\"):\n previous_outputs[run_id] = data[\"outputs\"]\n\n embedded = {\n \"skill_name\": skill_name,\n \"runs\": runs,\n \"previous_feedback\": previous_feedback,\n \"previous_outputs\": previous_outputs,\n }\n if benchmark:\n embedded[\"benchmark\"] = benchmark\n\n data_json = json.dumps(embedded)\n\n return template.replace(\"/*__EMBEDDED_DATA__*/\", f\"const EMBEDDED_DATA = {data_json};\")\n\n\n# ---------------------------------------------------------------------------\n# HTTP server (stdlib only, zero dependencies)\n# ---------------------------------------------------------------------------\n\ndef _kill_port(port: int) -> None:\n \"\"\"Kill any process listening on the given port.\"\"\"\n try:\n result = subprocess.run(\n [\"lsof\", \"-ti\", f\":{port}\"],\n capture_output=True, text=True, timeout=5,\n )\n for pid_str in result.stdout.strip().split(\"\\n\"):\n if pid_str.strip():\n try:\n os.kill(int(pid_str.strip()), signal.SIGTERM)\n except (ProcessLookupError, ValueError):\n pass\n if result.stdout.strip():\n time.sleep(0.5)\n except subprocess.TimeoutExpired:\n pass\n except FileNotFoundError:\n print(\"Note: lsof not found, cannot check if port is in use\", file=sys.stderr)\n\nclass ReviewHandler(BaseHTTPRequestHandler):\n \"\"\"Serves the review HTML and handles feedback saves.\n\n Regenerates the HTML on each page load so that refreshing the browser\n picks up new eval outputs without restarting the server.\n \"\"\"\n\n def __init__(\n self,\n workspace: Path,\n skill_name: str,\n feedback_path: Path,\n previous: dict[str, dict],\n benchmark_path: Path | None,\n *args,\n **kwargs,\n ):\n self.workspace = workspace\n self.skill_name = skill_name\n self.feedback_path = feedback_path\n self.previous = previous\n self.benchmark_path = benchmark_path\n super().__init__(*args, **kwargs)\n\n def do_GET(self) -> None:\n if self.path == \"/\" or self.path == \"/index.html\":\n # Regenerate HTML on each request (re-scans workspace for new outputs)\n runs = find_runs(self.workspace)\n benchmark = None\n if self.benchmark_path and self.benchmark_path.exists():\n try:\n benchmark = json.loads(self.benchmark_path.read_text())\n except (json.JSONDecodeError, OSError):\n pass\n html = generate_html(runs, self.skill_name, self.previous, benchmark)\n content = html.encode(\"utf-8\")\n self.send_response(200)\n self.send_header(\"Content-Type\", \"text/html; charset=utf-8\")\n self.send_header(\"Content-Length\", str(len(content)))\n self.end_headers()\n self.wfile.write(content)\n elif self.path == \"/api/feedback\":\n data = b\"{}\"\n if self.feedback_path.exists():\n data = self.feedback_path.read_bytes()\n 
self.send_response(200)\n self.send_header(\"Content-Type\", \"application/json\")\n self.send_header(\"Content-Length\", str(len(data)))\n self.end_headers()\n self.wfile.write(data)\n else:\n self.send_error(404)\n\n def do_POST(self) -> None:\n if self.path == \"/api/feedback\":\n length = int(self.headers.get(\"Content-Length\", 0))\n body = self.rfile.read(length)\n try:\n data = json.loads(body)\n if not isinstance(data, dict) or \"reviews\" not in data:\n raise ValueError(\"Expected JSON object with 'reviews' key\")\n self.feedback_path.write_text(json.dumps(data, indent=2) + \"\\n\")\n resp = b'{\"ok\":true}'\n self.send_response(200)\n except (json.JSONDecodeError, OSError, ValueError) as e:\n resp = json.dumps({\"error\": str(e)}).encode()\n self.send_response(500)\n self.send_header(\"Content-Type\", \"application/json\")\n self.send_header(\"Content-Length\", str(len(resp)))\n self.end_headers()\n self.wfile.write(resp)\n else:\n self.send_error(404)\n\n def log_message(self, format: str, *args: object) -> None:\n # Suppress request logging to keep terminal clean\n pass\n\n\ndef main() -> None:\n parser = argparse.ArgumentParser(description=\"Generate and serve eval review\")\n parser.add_argument(\"workspace\", type=Path, help=\"Path to workspace directory\")\n parser.add_argument(\"--port\", \"-p\", type=int, default=3117, help=\"Server port (default: 3117)\")\n parser.add_argument(\"--skill-name\", \"-n\", type=str, default=None, help=\"Skill name for header\")\n parser.add_argument(\n \"--previous-workspace\", type=Path, default=None,\n help=\"Path to previous iteration's workspace (shows old outputs and feedback as context)\",\n )\n parser.add_argument(\n \"--benchmark\", type=Path, default=None,\n help=\"Path to benchmark.json to show in the Benchmark tab\",\n )\n parser.add_argument(\n \"--static\", \"-s\", type=Path, default=None,\n help=\"Write standalone HTML to this path instead of starting a server\",\n )\n args = parser.parse_args()\n\n workspace = args.workspace.resolve()\n if not workspace.is_dir():\n print(f\"Error: {workspace} is not a directory\", file=sys.stderr)\n sys.exit(1)\n\n runs = find_runs(workspace)\n if not runs:\n print(f\"No runs found in {workspace}\", file=sys.stderr)\n sys.exit(1)\n\n skill_name = args.skill_name or workspace.name.replace(\"-workspace\", \"\")\n feedback_path = workspace / \"feedback.json\"\n\n previous: dict[str, dict] = {}\n if args.previous_workspace:\n previous = load_previous_iteration(args.previous_workspace.resolve())\n\n benchmark_path = args.benchmark.resolve() if args.benchmark else None\n benchmark = None\n if benchmark_path and benchmark_path.exists():\n try:\n benchmark = json.loads(benchmark_path.read_text())\n except (json.JSONDecodeError, OSError):\n pass\n\n if args.static:\n html = generate_html(runs, skill_name, previous, benchmark)\n args.static.parent.mkdir(parents=True, exist_ok=True)\n args.static.write_text(html)\n print(f\"\\n Static viewer written to: {args.static}\\n\")\n sys.exit(0)\n\n # Kill any existing process on the target port\n port = args.port\n _kill_port(port)\n handler = partial(ReviewHandler, workspace, skill_name, feedback_path, previous, benchmark_path)\n try:\n server = HTTPServer((\"127.0.0.1\", port), handler)\n except OSError:\n # Port still in use after kill attempt — find a free one\n server = HTTPServer((\"127.0.0.1\", 0), handler)\n port = server.server_address[1]\n\n url = f\"http://localhost:{port}\"\n print(f\"\\n Eval Viewer\")\n print(f\" 
─────────────────────────────────\")\n print(f\" URL: {url}\")\n print(f\" Workspace: {workspace}\")\n print(f\" Feedback: {feedback_path}\")\n if previous:\n print(f\" Previous: {args.previous_workspace} ({len(previous)} runs)\")\n if benchmark_path:\n print(f\" Benchmark: {benchmark_path}\")\n print(f\"\\n Press Ctrl+C to stop.\\n\")\n\n webbrowser.open(url)\n\n try:\n server.serve_forever()\n except KeyboardInterrupt:\n print(\"\\nStopped.\")\n server.server_close()\n\n\nif __name__ == \"__main__\":\n main()\n" + }, + { + "name": "viewer.html", + "node_type": "file", + "content": "\n\n\n \n \n Eval Review\n \n \n \n \n \n\n\n
<!-- viewer.html markup lost in extraction. Recoverable structure: a header "Eval Review:" followed by the skill name, with the note "Review each output and leave feedback below. Navigate with arrow keys or buttons. When done, copy feedback and paste into Claude Code."; per-run panels for Prompt, Output (empty state: "No output files found"), Previous Output, Formal Grades, and Your Feedback (with a "Previous feedback" block); a Benchmark tab (empty state: "No benchmark data available. Run a benchmark to see quantitative results here."); and a completion overlay titled "Review Complete" with the message "Your feedback has been saved. Go back to your Claude Code session and tell Claude you're done reviewing." -->
\n\n \n\n\n" + } + ] }, { "name": "references", "node_type": "folder", "children": [ { - "name": "output-patterns.md", + "name": "schemas.md", "node_type": "file", - "content": "# Output Patterns\n\nUse these patterns when skills need to produce consistent, high-quality output.\n\n## Template Pattern\n\nProvide templates for output format. Match the level of strictness to your needs.\n\n**For strict requirements (like API responses or data formats):**\n\n```markdown\n## Report structure\n\nALWAYS use this exact template structure:\n\n# [Analysis Title]\n\n## Executive summary\n[One-paragraph overview of key findings]\n\n## Key findings\n- Finding 1 with supporting data\n- Finding 2 with supporting data\n- Finding 3 with supporting data\n\n## Recommendations\n1. Specific actionable recommendation\n2. Specific actionable recommendation\n```\n\n**For flexible guidance (when adaptation is useful):**\n\n```markdown\n## Report structure\n\nHere is a sensible default format, but use your best judgment:\n\n# [Analysis Title]\n\n## Executive summary\n[Overview]\n\n## Key findings\n[Adapt sections based on what you discover]\n\n## Recommendations\n[Tailor to the specific context]\n\nAdjust sections as needed for the specific analysis type.\n```\n\n## Examples Pattern\n\nFor skills where output quality depends on seeing examples, provide input/output pairs:\n\n```markdown\n## Commit message format\n\nGenerate commit messages following these examples:\n\n**Example 1:**\nInput: Added user authentication with JWT tokens\nOutput:\n```\nfeat(auth): implement JWT-based authentication\n\nAdd login endpoint and token validation middleware\n```\n\n**Example 2:**\nInput: Fixed bug where dates displayed incorrectly in reports\nOutput:\n```\nfix(reports): correct date formatting in timezone conversion\n\nUse UTC timestamps consistently across report generation\n```\n\nFollow this style: type(scope): brief description, then detailed explanation.\n```\n\nExamples help Claude understand the desired style and level of detail more clearly than descriptions alone.\n" - }, - { - "name": "workflows.md", - "node_type": "file", - "content": "# Workflow Patterns\n\n## Sequential Workflows\n\nFor complex tasks, break operations into clear, sequential steps. It is often helpful to give Claude an overview of the process towards the beginning of SKILL.md:\n\n```markdown\nFilling a PDF form involves these steps:\n\n1. Analyze the form (run analyze_form.py)\n2. Create field mapping (edit fields.json)\n3. Validate mapping (run validate_fields.py)\n4. Fill the form (run fill_form.py)\n5. Verify output (run verify_output.py)\n```\n\n## Conditional Workflows\n\nFor tasks with branching logic, guide Claude through decision points:\n\n```markdown\n1. Determine the modification type:\n **Creating new content?** → Follow \"Creation workflow\" below\n **Editing existing content?** → Follow \"Editing workflow\" below\n\n2. Creation workflow: [steps]\n3. Editing workflow: [steps]\n```" + "content": "# JSON Schemas\n\nThis document defines the JSON schemas used by skill-creator.\n\n---\n\n## evals.json\n\nDefines the evals for a skill. 
Located at `evals/evals.json` within the skill directory.\n\n```json\n{\n \"skill_name\": \"example-skill\",\n \"evals\": [\n {\n \"id\": 1,\n \"prompt\": \"User's example prompt\",\n \"expected_output\": \"Description of expected result\",\n \"files\": [\"evals/files/sample1.pdf\"],\n \"expectations\": [\n \"The output includes X\",\n \"The skill used script Y\"\n ]\n }\n ]\n}\n```\n\n**Fields:**\n- `skill_name`: Name matching the skill's frontmatter\n- `evals[].id`: Unique integer identifier\n- `evals[].prompt`: The task to execute\n- `evals[].expected_output`: Human-readable description of success\n- `evals[].files`: Optional list of input file paths (relative to skill root)\n- `evals[].expectations`: List of verifiable statements\n\n---\n\n## history.json\n\nTracks version progression in Improve mode. Located at workspace root.\n\n```json\n{\n \"started_at\": \"2026-01-15T10:30:00Z\",\n \"skill_name\": \"pdf\",\n \"current_best\": \"v2\",\n \"iterations\": [\n {\n \"version\": \"v0\",\n \"parent\": null,\n \"expectation_pass_rate\": 0.65,\n \"grading_result\": \"baseline\",\n \"is_current_best\": false\n },\n {\n \"version\": \"v1\",\n \"parent\": \"v0\",\n \"expectation_pass_rate\": 0.75,\n \"grading_result\": \"won\",\n \"is_current_best\": false\n },\n {\n \"version\": \"v2\",\n \"parent\": \"v1\",\n \"expectation_pass_rate\": 0.85,\n \"grading_result\": \"won\",\n \"is_current_best\": true\n }\n ]\n}\n```\n\n**Fields:**\n- `started_at`: ISO timestamp of when improvement started\n- `skill_name`: Name of the skill being improved\n- `current_best`: Version identifier of the best performer\n- `iterations[].version`: Version identifier (v0, v1, ...)\n- `iterations[].parent`: Parent version this was derived from\n- `iterations[].expectation_pass_rate`: Pass rate from grading\n- `iterations[].grading_result`: \"baseline\", \"won\", \"lost\", or \"tie\"\n- `iterations[].is_current_best`: Whether this is the current best version\n\n---\n\n## grading.json\n\nOutput from the grader agent. Located at `/grading.json`.\n\n```json\n{\n \"expectations\": [\n {\n \"text\": \"The output includes the name 'John Smith'\",\n \"passed\": true,\n \"evidence\": \"Found in transcript Step 3: 'Extracted names: John Smith, Sarah Johnson'\"\n },\n {\n \"text\": \"The spreadsheet has a SUM formula in cell B10\",\n \"passed\": false,\n \"evidence\": \"No spreadsheet was created. 
The output was a text file.\"\n }\n ],\n \"summary\": {\n \"passed\": 2,\n \"failed\": 1,\n \"total\": 3,\n \"pass_rate\": 0.67\n },\n \"execution_metrics\": {\n \"tool_calls\": {\n \"Read\": 5,\n \"Write\": 2,\n \"Bash\": 8\n },\n \"total_tool_calls\": 15,\n \"total_steps\": 6,\n \"errors_encountered\": 0,\n \"output_chars\": 12450,\n \"transcript_chars\": 3200\n },\n \"timing\": {\n \"executor_duration_seconds\": 165.0,\n \"grader_duration_seconds\": 26.0,\n \"total_duration_seconds\": 191.0\n },\n \"claims\": [\n {\n \"claim\": \"The form has 12 fillable fields\",\n \"type\": \"factual\",\n \"verified\": true,\n \"evidence\": \"Counted 12 fields in field_info.json\"\n }\n ],\n \"user_notes_summary\": {\n \"uncertainties\": [\"Used 2023 data, may be stale\"],\n \"needs_review\": [],\n \"workarounds\": [\"Fell back to text overlay for non-fillable fields\"]\n },\n \"eval_feedback\": {\n \"suggestions\": [\n {\n \"assertion\": \"The output includes the name 'John Smith'\",\n \"reason\": \"A hallucinated document that mentions the name would also pass\"\n }\n ],\n \"overall\": \"Assertions check presence but not correctness.\"\n }\n}\n```\n\n**Fields:**\n- `expectations[]`: Graded expectations with evidence\n- `summary`: Aggregate pass/fail counts\n- `execution_metrics`: Tool usage and output size (from executor's metrics.json)\n- `timing`: Wall clock timing (from timing.json)\n- `claims`: Extracted and verified claims from the output\n- `user_notes_summary`: Issues flagged by the executor\n- `eval_feedback`: (optional) Improvement suggestions for the evals, only present when the grader identifies issues worth raising\n\n---\n\n## metrics.json\n\nOutput from the executor agent. Located at `/outputs/metrics.json`.\n\n```json\n{\n \"tool_calls\": {\n \"Read\": 5,\n \"Write\": 2,\n \"Bash\": 8,\n \"Edit\": 1,\n \"Glob\": 2,\n \"Grep\": 0\n },\n \"total_tool_calls\": 18,\n \"total_steps\": 6,\n \"files_created\": [\"filled_form.pdf\", \"field_values.json\"],\n \"errors_encountered\": 0,\n \"output_chars\": 12450,\n \"transcript_chars\": 3200\n}\n```\n\n**Fields:**\n- `tool_calls`: Count per tool type\n- `total_tool_calls`: Sum of all tool calls\n- `total_steps`: Number of major execution steps\n- `files_created`: List of output files created\n- `errors_encountered`: Number of errors during execution\n- `output_chars`: Total character count of output files\n- `transcript_chars`: Character count of transcript\n\n---\n\n## timing.json\n\nWall clock timing for a run. Located at `/timing.json`.\n\n**How to capture:** When a subagent task completes, the task notification includes `total_tokens` and `duration_ms`. Save these immediately — they are not persisted anywhere else and cannot be recovered after the fact.\n\n```json\n{\n \"total_tokens\": 84852,\n \"duration_ms\": 23332,\n \"total_duration_seconds\": 23.3,\n \"executor_start\": \"2026-01-15T10:30:00Z\",\n \"executor_end\": \"2026-01-15T10:32:45Z\",\n \"executor_duration_seconds\": 165.0,\n \"grader_start\": \"2026-01-15T10:32:46Z\",\n \"grader_end\": \"2026-01-15T10:33:12Z\",\n \"grader_duration_seconds\": 26.0\n}\n```\n\n---\n\n## benchmark.json\n\nOutput from Benchmark mode. 
Located at `benchmarks//benchmark.json`.\n\n```json\n{\n \"metadata\": {\n \"skill_name\": \"pdf\",\n \"skill_path\": \"/path/to/pdf\",\n \"executor_model\": \"claude-sonnet-4-20250514\",\n \"analyzer_model\": \"most-capable-model\",\n \"timestamp\": \"2026-01-15T10:30:00Z\",\n \"evals_run\": [1, 2, 3],\n \"runs_per_configuration\": 3\n },\n\n \"runs\": [\n {\n \"eval_id\": 1,\n \"eval_name\": \"Ocean\",\n \"configuration\": \"with_skill\",\n \"run_number\": 1,\n \"result\": {\n \"pass_rate\": 0.85,\n \"passed\": 6,\n \"failed\": 1,\n \"total\": 7,\n \"time_seconds\": 42.5,\n \"tokens\": 3800,\n \"tool_calls\": 18,\n \"errors\": 0\n },\n \"expectations\": [\n {\"text\": \"...\", \"passed\": true, \"evidence\": \"...\"}\n ],\n \"notes\": [\n \"Used 2023 data, may be stale\",\n \"Fell back to text overlay for non-fillable fields\"\n ]\n }\n ],\n\n \"run_summary\": {\n \"with_skill\": {\n \"pass_rate\": {\"mean\": 0.85, \"stddev\": 0.05, \"min\": 0.80, \"max\": 0.90},\n \"time_seconds\": {\"mean\": 45.0, \"stddev\": 12.0, \"min\": 32.0, \"max\": 58.0},\n \"tokens\": {\"mean\": 3800, \"stddev\": 400, \"min\": 3200, \"max\": 4100}\n },\n \"without_skill\": {\n \"pass_rate\": {\"mean\": 0.35, \"stddev\": 0.08, \"min\": 0.28, \"max\": 0.45},\n \"time_seconds\": {\"mean\": 32.0, \"stddev\": 8.0, \"min\": 24.0, \"max\": 42.0},\n \"tokens\": {\"mean\": 2100, \"stddev\": 300, \"min\": 1800, \"max\": 2500}\n },\n \"delta\": {\n \"pass_rate\": \"+0.50\",\n \"time_seconds\": \"+13.0\",\n \"tokens\": \"+1700\"\n }\n },\n\n \"notes\": [\n \"Assertion 'Output is a PDF file' passes 100% in both configurations - may not differentiate skill value\",\n \"Eval 3 shows high variance (50% ± 40%) - may be flaky or model-dependent\",\n \"Without-skill runs consistently fail on table extraction expectations\",\n \"Skill adds 13s average execution time but improves pass rate by 50%\"\n ]\n}\n```\n\n**Fields:**\n- `metadata`: Information about the benchmark run\n - `skill_name`: Name of the skill\n - `timestamp`: When the benchmark was run\n - `evals_run`: List of eval names or IDs\n - `runs_per_configuration`: Number of runs per config (e.g. 3)\n- `runs[]`: Individual run results\n - `eval_id`: Numeric eval identifier\n - `eval_name`: Human-readable eval name (used as section header in the viewer)\n - `configuration`: Must be `\"with_skill\"` or `\"without_skill\"` (the viewer uses this exact string for grouping and color coding)\n - `run_number`: Integer run number (1, 2, 3...)\n - `result`: Nested object with `pass_rate`, `passed`, `total`, `time_seconds`, `tokens`, `errors`\n- `run_summary`: Statistical aggregates per configuration\n - `with_skill` / `without_skill`: Each contains `pass_rate`, `time_seconds`, `tokens` objects with `mean` and `stddev` fields\n - `delta`: Difference strings like `\"+0.50\"`, `\"+13.0\"`, `\"+1700\"`\n- `notes`: Freeform observations from the analyzer\n\n**Important:** The viewer reads these field names exactly. Using `config` instead of `configuration`, or putting `pass_rate` at the top level of a run instead of nested under `result`, will cause the viewer to show empty/zero values. Always reference this schema when generating benchmark.json manually.\n\n---\n\n## comparison.json\n\nOutput from blind comparator. Located at `/comparison-N.json`.\n\n```json\n{\n \"winner\": \"A\",\n \"reasoning\": \"Output A provides a complete solution with proper formatting and all required fields. 
Output B is missing the date field and has formatting inconsistencies.\",\n \"rubric\": {\n \"A\": {\n \"content\": {\n \"correctness\": 5,\n \"completeness\": 5,\n \"accuracy\": 4\n },\n \"structure\": {\n \"organization\": 4,\n \"formatting\": 5,\n \"usability\": 4\n },\n \"content_score\": 4.7,\n \"structure_score\": 4.3,\n \"overall_score\": 9.0\n },\n \"B\": {\n \"content\": {\n \"correctness\": 3,\n \"completeness\": 2,\n \"accuracy\": 3\n },\n \"structure\": {\n \"organization\": 3,\n \"formatting\": 2,\n \"usability\": 3\n },\n \"content_score\": 2.7,\n \"structure_score\": 2.7,\n \"overall_score\": 5.4\n }\n },\n \"output_quality\": {\n \"A\": {\n \"score\": 9,\n \"strengths\": [\"Complete solution\", \"Well-formatted\", \"All fields present\"],\n \"weaknesses\": [\"Minor style inconsistency in header\"]\n },\n \"B\": {\n \"score\": 5,\n \"strengths\": [\"Readable output\", \"Correct basic structure\"],\n \"weaknesses\": [\"Missing date field\", \"Formatting inconsistencies\", \"Partial data extraction\"]\n }\n },\n \"expectation_results\": {\n \"A\": {\n \"passed\": 4,\n \"total\": 5,\n \"pass_rate\": 0.80,\n \"details\": [\n {\"text\": \"Output includes name\", \"passed\": true}\n ]\n },\n \"B\": {\n \"passed\": 3,\n \"total\": 5,\n \"pass_rate\": 0.60,\n \"details\": [\n {\"text\": \"Output includes name\", \"passed\": true}\n ]\n }\n }\n}\n```\n\n---\n\n## analysis.json\n\nOutput from post-hoc analyzer. Located at `/analysis.json`.\n\n```json\n{\n \"comparison_summary\": {\n \"winner\": \"A\",\n \"winner_skill\": \"path/to/winner/skill\",\n \"loser_skill\": \"path/to/loser/skill\",\n \"comparator_reasoning\": \"Brief summary of why comparator chose winner\"\n },\n \"winner_strengths\": [\n \"Clear step-by-step instructions for handling multi-page documents\",\n \"Included validation script that caught formatting errors\"\n ],\n \"loser_weaknesses\": [\n \"Vague instruction 'process the document appropriately' led to inconsistent behavior\",\n \"No script for validation, agent had to improvise\"\n ],\n \"instruction_following\": {\n \"winner\": {\n \"score\": 9,\n \"issues\": [\"Minor: skipped optional logging step\"]\n },\n \"loser\": {\n \"score\": 6,\n \"issues\": [\n \"Did not use the skill's formatting template\",\n \"Invented own approach instead of following step 3\"\n ]\n }\n },\n \"improvement_suggestions\": [\n {\n \"priority\": \"high\",\n \"category\": \"instructions\",\n \"suggestion\": \"Replace 'process the document appropriately' with explicit steps\",\n \"expected_impact\": \"Would eliminate ambiguity that caused inconsistent behavior\"\n }\n ],\n \"transcript_insights\": {\n \"winner_execution_pattern\": \"Read skill -> Followed 5-step process -> Used validation script\",\n \"loser_execution_pattern\": \"Read skill -> Unclear on approach -> Tried 3 different methods\"\n }\n}\n```\n" } ] }, @@ -29,19 +72,49 @@ const children: SkillTemplateNode[] = [ "node_type": "folder", "children": [ { - "name": "init_skill.py", + "name": "__init__.py", "node_type": "file", - "content": "#!/usr/bin/env python3\n\"\"\"\nSkill Initializer - Creates a new skill from template\n\nUsage:\n init_skill.py --path \n\nExamples:\n init_skill.py my-new-skill --path skills/public\n init_skill.py my-api-helper --path skills/private\n init_skill.py custom-skill --path /custom/location\n\"\"\"\n\nimport sys\nfrom pathlib import Path\n\n\nSKILL_TEMPLATE = \"\"\"---\nname: {skill_name}\ndescription: [TODO: Complete and informative explanation of what the skill does and when to use it. 
Include WHEN to use this skill - specific scenarios, file types, or tasks that trigger it.]\n---\n\n# {skill_title}\n\n## Overview\n\n[TODO: 1-2 sentences explaining what this skill enables]\n\n## Structuring This Skill\n\n[TODO: Choose the structure that best fits this skill's purpose. Common patterns:\n\n**1. Workflow-Based** (best for sequential processes)\n- Works well when there are clear step-by-step procedures\n- Example: DOCX skill with \"Workflow Decision Tree\" → \"Reading\" → \"Creating\" → \"Editing\"\n- Structure: ## Overview → ## Workflow Decision Tree → ## Step 1 → ## Step 2...\n\n**2. Task-Based** (best for tool collections)\n- Works well when the skill offers different operations/capabilities\n- Example: PDF skill with \"Quick Start\" → \"Merge PDFs\" → \"Split PDFs\" → \"Extract Text\"\n- Structure: ## Overview → ## Quick Start → ## Task Category 1 → ## Task Category 2...\n\n**3. Reference/Guidelines** (best for standards or specifications)\n- Works well for brand guidelines, coding standards, or requirements\n- Example: Brand styling with \"Brand Guidelines\" → \"Colors\" → \"Typography\" → \"Features\"\n- Structure: ## Overview → ## Guidelines → ## Specifications → ## Usage...\n\n**4. Capabilities-Based** (best for integrated systems)\n- Works well when the skill provides multiple interrelated features\n- Example: Product Management with \"Core Capabilities\" → numbered capability list\n- Structure: ## Overview → ## Core Capabilities → ### 1. Feature → ### 2. Feature...\n\nPatterns can be mixed and matched as needed. Most skills combine patterns (e.g., start with task-based, add workflow for complex operations).\n\nDelete this entire \"Structuring This Skill\" section when done - it's just guidance.]\n\n## [TODO: Replace with the first main section based on chosen structure]\n\n[TODO: Add content here. See examples in existing skills:\n- Code samples for technical skills\n- Decision trees for complex workflows\n- Concrete examples with realistic user requests\n- References to scripts/templates/references as needed]\n\n## Resources\n\nThis skill includes example resource directories that demonstrate how to organize different types of bundled resources:\n\n### scripts/\nExecutable code (Python/Bash/etc.) 
that can be run directly to perform specific operations.\n\n**Examples from other skills:**\n- PDF skill: `fill_fillable_fields.py`, `extract_form_field_info.py` - utilities for PDF manipulation\n- DOCX skill: `document.py`, `utilities.py` - Python modules for document processing\n\n**Appropriate for:** Python scripts, shell scripts, or any executable code that performs automation, data processing, or specific operations.\n\n**Note:** Scripts may be executed without loading into context, but can still be read by Claude for patching or environment adjustments.\n\n### references/\nDocumentation and reference material intended to be loaded into context to inform Claude's process and thinking.\n\n**Examples from other skills:**\n- Product management: `communication.md`, `context_building.md` - detailed workflow guides\n- BigQuery: API reference documentation and query examples\n- Finance: Schema documentation, company policies\n\n**Appropriate for:** In-depth documentation, API references, database schemas, comprehensive guides, or any detailed information that Claude should reference while working.\n\n### assets/\nFiles not intended to be loaded into context, but rather used within the output Claude produces.\n\n**Examples from other skills:**\n- Brand styling: PowerPoint template files (.pptx), logo files\n- Frontend builder: HTML/React boilerplate project directories\n- Typography: Font files (.ttf, .woff2)\n\n**Appropriate for:** Templates, boilerplate code, document templates, images, icons, fonts, or any files meant to be copied or used in the final output.\n\n---\n\n**Any unneeded directories can be deleted.** Not every skill requires all three types of resources.\n\"\"\"\n\nEXAMPLE_SCRIPT = '''#!/usr/bin/env python3\n\"\"\"\nExample helper script for {skill_name}\n\nThis is a placeholder script that can be executed directly.\nReplace with actual implementation or delete if not needed.\n\nExample real scripts from other skills:\n- pdf/scripts/fill_fillable_fields.py - Fills PDF form fields\n- pdf/scripts/convert_pdf_to_images.py - Converts PDF pages to images\n\"\"\"\n\ndef main():\n print(\"This is an example script for {skill_name}\")\n # TODO: Add actual script logic here\n # This could be data processing, file conversion, API calls, etc.\n\nif __name__ == \"__main__\":\n main()\n'''\n\nEXAMPLE_REFERENCE = \"\"\"# Reference Documentation for {skill_title}\n\nThis is a placeholder for detailed reference documentation.\nReplace with actual reference content or delete if not needed.\n\nExample real reference docs from other skills:\n- product-management/references/communication.md - Comprehensive guide for status updates\n- product-management/references/context_building.md - Deep-dive on gathering context\n- bigquery/references/ - API references and query examples\n\n## When Reference Docs Are Useful\n\nReference docs are ideal for:\n- Comprehensive API documentation\n- Detailed workflow guides\n- Complex multi-step processes\n- Information too lengthy for main SKILL.md\n- Content that's only needed for specific use cases\n\n## Structure Suggestions\n\n### API Reference Example\n- Overview\n- Authentication\n- Endpoints with examples\n- Error codes\n- Rate limits\n\n### Workflow Guide Example\n- Prerequisites\n- Step-by-step instructions\n- Common patterns\n- Troubleshooting\n- Best practices\n\"\"\"\n\nEXAMPLE_ASSET = \"\"\"# Example Asset File\n\nThis placeholder represents where asset files would be stored.\nReplace with actual asset files (templates, images, fonts, etc.) 
or delete if not needed.\n\nAsset files are NOT intended to be loaded into context, but rather used within\nthe output Claude produces.\n\nExample asset files from other skills:\n- Brand guidelines: logo.png, slides_template.pptx\n- Frontend builder: hello-world/ directory with HTML/React boilerplate\n- Typography: custom-font.ttf, font-family.woff2\n- Data: sample_data.csv, test_dataset.json\n\n## Common Asset Types\n\n- Templates: .pptx, .docx, boilerplate directories\n- Images: .png, .jpg, .svg, .gif\n- Fonts: .ttf, .otf, .woff, .woff2\n- Boilerplate code: Project directories, starter files\n- Icons: .ico, .svg\n- Data files: .csv, .json, .xml, .yaml\n\nNote: This is a text placeholder. Actual assets can be any file type.\n\"\"\"\n\n\ndef title_case_skill_name(skill_name):\n \"\"\"Convert hyphenated skill name to Title Case for display.\"\"\"\n return ' '.join(word.capitalize() for word in skill_name.split('-'))\n\n\ndef init_skill(skill_name, path):\n \"\"\"\n Initialize a new skill directory with template SKILL.md.\n\n Args:\n skill_name: Name of the skill\n path: Path where the skill directory should be created\n\n Returns:\n Path to created skill directory, or None if error\n \"\"\"\n # Determine skill directory path\n skill_dir = Path(path).resolve() / skill_name\n\n # Check if directory already exists\n if skill_dir.exists():\n print(f\"❌ Error: Skill directory already exists: {skill_dir}\")\n return None\n\n # Create skill directory\n try:\n skill_dir.mkdir(parents=True, exist_ok=False)\n print(f\"✅ Created skill directory: {skill_dir}\")\n except Exception as e:\n print(f\"❌ Error creating directory: {e}\")\n return None\n\n # Create SKILL.md from template\n skill_title = title_case_skill_name(skill_name)\n skill_content = SKILL_TEMPLATE.format(\n skill_name=skill_name,\n skill_title=skill_title\n )\n\n skill_md_path = skill_dir / 'SKILL.md'\n try:\n skill_md_path.write_text(skill_content)\n print(\"✅ Created SKILL.md\")\n except Exception as e:\n print(f\"❌ Error creating SKILL.md: {e}\")\n return None\n\n # Create resource directories with example files\n try:\n # Create scripts/ directory with example script\n scripts_dir = skill_dir / 'scripts'\n scripts_dir.mkdir(exist_ok=True)\n example_script = scripts_dir / 'example.py'\n example_script.write_text(EXAMPLE_SCRIPT.format(skill_name=skill_name))\n example_script.chmod(0o755)\n print(\"✅ Created scripts/example.py\")\n\n # Create references/ directory with example reference doc\n references_dir = skill_dir / 'references'\n references_dir.mkdir(exist_ok=True)\n example_reference = references_dir / 'api_reference.md'\n example_reference.write_text(EXAMPLE_REFERENCE.format(skill_title=skill_title))\n print(\"✅ Created references/api_reference.md\")\n\n # Create assets/ directory with example asset placeholder\n assets_dir = skill_dir / 'assets'\n assets_dir.mkdir(exist_ok=True)\n example_asset = assets_dir / 'example_asset.txt'\n example_asset.write_text(EXAMPLE_ASSET)\n print(\"✅ Created assets/example_asset.txt\")\n except Exception as e:\n print(f\"❌ Error creating resource directories: {e}\")\n return None\n\n # Print next steps\n print(f\"\\n✅ Skill '{skill_name}' initialized successfully at {skill_dir}\")\n print(\"\\nNext steps:\")\n print(\"1. Edit SKILL.md to complete the TODO items and update the description\")\n print(\"2. Customize or delete the example files in scripts/, references/, and assets/\")\n print(\"3. 
Run the validator when ready to check the skill structure\")\n\n return skill_dir\n\n\ndef main():\n if len(sys.argv) < 4 or sys.argv[2] != '--path':\n print(\"Usage: init_skill.py --path \")\n print(\"\\nSkill name requirements:\")\n print(\" - Hyphen-case identifier (e.g., 'data-analyzer')\")\n print(\" - Lowercase letters, digits, and hyphens only\")\n print(\" - Max 40 characters\")\n print(\" - Must match directory name exactly\")\n print(\"\\nExamples:\")\n print(\" init_skill.py my-new-skill --path skills/public\")\n print(\" init_skill.py my-api-helper --path skills/private\")\n print(\" init_skill.py custom-skill --path /custom/location\")\n sys.exit(1)\n\n skill_name = sys.argv[1]\n path = sys.argv[3]\n\n print(f\"🚀 Initializing skill: {skill_name}\")\n print(f\" Location: {path}\")\n print()\n\n result = init_skill(skill_name, path)\n\n if result:\n sys.exit(0)\n else:\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n" + "content": "" + }, + { + "name": "aggregate_benchmark.py", + "node_type": "file", + "content": "#!/usr/bin/env python3\n\"\"\"\nAggregate individual run results into benchmark summary statistics.\n\nReads grading.json files from run directories and produces:\n- run_summary with mean, stddev, min, max for each metric\n- delta between with_skill and without_skill configurations\n\nUsage:\n python aggregate_benchmark.py \n\nExample:\n python aggregate_benchmark.py benchmarks/2026-01-15T10-30-00/\n\nThe script supports two directory layouts:\n\n Workspace layout (from skill-creator iterations):\n /\n └── eval-N/\n ├── with_skill/\n │ ├── run-1/grading.json\n │ └── run-2/grading.json\n └── without_skill/\n ├── run-1/grading.json\n └── run-2/grading.json\n\n Legacy layout (with runs/ subdirectory):\n /\n └── runs/\n └── eval-N/\n ├── with_skill/\n │ └── run-1/grading.json\n └── without_skill/\n └── run-1/grading.json\n\"\"\"\n\nimport argparse\nimport json\nimport math\nimport sys\nfrom datetime import datetime, timezone\nfrom pathlib import Path\n\n\ndef calculate_stats(values: list[float]) -> dict:\n \"\"\"Calculate mean, stddev, min, max for a list of values.\"\"\"\n if not values:\n return {\"mean\": 0.0, \"stddev\": 0.0, \"min\": 0.0, \"max\": 0.0}\n\n n = len(values)\n mean = sum(values) / n\n\n if n > 1:\n variance = sum((x - mean) ** 2 for x in values) / (n - 1)\n stddev = math.sqrt(variance)\n else:\n stddev = 0.0\n\n return {\n \"mean\": round(mean, 4),\n \"stddev\": round(stddev, 4),\n \"min\": round(min(values), 4),\n \"max\": round(max(values), 4)\n }\n\n\ndef load_run_results(benchmark_dir: Path) -> dict:\n \"\"\"\n Load all run results from a benchmark directory.\n\n Returns dict keyed by config name (e.g. 
\"with_skill\"/\"without_skill\",\n or \"new_skill\"/\"old_skill\"), each containing a list of run results.\n \"\"\"\n # Support both layouts: eval dirs directly under benchmark_dir, or under runs/\n runs_dir = benchmark_dir / \"runs\"\n if runs_dir.exists():\n search_dir = runs_dir\n elif list(benchmark_dir.glob(\"eval-*\")):\n search_dir = benchmark_dir\n else:\n print(f\"No eval directories found in {benchmark_dir} or {benchmark_dir / 'runs'}\")\n return {}\n\n results: dict[str, list] = {}\n\n for eval_idx, eval_dir in enumerate(sorted(search_dir.glob(\"eval-*\"))):\n metadata_path = eval_dir / \"eval_metadata.json\"\n if metadata_path.exists():\n try:\n with open(metadata_path) as mf:\n eval_id = json.load(mf).get(\"eval_id\", eval_idx)\n except (json.JSONDecodeError, OSError):\n eval_id = eval_idx\n else:\n try:\n eval_id = int(eval_dir.name.split(\"-\")[1])\n except ValueError:\n eval_id = eval_idx\n\n # Discover config directories dynamically rather than hardcoding names\n for config_dir in sorted(eval_dir.iterdir()):\n if not config_dir.is_dir():\n continue\n # Skip non-config directories (inputs, outputs, etc.)\n if not list(config_dir.glob(\"run-*\")):\n continue\n config = config_dir.name\n if config not in results:\n results[config] = []\n\n for run_dir in sorted(config_dir.glob(\"run-*\")):\n run_number = int(run_dir.name.split(\"-\")[1])\n grading_file = run_dir / \"grading.json\"\n\n if not grading_file.exists():\n print(f\"Warning: grading.json not found in {run_dir}\")\n continue\n\n try:\n with open(grading_file) as f:\n grading = json.load(f)\n except json.JSONDecodeError as e:\n print(f\"Warning: Invalid JSON in {grading_file}: {e}\")\n continue\n\n # Extract metrics\n result = {\n \"eval_id\": eval_id,\n \"run_number\": run_number,\n \"pass_rate\": grading.get(\"summary\", {}).get(\"pass_rate\", 0.0),\n \"passed\": grading.get(\"summary\", {}).get(\"passed\", 0),\n \"failed\": grading.get(\"summary\", {}).get(\"failed\", 0),\n \"total\": grading.get(\"summary\", {}).get(\"total\", 0),\n }\n\n # Extract timing — check grading.json first, then sibling timing.json\n timing = grading.get(\"timing\", {})\n result[\"time_seconds\"] = timing.get(\"total_duration_seconds\", 0.0)\n timing_file = run_dir / \"timing.json\"\n if result[\"time_seconds\"] == 0.0 and timing_file.exists():\n try:\n with open(timing_file) as tf:\n timing_data = json.load(tf)\n result[\"time_seconds\"] = timing_data.get(\"total_duration_seconds\", 0.0)\n result[\"tokens\"] = timing_data.get(\"total_tokens\", 0)\n except json.JSONDecodeError:\n pass\n\n # Extract metrics if available\n metrics = grading.get(\"execution_metrics\", {})\n result[\"tool_calls\"] = metrics.get(\"total_tool_calls\", 0)\n if not result.get(\"tokens\"):\n result[\"tokens\"] = metrics.get(\"output_chars\", 0)\n result[\"errors\"] = metrics.get(\"errors_encountered\", 0)\n\n # Extract expectations — viewer requires fields: text, passed, evidence\n raw_expectations = grading.get(\"expectations\", [])\n for exp in raw_expectations:\n if \"text\" not in exp or \"passed\" not in exp:\n print(f\"Warning: expectation in {grading_file} missing required fields (text, passed, evidence): {exp}\")\n result[\"expectations\"] = raw_expectations\n\n # Extract notes from user_notes_summary\n notes_summary = grading.get(\"user_notes_summary\", {})\n notes = []\n notes.extend(notes_summary.get(\"uncertainties\", []))\n notes.extend(notes_summary.get(\"needs_review\", []))\n notes.extend(notes_summary.get(\"workarounds\", []))\n result[\"notes\"] 
= notes\n\n results[config].append(result)\n\n return results\n\n\ndef aggregate_results(results: dict) -> dict:\n \"\"\"\n Aggregate run results into summary statistics.\n\n Returns run_summary with stats for each configuration and delta.\n \"\"\"\n run_summary = {}\n configs = list(results.keys())\n\n for config in configs:\n runs = results.get(config, [])\n\n if not runs:\n run_summary[config] = {\n \"pass_rate\": {\"mean\": 0.0, \"stddev\": 0.0, \"min\": 0.0, \"max\": 0.0},\n \"time_seconds\": {\"mean\": 0.0, \"stddev\": 0.0, \"min\": 0.0, \"max\": 0.0},\n \"tokens\": {\"mean\": 0, \"stddev\": 0, \"min\": 0, \"max\": 0}\n }\n continue\n\n pass_rates = [r[\"pass_rate\"] for r in runs]\n times = [r[\"time_seconds\"] for r in runs]\n tokens = [r.get(\"tokens\", 0) for r in runs]\n\n run_summary[config] = {\n \"pass_rate\": calculate_stats(pass_rates),\n \"time_seconds\": calculate_stats(times),\n \"tokens\": calculate_stats(tokens)\n }\n\n # Calculate delta between the first two configs (if two exist)\n if len(configs) >= 2:\n primary = run_summary.get(configs[0], {})\n baseline = run_summary.get(configs[1], {})\n else:\n primary = run_summary.get(configs[0], {}) if configs else {}\n baseline = {}\n\n delta_pass_rate = primary.get(\"pass_rate\", {}).get(\"mean\", 0) - baseline.get(\"pass_rate\", {}).get(\"mean\", 0)\n delta_time = primary.get(\"time_seconds\", {}).get(\"mean\", 0) - baseline.get(\"time_seconds\", {}).get(\"mean\", 0)\n delta_tokens = primary.get(\"tokens\", {}).get(\"mean\", 0) - baseline.get(\"tokens\", {}).get(\"mean\", 0)\n\n run_summary[\"delta\"] = {\n \"pass_rate\": f\"{delta_pass_rate:+.2f}\",\n \"time_seconds\": f\"{delta_time:+.1f}\",\n \"tokens\": f\"{delta_tokens:+.0f}\"\n }\n\n return run_summary\n\n\ndef generate_benchmark(benchmark_dir: Path, skill_name: str = \"\", skill_path: str = \"\") -> dict:\n \"\"\"\n Generate complete benchmark.json from run results.\n \"\"\"\n results = load_run_results(benchmark_dir)\n run_summary = aggregate_results(results)\n\n # Build runs array for benchmark.json\n runs = []\n for config in results:\n for result in results[config]:\n runs.append({\n \"eval_id\": result[\"eval_id\"],\n \"configuration\": config,\n \"run_number\": result[\"run_number\"],\n \"result\": {\n \"pass_rate\": result[\"pass_rate\"],\n \"passed\": result[\"passed\"],\n \"failed\": result[\"failed\"],\n \"total\": result[\"total\"],\n \"time_seconds\": result[\"time_seconds\"],\n \"tokens\": result.get(\"tokens\", 0),\n \"tool_calls\": result.get(\"tool_calls\", 0),\n \"errors\": result.get(\"errors\", 0)\n },\n \"expectations\": result[\"expectations\"],\n \"notes\": result[\"notes\"]\n })\n\n # Determine eval IDs from results\n eval_ids = sorted(set(\n r[\"eval_id\"]\n for config in results.values()\n for r in config\n ))\n\n benchmark = {\n \"metadata\": {\n \"skill_name\": skill_name or \"\",\n \"skill_path\": skill_path or \"\",\n \"executor_model\": \"\",\n \"analyzer_model\": \"\",\n \"timestamp\": datetime.now(timezone.utc).strftime(\"%Y-%m-%dT%H:%M:%SZ\"),\n \"evals_run\": eval_ids,\n \"runs_per_configuration\": 3\n },\n \"runs\": runs,\n \"run_summary\": run_summary,\n \"notes\": [] # To be filled by analyzer\n }\n\n return benchmark\n\n\ndef generate_markdown(benchmark: dict) -> str:\n \"\"\"Generate human-readable benchmark.md from benchmark data.\"\"\"\n metadata = benchmark[\"metadata\"]\n run_summary = benchmark[\"run_summary\"]\n\n # Determine config names (excluding \"delta\")\n configs = [k for k in run_summary if k != \"delta\"]\n 
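# run_summary preserves discovery order: the first config is rendered as column A (primary) and the second as column B (baseline), matching the delta direction computed in aggregate_results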
config_a = configs[0] if len(configs) >= 1 else \"config_a\"\n    config_b = configs[1] if len(configs) >= 2 else \"config_b\"\n    label_a = config_a.replace(\"_\", \" \").title()\n    label_b = config_b.replace(\"_\", \" \").title()\n\n    lines = [\n        f\"# Skill Benchmark: {metadata['skill_name']}\",\n        \"\",\n        f\"**Model**: {metadata['executor_model']}\",\n        f\"**Date**: {metadata['timestamp']}\",\n        f\"**Evals**: {', '.join(map(str, metadata['evals_run']))} ({metadata['runs_per_configuration']} runs each per configuration)\",\n        \"\",\n        \"## Summary\",\n        \"\",\n        f\"| Metric | {label_a} | {label_b} | Delta |\",\n        \"|--------|------------|---------------|-------|\",\n    ]\n\n    a_summary = run_summary.get(config_a, {})\n    b_summary = run_summary.get(config_b, {})\n    delta = run_summary.get(\"delta\", {})\n\n    # Format pass rate\n    a_pr = a_summary.get(\"pass_rate\", {})\n    b_pr = b_summary.get(\"pass_rate\", {})\n    lines.append(f\"| Pass Rate | {a_pr.get('mean', 0)*100:.0f}% ± {a_pr.get('stddev', 0)*100:.0f}% | {b_pr.get('mean', 0)*100:.0f}% ± {b_pr.get('stddev', 0)*100:.0f}% | {delta.get('pass_rate', '—')} |\")\n\n    # Format time\n    a_time = a_summary.get(\"time_seconds\", {})\n    b_time = b_summary.get(\"time_seconds\", {})\n    lines.append(f\"| Time | {a_time.get('mean', 0):.1f}s ± {a_time.get('stddev', 0):.1f}s | {b_time.get('mean', 0):.1f}s ± {b_time.get('stddev', 0):.1f}s | {delta.get('time_seconds', '—')}s |\")\n\n    # Format tokens\n    a_tokens = a_summary.get(\"tokens\", {})\n    b_tokens = b_summary.get(\"tokens\", {})\n    lines.append(f\"| Tokens | {a_tokens.get('mean', 0):.0f} ± {a_tokens.get('stddev', 0):.0f} | {b_tokens.get('mean', 0):.0f} ± {b_tokens.get('stddev', 0):.0f} | {delta.get('tokens', '—')} |\")\n\n    # Notes section\n    if benchmark.get(\"notes\"):\n        lines.extend([\n            \"\",\n            \"## Notes\",\n            \"\"\n        ])\n        for note in benchmark[\"notes\"]:\n            lines.append(f\"- {note}\")\n\n    return \"\\n\".join(lines)\n\n\ndef main():\n    parser = argparse.ArgumentParser(\n        description=\"Aggregate benchmark run results into summary statistics\"\n    )\n    parser.add_argument(\n        \"benchmark_dir\",\n        type=Path,\n        help=\"Path to the benchmark directory\"\n    )\n    parser.add_argument(\n        \"--skill-name\",\n        default=\"\",\n        help=\"Name of the skill being benchmarked\"\n    )\n    parser.add_argument(\n        \"--skill-path\",\n        default=\"\",\n        help=\"Path to the skill being benchmarked\"\n    )\n    parser.add_argument(\n        \"--output\", \"-o\",\n        type=Path,\n        help=\"Output path for benchmark.json (default: <benchmark_dir>/benchmark.json)\"\n    )\n\n    args = parser.parse_args()\n\n    if not args.benchmark_dir.exists():\n        print(f\"Directory not found: {args.benchmark_dir}\")\n        sys.exit(1)\n\n    # Generate benchmark\n    benchmark = generate_benchmark(args.benchmark_dir, args.skill_name, args.skill_path)\n\n    # Determine output paths\n    output_json = args.output or (args.benchmark_dir / \"benchmark.json\")\n    output_md = output_json.with_suffix(\".md\")\n\n    # Write benchmark.json\n    with open(output_json, \"w\") as f:\n        json.dump(benchmark, f, indent=2)\n    print(f\"Generated: {output_json}\")\n\n    # Write benchmark.md\n    markdown = generate_markdown(benchmark)\n    with open(output_md, \"w\") as f:\n        f.write(markdown)\n    print(f\"Generated: {output_md}\")\n\n    # Print summary\n    run_summary = benchmark[\"run_summary\"]\n    configs = [k for k in run_summary if k != \"delta\"]\n    delta = run_summary.get(\"delta\", {})\n\n    print(\"\\nSummary:\")\n    for config in configs:\n        pr = run_summary[config][\"pass_rate\"][\"mean\"]\n        label = config.replace(\"_\", \" \").title()\n        print(f\"  {label}: {pr*100:.1f}% pass 
rate\")\n print(f\" Delta: {delta.get('pass_rate', '—')}\")\n\n\nif __name__ == \"__main__\":\n main()\n" + }, + { + "name": "generate_report.py", + "node_type": "file", + "content": "#!/usr/bin/env python3\n\"\"\"Generate an HTML report from run_loop.py output.\n\nTakes the JSON output from run_loop.py and generates a visual HTML report\nshowing each description attempt with check/x for each test case.\nDistinguishes between train and test queries.\n\"\"\"\n\nimport argparse\nimport html\nimport json\nimport sys\nfrom pathlib import Path\n\n\ndef generate_html(data: dict, auto_refresh: bool = False, skill_name: str = \"\") -> str:\n \"\"\"Generate HTML report from loop output data. If auto_refresh is True, adds a meta refresh tag.\"\"\"\n history = data.get(\"history\", [])\n holdout = data.get(\"holdout\", 0)\n title_prefix = html.escape(skill_name + \" \\u2014 \") if skill_name else \"\"\n\n # Get all unique queries from train and test sets, with should_trigger info\n train_queries: list[dict] = []\n test_queries: list[dict] = []\n if history:\n for r in history[0].get(\"train_results\", history[0].get(\"results\", [])):\n train_queries.append({\"query\": r[\"query\"], \"should_trigger\": r.get(\"should_trigger\", True)})\n if history[0].get(\"test_results\"):\n for r in history[0].get(\"test_results\", []):\n test_queries.append({\"query\": r[\"query\"], \"should_trigger\": r.get(\"should_trigger\", True)})\n\n refresh_tag = ' \\n' if auto_refresh else \"\"\n\n html_parts = [\"\"\"\n\n\n \n\"\"\" + refresh_tag + \"\"\" \"\"\" + title_prefix + \"\"\"Skill Description Optimization\n \n \n \n \n\n\n

\"\"\" + title_prefix + \"\"\"Skill Description Optimization

\n
\n Optimizing your skill's description. This page updates automatically as Claude tests different versions of your skill's description. Each row is an iteration — a new description attempt. The columns show test queries: green checkmarks mean the skill triggered correctly (or correctly didn't trigger), red crosses mean it got it wrong. The \"Train\" score shows performance on queries used to improve the description; the \"Test\" score shows performance on held-out queries the optimizer hasn't seen. When it's done, Claude will apply the best-performing description to your skill.\n
\n\"\"\"]\n\n # Summary section\n best_test_score = data.get('best_test_score')\n best_train_score = data.get('best_train_score')\n html_parts.append(f\"\"\"\n
\n

Original: {html.escape(data.get('original_description', 'N/A'))}

\n

Best: {html.escape(data.get('best_description', 'N/A'))}

\n

Best Score: {data.get('best_score', 'N/A')} {'(test)' if best_test_score else '(train)'}

\n

Iterations: {data.get('iterations_run', 0)} | Train: {data.get('train_size', '?')} | Test: {data.get('test_size', '?')}

\n
\n\"\"\")\n\n # Legend\n html_parts.append(\"\"\"\n
\n Query columns:\n Should trigger\n Should NOT trigger\n Train\n Test\n
\n\"\"\")\n\n # Table header\n html_parts.append(\"\"\"\n
\n \n \n \n \n \n \n \n\"\"\")\n\n # Add column headers for train queries\n for qinfo in train_queries:\n polarity = \"positive-col\" if qinfo[\"should_trigger\"] else \"negative-col\"\n html_parts.append(f' \\n')\n\n # Add column headers for test queries (different color)\n for qinfo in test_queries:\n polarity = \"positive-col\" if qinfo[\"should_trigger\"] else \"negative-col\"\n html_parts.append(f' \\n')\n\n html_parts.append(\"\"\" \n \n \n\"\"\")\n\n # Find best iteration for highlighting\n if test_queries:\n best_iter = max(history, key=lambda h: h.get(\"test_passed\") or 0).get(\"iteration\")\n else:\n best_iter = max(history, key=lambda h: h.get(\"train_passed\", h.get(\"passed\", 0))).get(\"iteration\")\n\n # Add rows for each iteration\n for h in history:\n iteration = h.get(\"iteration\", \"?\")\n train_passed = h.get(\"train_passed\", h.get(\"passed\", 0))\n train_total = h.get(\"train_total\", h.get(\"total\", 0))\n test_passed = h.get(\"test_passed\")\n test_total = h.get(\"test_total\")\n description = h.get(\"description\", \"\")\n train_results = h.get(\"train_results\", h.get(\"results\", []))\n test_results = h.get(\"test_results\", [])\n\n # Create lookups for results by query\n train_by_query = {r[\"query\"]: r for r in train_results}\n test_by_query = {r[\"query\"]: r for r in test_results} if test_results else {}\n\n # Compute aggregate correct/total runs across all retries\n def aggregate_runs(results: list[dict]) -> tuple[int, int]:\n correct = 0\n total = 0\n for r in results:\n runs = r.get(\"runs\", 0)\n triggers = r.get(\"triggers\", 0)\n total += runs\n if r.get(\"should_trigger\", True):\n correct += triggers\n else:\n correct += runs - triggers\n return correct, total\n\n train_correct, train_runs = aggregate_runs(train_results)\n test_correct, test_runs = aggregate_runs(test_results)\n\n # Determine score classes\n def score_class(correct: int, total: int) -> str:\n if total > 0:\n ratio = correct / total\n if ratio >= 0.8:\n return \"score-good\"\n elif ratio >= 0.5:\n return \"score-ok\"\n return \"score-bad\"\n\n train_class = score_class(train_correct, train_runs)\n test_class = score_class(test_correct, test_runs)\n\n row_class = \"best-row\" if iteration == best_iter else \"\"\n\n html_parts.append(f\"\"\" \n \n \n \n \n\"\"\")\n\n # Add result for each train query\n for qinfo in train_queries:\n r = train_by_query.get(qinfo[\"query\"], {})\n did_pass = r.get(\"pass\", False)\n triggers = r.get(\"triggers\", 0)\n runs = r.get(\"runs\", 0)\n\n icon = \"✓\" if did_pass else \"✗\"\n css_class = \"pass\" if did_pass else \"fail\"\n\n html_parts.append(f' \\n')\n\n # Add result for each test query (with different background)\n for qinfo in test_queries:\n r = test_by_query.get(qinfo[\"query\"], {})\n did_pass = r.get(\"pass\", False)\n triggers = r.get(\"triggers\", 0)\n runs = r.get(\"runs\", 0)\n\n icon = \"✓\" if did_pass else \"✗\"\n css_class = \"pass\" if did_pass else \"fail\"\n\n html_parts.append(f' \\n')\n\n html_parts.append(\" \\n\")\n\n html_parts.append(\"\"\" \n
IterTrainTestDescription{html.escape(qinfo[\"query\"])}{html.escape(qinfo[\"query\"])}
{iteration}{train_correct}/{train_runs}{test_correct}/{test_runs}{html.escape(description)}{icon}{triggers}/{runs}{icon}{triggers}/{runs}
\n
\n\"\"\")\n\n html_parts.append(\"\"\"\n\n\n\"\"\")\n\n return \"\".join(html_parts)\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Generate HTML report from run_loop output\")\n parser.add_argument(\"input\", help=\"Path to JSON output from run_loop.py (or - for stdin)\")\n parser.add_argument(\"-o\", \"--output\", default=None, help=\"Output HTML file (default: stdout)\")\n parser.add_argument(\"--skill-name\", default=\"\", help=\"Skill name to include in the report title\")\n args = parser.parse_args()\n\n if args.input == \"-\":\n data = json.load(sys.stdin)\n else:\n data = json.loads(Path(args.input).read_text())\n\n html_output = generate_html(data, skill_name=args.skill_name)\n\n if args.output:\n Path(args.output).write_text(html_output)\n print(f\"Report written to {args.output}\", file=sys.stderr)\n else:\n print(html_output)\n\n\nif __name__ == \"__main__\":\n main()\n" + }, + { + "name": "improve_description.py", + "node_type": "file", + "content": "#!/usr/bin/env python3\n\"\"\"Improve a skill description based on eval results.\n\nTakes eval results (from run_eval.py) and generates an improved description\nby calling `claude -p` as a subprocess (same auth pattern as run_eval.py —\nuses the session's Claude Code auth, no separate ANTHROPIC_API_KEY needed).\n\"\"\"\n\nimport argparse\nimport json\nimport os\nimport re\nimport subprocess\nimport sys\nfrom pathlib import Path\n\nfrom scripts.utils import parse_skill_md\n\n\ndef _call_claude(prompt: str, model: str | None, timeout: int = 300) -> str:\n \"\"\"Run `claude -p` with the prompt on stdin and return the text response.\n\n Prompt goes over stdin (not argv) because it embeds the full SKILL.md\n body and can easily exceed comfortable argv length.\n \"\"\"\n cmd = [\"claude\", \"-p\", \"--output-format\", \"text\"]\n if model:\n cmd.extend([\"--model\", model])\n\n # Remove CLAUDECODE env var to allow nesting claude -p inside a\n # Claude Code session. The guard is for interactive terminal conflicts;\n # programmatic subprocess usage is safe. Same pattern as run_eval.py.\n env = {k: v for k, v in os.environ.items() if k != \"CLAUDECODE\"}\n\n result = subprocess.run(\n cmd,\n input=prompt,\n capture_output=True,\n text=True,\n env=env,\n timeout=timeout,\n )\n if result.returncode != 0:\n raise RuntimeError(\n f\"claude -p exited {result.returncode}\\nstderr: {result.stderr}\"\n )\n return result.stdout\n\n\ndef improve_description(\n skill_name: str,\n skill_content: str,\n current_description: str,\n eval_results: dict,\n history: list[dict],\n model: str,\n test_results: dict | None = None,\n log_dir: Path | None = None,\n iteration: int | None = None,\n) -> str:\n \"\"\"Call Claude to improve the description based on eval results.\"\"\"\n failed_triggers = [\n r for r in eval_results[\"results\"]\n if r[\"should_trigger\"] and not r[\"pass\"]\n ]\n false_triggers = [\n r for r in eval_results[\"results\"]\n if not r[\"should_trigger\"] and not r[\"pass\"]\n ]\n\n # Build scores summary\n train_score = f\"{eval_results['summary']['passed']}/{eval_results['summary']['total']}\"\n if test_results:\n test_score = f\"{test_results['summary']['passed']}/{test_results['summary']['total']}\"\n scores_summary = f\"Train: {train_score}, Test: {test_score}\"\n else:\n scores_summary = f\"Train: {train_score}\"\n\n prompt = f\"\"\"You are optimizing a skill description for a Claude Code skill called \"{skill_name}\". 
A \"skill\" is sort of like a prompt, but with progressive disclosure -- there's a title and description that Claude sees when deciding whether to use the skill, and then if it does use the skill, it reads the .md file which has lots more details and potentially links to other resources in the skill folder like helper files and scripts and additional documentation or examples.\n\nThe description appears in Claude's \"available_skills\" list. When a user sends a query, Claude decides whether to invoke the skill based solely on the title and on this description. Your goal is to write a description that triggers for relevant queries, and doesn't trigger for irrelevant ones.\n\nHere's the current description:\n\n\"{current_description}\"\n\n\nCurrent scores ({scores_summary}):\n\n\"\"\"\n if failed_triggers:\n prompt += \"FAILED TO TRIGGER (should have triggered but didn't):\\n\"\n for r in failed_triggers:\n prompt += f' - \"{r[\"query\"]}\" (triggered {r[\"triggers\"]}/{r[\"runs\"]} times)\\n'\n prompt += \"\\n\"\n\n if false_triggers:\n prompt += \"FALSE TRIGGERS (triggered but shouldn't have):\\n\"\n for r in false_triggers:\n prompt += f' - \"{r[\"query\"]}\" (triggered {r[\"triggers\"]}/{r[\"runs\"]} times)\\n'\n prompt += \"\\n\"\n\n if history:\n prompt += \"PREVIOUS ATTEMPTS (do NOT repeat these — try something structurally different):\\n\\n\"\n for h in history:\n train_s = f\"{h.get('train_passed', h.get('passed', 0))}/{h.get('train_total', h.get('total', 0))}\"\n test_s = f\"{h.get('test_passed', '?')}/{h.get('test_total', '?')}\" if h.get('test_passed') is not None else None\n score_str = f\"train={train_s}\" + (f\", test={test_s}\" if test_s else \"\")\n prompt += f'\\n'\n prompt += f'Description: \"{h[\"description\"]}\"\\n'\n if \"results\" in h:\n prompt += \"Train results:\\n\"\n for r in h[\"results\"]:\n status = \"PASS\" if r[\"pass\"] else \"FAIL\"\n prompt += f' [{status}] \"{r[\"query\"][:80]}\" (triggered {r[\"triggers\"]}/{r[\"runs\"]})\\n'\n if h.get(\"note\"):\n prompt += f'Note: {h[\"note\"]}\\n'\n prompt += \"\\n\\n\"\n\n prompt += f\"\"\"\n\nSkill content (for context on what the skill does):\n\n{skill_content}\n\n\nBased on the failures, write a new and improved description that is more likely to trigger correctly. When I say \"based on the failures\", it's a bit of a tricky line to walk because we don't want to overfit to the specific cases you're seeing. So what I DON'T want you to do is produce an ever-expanding list of specific queries that this skill should or shouldn't trigger for. Instead, try to generalize from the failures to broader categories of user intent and situations where this skill would be useful or not useful. The reason for this is twofold:\n\n1. Avoid overfitting\n2. The list might get loooong and it's injected into ALL queries and there might be a lot of skills, so we don't want to blow too much space on any given description.\n\nConcretely, your description should not be more than about 100-200 words, even if that comes at the cost of accuracy. There is a hard limit of 1024 characters — descriptions over that will be truncated, so stay comfortably under it.\n\nHere are some tips that we've found to work well in writing these descriptions:\n- The skill should be phrased in the imperative -- \"Use this skill for\" rather than \"this skill does\"\n- The skill description should focus on the user's intent, what they are trying to achieve, vs. 
the implementation details of how the skill works.\n- The description competes with other skills for Claude's attention — make it distinctive and immediately recognizable.\n- If you're getting lots of failures after repeated attempts, change things up. Try different sentence structures or wordings.\n\nI'd encourage you to be creative and mix up the style in different iterations since you'll have multiple opportunities to try different approaches and we'll just grab the highest-scoring one at the end. \n\nPlease respond with only the new description text in tags, nothing else.\"\"\"\n\n text = _call_claude(prompt, model)\n\n match = re.search(r\"(.*?)\", text, re.DOTALL)\n description = match.group(1).strip().strip('\"') if match else text.strip().strip('\"')\n\n transcript: dict = {\n \"iteration\": iteration,\n \"prompt\": prompt,\n \"response\": text,\n \"parsed_description\": description,\n \"char_count\": len(description),\n \"over_limit\": len(description) > 1024,\n }\n\n # Safety net: the prompt already states the 1024-char hard limit, but if\n # the model blew past it anyway, make one fresh single-turn call that\n # quotes the too-long version and asks for a shorter rewrite. (The old\n # SDK path did this as a true multi-turn; `claude -p` is one-shot, so we\n # inline the prior output into the new prompt instead.)\n if len(description) > 1024:\n shorten_prompt = (\n f\"{prompt}\\n\\n\"\n f\"---\\n\\n\"\n f\"A previous attempt produced this description, which at \"\n f\"{len(description)} characters is over the 1024-character hard limit:\\n\\n\"\n f'\"{description}\"\\n\\n'\n f\"Rewrite it to be under 1024 characters while keeping the most \"\n f\"important trigger words and intent coverage. Respond with only \"\n f\"the new description in tags.\"\n )\n shorten_text = _call_claude(shorten_prompt, model)\n match = re.search(r\"(.*?)\", shorten_text, re.DOTALL)\n shortened = match.group(1).strip().strip('\"') if match else shorten_text.strip().strip('\"')\n\n transcript[\"rewrite_prompt\"] = shorten_prompt\n transcript[\"rewrite_response\"] = shorten_text\n transcript[\"rewrite_description\"] = shortened\n transcript[\"rewrite_char_count\"] = len(shortened)\n description = shortened\n\n transcript[\"final_description\"] = description\n\n if log_dir:\n log_dir.mkdir(parents=True, exist_ok=True)\n log_file = log_dir / f\"improve_iter_{iteration or 'unknown'}.json\"\n log_file.write_text(json.dumps(transcript, indent=2))\n\n return description\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Improve a skill description based on eval results\")\n parser.add_argument(\"--eval-results\", required=True, help=\"Path to eval results JSON (from run_eval.py)\")\n parser.add_argument(\"--skill-path\", required=True, help=\"Path to skill directory\")\n parser.add_argument(\"--history\", default=None, help=\"Path to history JSON (previous attempts)\")\n parser.add_argument(\"--model\", required=True, help=\"Model for improvement\")\n parser.add_argument(\"--verbose\", action=\"store_true\", help=\"Print thinking to stderr\")\n args = parser.parse_args()\n\n skill_path = Path(args.skill_path)\n if not (skill_path / \"SKILL.md\").exists():\n print(f\"Error: No SKILL.md found at {skill_path}\", file=sys.stderr)\n sys.exit(1)\n\n eval_results = json.loads(Path(args.eval_results).read_text())\n history = []\n if args.history:\n history = json.loads(Path(args.history).read_text())\n\n name, _, content = parse_skill_md(skill_path)\n current_description = eval_results[\"description\"]\n\n 
if args.verbose:\n print(f\"Current: {current_description}\", file=sys.stderr)\n print(f\"Score: {eval_results['summary']['passed']}/{eval_results['summary']['total']}\", file=sys.stderr)\n\n new_description = improve_description(\n skill_name=name,\n skill_content=content,\n current_description=current_description,\n eval_results=eval_results,\n history=history,\n model=args.model,\n )\n\n if args.verbose:\n print(f\"Improved: {new_description}\", file=sys.stderr)\n\n # Output as JSON with both the new description and updated history\n output = {\n \"description\": new_description,\n \"history\": history + [{\n \"description\": current_description,\n \"passed\": eval_results[\"summary\"][\"passed\"],\n \"failed\": eval_results[\"summary\"][\"failed\"],\n \"total\": eval_results[\"summary\"][\"total\"],\n \"results\": eval_results[\"results\"],\n }],\n }\n print(json.dumps(output, indent=2))\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "name": "package_skill.py", "node_type": "file", - "content": "#!/usr/bin/env python3\n\"\"\"\nSkill Packager - Creates a distributable .skill file of a skill folder\n\nUsage:\n python utils/package_skill.py [output-directory]\n\nExample:\n python utils/package_skill.py skills/public/my-skill\n python utils/package_skill.py skills/public/my-skill ./dist\n\"\"\"\n\nimport sys\nimport zipfile\nfrom pathlib import Path\nfrom quick_validate import validate_skill\n\n\ndef package_skill(skill_path, output_dir=None):\n \"\"\"\n Package a skill folder into a .skill file.\n\n Args:\n skill_path: Path to the skill folder\n output_dir: Optional output directory for the .skill file (defaults to current directory)\n\n Returns:\n Path to the created .skill file, or None if error\n \"\"\"\n skill_path = Path(skill_path).resolve()\n\n # Validate skill folder exists\n if not skill_path.exists():\n print(f\"❌ Error: Skill folder not found: {skill_path}\")\n return None\n\n if not skill_path.is_dir():\n print(f\"❌ Error: Path is not a directory: {skill_path}\")\n return None\n\n # Validate SKILL.md exists\n skill_md = skill_path / \"SKILL.md\"\n if not skill_md.exists():\n print(f\"❌ Error: SKILL.md not found in {skill_path}\")\n return None\n\n # Run validation before packaging\n print(\"🔍 Validating skill...\")\n valid, message = validate_skill(skill_path)\n if not valid:\n print(f\"❌ Validation failed: {message}\")\n print(\" Please fix the validation errors before packaging.\")\n return None\n print(f\"✅ {message}\\n\")\n\n # Determine output location\n skill_name = skill_path.name\n if output_dir:\n output_path = Path(output_dir).resolve()\n output_path.mkdir(parents=True, exist_ok=True)\n else:\n output_path = Path.cwd()\n\n skill_filename = output_path / f\"{skill_name}.skill\"\n\n # Create the .skill file (zip format)\n try:\n with zipfile.ZipFile(skill_filename, 'w', zipfile.ZIP_DEFLATED) as zipf:\n # Walk through the skill directory\n for file_path in skill_path.rglob('*'):\n if file_path.is_file():\n # Calculate the relative path within the zip\n arcname = file_path.relative_to(skill_path.parent)\n zipf.write(file_path, arcname)\n print(f\" Added: {arcname}\")\n\n print(f\"\\n✅ Successfully packaged skill to: {skill_filename}\")\n return skill_filename\n\n except Exception as e:\n print(f\"❌ Error creating .skill file: {e}\")\n return None\n\n\ndef main():\n if len(sys.argv) < 2:\n print(\"Usage: python utils/package_skill.py [output-directory]\")\n print(\"\\nExample:\")\n print(\" python utils/package_skill.py skills/public/my-skill\")\n print(\" python 
utils/package_skill.py skills/public/my-skill ./dist\")\n        sys.exit(1)\n\n    skill_path = sys.argv[1]\n    output_dir = sys.argv[2] if len(sys.argv) > 2 else None\n\n    print(f\"📦 Packaging skill: {skill_path}\")\n    if output_dir:\n        print(f\"   Output directory: {output_dir}\")\n    print()\n\n    result = package_skill(skill_path, output_dir)\n\n    if result:\n        sys.exit(0)\n    else:\n        sys.exit(1)\n\n\nif __name__ == \"__main__\":\n    main()\n"
+ "content": "#!/usr/bin/env python3\n\"\"\"\nSkill Packager - Creates a distributable .skill file of a skill folder\n\nUsage:\n    python utils/package_skill.py <skill-folder> [output-directory]\n\nExample:\n    python utils/package_skill.py skills/public/my-skill\n    python utils/package_skill.py skills/public/my-skill ./dist\n\"\"\"\n\nimport fnmatch\nimport sys\nimport zipfile\nfrom pathlib import Path\nfrom scripts.quick_validate import validate_skill\n\n# Patterns to exclude when packaging skills.\nEXCLUDE_DIRS = {\"__pycache__\", \"node_modules\"}\nEXCLUDE_GLOBS = {\"*.pyc\"}\nEXCLUDE_FILES = {\".DS_Store\"}\n# Directories excluded only at the skill root (not when nested deeper).\nROOT_EXCLUDE_DIRS = {\"evals\"}\n\n\ndef should_exclude(rel_path: Path) -> bool:\n    \"\"\"Check if a path should be excluded from packaging.\"\"\"\n    parts = rel_path.parts\n    if any(part in EXCLUDE_DIRS for part in parts):\n        return True\n    # rel_path is relative to skill_path.parent, so parts[0] is the skill\n    # folder name and parts[1] (if present) is the first subdir.\n    if len(parts) > 1 and parts[1] in ROOT_EXCLUDE_DIRS:\n        return True\n    name = rel_path.name\n    if name in EXCLUDE_FILES:\n        return True\n    return any(fnmatch.fnmatch(name, pat) for pat in EXCLUDE_GLOBS)\n\n\ndef package_skill(skill_path, output_dir=None):\n    \"\"\"\n    Package a skill folder into a .skill file.\n\n    Args:\n        skill_path: Path to the skill folder\n        output_dir: Optional output directory for the .skill file (defaults to current directory)\n\n    Returns:\n        Path to the created .skill file, or None if error\n    \"\"\"\n    skill_path = Path(skill_path).resolve()\n\n    # Validate skill folder exists\n    if not skill_path.exists():\n        print(f\"❌ Error: Skill folder not found: {skill_path}\")\n        return None\n\n    if not skill_path.is_dir():\n        print(f\"❌ Error: Path is not a directory: {skill_path}\")\n        return None\n\n    # Validate SKILL.md exists\n    skill_md = skill_path / \"SKILL.md\"\n    if not skill_md.exists():\n        print(f\"❌ Error: SKILL.md not found in {skill_path}\")\n        return None\n\n    # Run validation before packaging\n    print(\"🔍 Validating skill...\")\n    valid, message = validate_skill(skill_path)\n    if not valid:\n        print(f\"❌ Validation failed: {message}\")\n        print(\"   Please fix the validation errors before packaging.\")\n        return None\n    print(f\"✅ {message}\\n\")\n\n    # Determine output location\n    skill_name = skill_path.name\n    if output_dir:\n        output_path = Path(output_dir).resolve()\n        output_path.mkdir(parents=True, exist_ok=True)\n    else:\n        output_path = Path.cwd()\n\n    skill_filename = output_path / f\"{skill_name}.skill\"\n\n    # Create the .skill file (zip format)\n    try:\n        with zipfile.ZipFile(skill_filename, 'w', zipfile.ZIP_DEFLATED) as zipf:\n            # Walk through the skill directory, excluding build artifacts\n            for file_path in skill_path.rglob('*'):\n                if not file_path.is_file():\n                    continue\n                arcname = file_path.relative_to(skill_path.parent)\n                if should_exclude(arcname):\n                    print(f\"   Skipped: {arcname}\")\n                    continue\n                zipf.write(file_path, arcname)\n                print(f\"   Added: {arcname}\")\n\n        print(f\"\\n✅ Successfully packaged skill to: {skill_filename}\")\n        return skill_filename\n\n    except Exception as e:\n        print(f\"❌ Error creating .skill file: {e}\")\n        return None\n\n\ndef main():\n    if len(sys.argv) < 2:\n        print(\"Usage: python utils/package_skill.py <skill-folder> [output-directory]\")\n        print(\"\\nExample:\")\n        print(\"  python utils/package_skill.py skills/public/my-skill\")\n        print(\"  python utils/package_skill.py skills/public/my-skill ./dist\")\n        sys.exit(1)\n\n    skill_path = sys.argv[1]\n    output_dir = sys.argv[2] if len(sys.argv) > 2 else None\n\n    print(f\"📦 Packaging skill: {skill_path}\")\n    if output_dir:\n        print(f\"   Output directory: {output_dir}\")\n    print()\n\n    result = package_skill(skill_path, output_dir)\n\n    if result:\n        sys.exit(0)\n    else:\n        sys.exit(1)\n\n\nif __name__ == \"__main__\":\n    main()\n"
+ },
+ {
+ "name": "quick_validate.py",
+ "node_type": "file",
- "content": "#!/usr/bin/env python3\n\"\"\"\nQuick validation script for skills - minimal version\n\"\"\"\n\nimport sys\nimport os\nimport re\nimport yaml\nfrom pathlib import Path\n\ndef validate_skill(skill_path):\n    \"\"\"Basic validation of a skill\"\"\"\n    skill_path = Path(skill_path)\n\n    # Check SKILL.md exists\n    skill_md = skill_path / 'SKILL.md'\n    if not skill_md.exists():\n        return False, \"SKILL.md not found\"\n\n    # Read and validate frontmatter\n    content = skill_md.read_text()\n    if not content.startswith('---'):\n        return False, \"No YAML frontmatter found\"\n\n    # Extract frontmatter\n    match = re.match(r'^---\\n(.*?)\\n---', content, re.DOTALL)\n    if not match:\n        return False, \"Invalid frontmatter format\"\n\n    frontmatter_text = match.group(1)\n\n    # Parse YAML frontmatter\n    try:\n        frontmatter = yaml.safe_load(frontmatter_text)\n        if not isinstance(frontmatter, dict):\n            return False, \"Frontmatter must be a YAML dictionary\"\n    except yaml.YAMLError as e:\n        return False, f\"Invalid YAML in frontmatter: {e}\"\n\n    # Define allowed properties\n    ALLOWED_PROPERTIES = {'name', 'description', 'license', 'allowed-tools', 'metadata'}\n\n    # Check for unexpected properties (excluding nested keys under metadata)\n    unexpected_keys = set(frontmatter.keys()) - ALLOWED_PROPERTIES\n    if unexpected_keys:\n        return False, (\n            f\"Unexpected key(s) in SKILL.md frontmatter: {', '.join(sorted(unexpected_keys))}. \"\n            f\"Allowed properties are: {', '.join(sorted(ALLOWED_PROPERTIES))}\"\n        )\n\n    # Check required fields\n    if 'name' not in frontmatter:\n        return False, \"Missing 'name' in frontmatter\"\n    if 'description' not in frontmatter:\n        return False, \"Missing 'description' in frontmatter\"\n\n    # Extract name for validation\n    name = frontmatter.get('name', '')\n    if not isinstance(name, str):\n        return False, f\"Name must be a string, got {type(name).__name__}\"\n    name = name.strip()\n    if name:\n        # Check naming convention (hyphen-case: lowercase with hyphens)\n        if not re.match(r'^[a-z0-9-]+$', name):\n            return False, f\"Name '{name}' should be hyphen-case (lowercase letters, digits, and hyphens only)\"\n        if name.startswith('-') or name.endswith('-') or '--' in name:\n            return False, f\"Name '{name}' cannot start/end with hyphen or contain consecutive hyphens\"\n        # Check name length (max 64 characters per spec)\n        if len(name) > 64:\n            return False, f\"Name is too long ({len(name)} characters). 
Maximum is 64 characters.\"\n\n # Extract and validate description\n description = frontmatter.get('description', '')\n if not isinstance(description, str):\n return False, f\"Description must be a string, got {type(description).__name__}\"\n description = description.strip()\n if description:\n # Check for angle brackets\n if '<' in description or '>' in description:\n return False, \"Description cannot contain angle brackets (< or >)\"\n # Check description length (max 1024 characters per spec)\n if len(description) > 1024:\n return False, f\"Description is too long ({len(description)} characters). Maximum is 1024 characters.\"\n\n return True, \"Skill is valid!\"\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(\"Usage: python quick_validate.py \")\n sys.exit(1)\n \n valid, message = validate_skill(sys.argv[1])\n print(message)\n sys.exit(0 if valid else 1)" + "content": "#!/usr/bin/env python3\n\"\"\"\nQuick validation script for skills - minimal version\n\"\"\"\n\nimport sys\nimport os\nimport re\nimport yaml\nfrom pathlib import Path\n\ndef validate_skill(skill_path):\n \"\"\"Basic validation of a skill\"\"\"\n skill_path = Path(skill_path)\n\n # Check SKILL.md exists\n skill_md = skill_path / 'SKILL.md'\n if not skill_md.exists():\n return False, \"SKILL.md not found\"\n\n # Read and validate frontmatter\n content = skill_md.read_text()\n if not content.startswith('---'):\n return False, \"No YAML frontmatter found\"\n\n # Extract frontmatter\n match = re.match(r'^---\\n(.*?)\\n---', content, re.DOTALL)\n if not match:\n return False, \"Invalid frontmatter format\"\n\n frontmatter_text = match.group(1)\n\n # Parse YAML frontmatter\n try:\n frontmatter = yaml.safe_load(frontmatter_text)\n if not isinstance(frontmatter, dict):\n return False, \"Frontmatter must be a YAML dictionary\"\n except yaml.YAMLError as e:\n return False, f\"Invalid YAML in frontmatter: {e}\"\n\n # Define allowed properties\n ALLOWED_PROPERTIES = {'name', 'description', 'license', 'allowed-tools', 'metadata', 'compatibility'}\n\n # Check for unexpected properties (excluding nested keys under metadata)\n unexpected_keys = set(frontmatter.keys()) - ALLOWED_PROPERTIES\n if unexpected_keys:\n return False, (\n f\"Unexpected key(s) in SKILL.md frontmatter: {', '.join(sorted(unexpected_keys))}. \"\n f\"Allowed properties are: {', '.join(sorted(ALLOWED_PROPERTIES))}\"\n )\n\n # Check required fields\n if 'name' not in frontmatter:\n return False, \"Missing 'name' in frontmatter\"\n if 'description' not in frontmatter:\n return False, \"Missing 'description' in frontmatter\"\n\n # Extract name for validation\n name = frontmatter.get('name', '')\n if not isinstance(name, str):\n return False, f\"Name must be a string, got {type(name).__name__}\"\n name = name.strip()\n if name:\n # Check naming convention (kebab-case: lowercase with hyphens)\n if not re.match(r'^[a-z0-9-]+$', name):\n return False, f\"Name '{name}' should be kebab-case (lowercase letters, digits, and hyphens only)\"\n if name.startswith('-') or name.endswith('-') or '--' in name:\n return False, f\"Name '{name}' cannot start/end with hyphen or contain consecutive hyphens\"\n # Check name length (max 64 characters per spec)\n if len(name) > 64:\n return False, f\"Name is too long ({len(name)} characters). 
Maximum is 64 characters.\"\n\n # Extract and validate description\n description = frontmatter.get('description', '')\n if not isinstance(description, str):\n return False, f\"Description must be a string, got {type(description).__name__}\"\n description = description.strip()\n if description:\n # Check for angle brackets\n if '<' in description or '>' in description:\n return False, \"Description cannot contain angle brackets (< or >)\"\n # Check description length (max 1024 characters per spec)\n if len(description) > 1024:\n return False, f\"Description is too long ({len(description)} characters). Maximum is 1024 characters.\"\n\n # Validate compatibility field if present (optional)\n compatibility = frontmatter.get('compatibility', '')\n if compatibility:\n if not isinstance(compatibility, str):\n return False, f\"Compatibility must be a string, got {type(compatibility).__name__}\"\n if len(compatibility) > 500:\n return False, f\"Compatibility is too long ({len(compatibility)} characters). Maximum is 500 characters.\"\n\n return True, \"Skill is valid!\"\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(\"Usage: python quick_validate.py \")\n sys.exit(1)\n \n valid, message = validate_skill(sys.argv[1])\n print(message)\n sys.exit(0 if valid else 1)" + }, + { + "name": "run_eval.py", + "node_type": "file", + "content": "#!/usr/bin/env python3\n\"\"\"Run trigger evaluation for a skill description.\n\nTests whether a skill's description causes Claude to trigger (read the skill)\nfor a set of queries. Outputs results as JSON.\n\"\"\"\n\nimport argparse\nimport json\nimport os\nimport select\nimport subprocess\nimport sys\nimport time\nimport uuid\nfrom concurrent.futures import ProcessPoolExecutor, as_completed\nfrom pathlib import Path\n\nfrom scripts.utils import parse_skill_md\n\n\ndef find_project_root() -> Path:\n \"\"\"Find the project root by walking up from cwd looking for .claude/.\n\n Mimics how Claude Code discovers its project root, so the command file\n we create ends up where claude -p will look for it.\n \"\"\"\n current = Path.cwd()\n for parent in [current, *current.parents]:\n if (parent / \".claude\").is_dir():\n return parent\n return current\n\n\ndef run_single_query(\n query: str,\n skill_name: str,\n skill_description: str,\n timeout: int,\n project_root: str,\n model: str | None = None,\n) -> bool:\n \"\"\"Run a single query and return whether the skill was triggered.\n\n Creates a command file in .claude/commands/ so it appears in Claude's\n available_skills list, then runs `claude -p` with the raw query.\n Uses --include-partial-messages to detect triggering early from\n stream events (content_block_start) rather than waiting for the\n full assistant message, which only arrives after tool execution.\n \"\"\"\n unique_id = uuid.uuid4().hex[:8]\n clean_name = f\"{skill_name}-skill-{unique_id}\"\n project_commands_dir = Path(project_root) / \".claude\" / \"commands\"\n command_file = project_commands_dir / f\"{clean_name}.md\"\n\n try:\n project_commands_dir.mkdir(parents=True, exist_ok=True)\n # Use YAML block scalar to avoid breaking on quotes in description\n indented_desc = \"\\n \".join(skill_description.split(\"\\n\"))\n command_content = (\n f\"---\\n\"\n f\"description: |\\n\"\n f\" {indented_desc}\\n\"\n f\"---\\n\\n\"\n f\"# {skill_name}\\n\\n\"\n f\"This skill handles: {skill_description}\\n\"\n )\n command_file.write_text(command_content)\n\n cmd = [\n \"claude\",\n \"-p\", query,\n \"--output-format\", \"stream-json\",\n \"--verbose\",\n 
\"--include-partial-messages\",\n ]\n if model:\n cmd.extend([\"--model\", model])\n\n # Remove CLAUDECODE env var to allow nesting claude -p inside a\n # Claude Code session. The guard is for interactive terminal conflicts;\n # programmatic subprocess usage is safe.\n env = {k: v for k, v in os.environ.items() if k != \"CLAUDECODE\"}\n\n process = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.DEVNULL,\n cwd=project_root,\n env=env,\n )\n\n triggered = False\n start_time = time.time()\n buffer = \"\"\n # Track state for stream event detection\n pending_tool_name = None\n accumulated_json = \"\"\n\n try:\n while time.time() - start_time < timeout:\n if process.poll() is not None:\n remaining = process.stdout.read()\n if remaining:\n buffer += remaining.decode(\"utf-8\", errors=\"replace\")\n break\n\n ready, _, _ = select.select([process.stdout], [], [], 1.0)\n if not ready:\n continue\n\n chunk = os.read(process.stdout.fileno(), 8192)\n if not chunk:\n break\n buffer += chunk.decode(\"utf-8\", errors=\"replace\")\n\n while \"\\n\" in buffer:\n line, buffer = buffer.split(\"\\n\", 1)\n line = line.strip()\n if not line:\n continue\n\n try:\n event = json.loads(line)\n except json.JSONDecodeError:\n continue\n\n # Early detection via stream events\n if event.get(\"type\") == \"stream_event\":\n se = event.get(\"event\", {})\n se_type = se.get(\"type\", \"\")\n\n if se_type == \"content_block_start\":\n cb = se.get(\"content_block\", {})\n if cb.get(\"type\") == \"tool_use\":\n tool_name = cb.get(\"name\", \"\")\n if tool_name in (\"Skill\", \"Read\"):\n pending_tool_name = tool_name\n accumulated_json = \"\"\n else:\n return False\n\n elif se_type == \"content_block_delta\" and pending_tool_name:\n delta = se.get(\"delta\", {})\n if delta.get(\"type\") == \"input_json_delta\":\n accumulated_json += delta.get(\"partial_json\", \"\")\n if clean_name in accumulated_json:\n return True\n\n elif se_type in (\"content_block_stop\", \"message_stop\"):\n if pending_tool_name:\n return clean_name in accumulated_json\n if se_type == \"message_stop\":\n return False\n\n # Fallback: full assistant message\n elif event.get(\"type\") == \"assistant\":\n message = event.get(\"message\", {})\n for content_item in message.get(\"content\", []):\n if content_item.get(\"type\") != \"tool_use\":\n continue\n tool_name = content_item.get(\"name\", \"\")\n tool_input = content_item.get(\"input\", {})\n if tool_name == \"Skill\" and clean_name in tool_input.get(\"skill\", \"\"):\n triggered = True\n elif tool_name == \"Read\" and clean_name in tool_input.get(\"file_path\", \"\"):\n triggered = True\n return triggered\n\n elif event.get(\"type\") == \"result\":\n return triggered\n finally:\n # Clean up process on any exit path (return, exception, timeout)\n if process.poll() is None:\n process.kill()\n process.wait()\n\n return triggered\n finally:\n if command_file.exists():\n command_file.unlink()\n\n\ndef run_eval(\n eval_set: list[dict],\n skill_name: str,\n description: str,\n num_workers: int,\n timeout: int,\n project_root: Path,\n runs_per_query: int = 1,\n trigger_threshold: float = 0.5,\n model: str | None = None,\n) -> dict:\n \"\"\"Run the full eval set and return results.\"\"\"\n results = []\n\n with ProcessPoolExecutor(max_workers=num_workers) as executor:\n future_to_info = {}\n for item in eval_set:\n for run_idx in range(runs_per_query):\n future = executor.submit(\n run_single_query,\n item[\"query\"],\n skill_name,\n description,\n timeout,\n str(project_root),\n 
model,\n )\n future_to_info[future] = (item, run_idx)\n\n query_triggers: dict[str, list[bool]] = {}\n query_items: dict[str, dict] = {}\n for future in as_completed(future_to_info):\n item, _ = future_to_info[future]\n query = item[\"query\"]\n query_items[query] = item\n if query not in query_triggers:\n query_triggers[query] = []\n try:\n query_triggers[query].append(future.result())\n except Exception as e:\n print(f\"Warning: query failed: {e}\", file=sys.stderr)\n query_triggers[query].append(False)\n\n for query, triggers in query_triggers.items():\n item = query_items[query]\n trigger_rate = sum(triggers) / len(triggers)\n should_trigger = item[\"should_trigger\"]\n if should_trigger:\n did_pass = trigger_rate >= trigger_threshold\n else:\n did_pass = trigger_rate < trigger_threshold\n results.append({\n \"query\": query,\n \"should_trigger\": should_trigger,\n \"trigger_rate\": trigger_rate,\n \"triggers\": sum(triggers),\n \"runs\": len(triggers),\n \"pass\": did_pass,\n })\n\n passed = sum(1 for r in results if r[\"pass\"])\n total = len(results)\n\n return {\n \"skill_name\": skill_name,\n \"description\": description,\n \"results\": results,\n \"summary\": {\n \"total\": total,\n \"passed\": passed,\n \"failed\": total - passed,\n },\n }\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Run trigger evaluation for a skill description\")\n parser.add_argument(\"--eval-set\", required=True, help=\"Path to eval set JSON file\")\n parser.add_argument(\"--skill-path\", required=True, help=\"Path to skill directory\")\n parser.add_argument(\"--description\", default=None, help=\"Override description to test\")\n parser.add_argument(\"--num-workers\", type=int, default=10, help=\"Number of parallel workers\")\n parser.add_argument(\"--timeout\", type=int, default=30, help=\"Timeout per query in seconds\")\n parser.add_argument(\"--runs-per-query\", type=int, default=3, help=\"Number of runs per query\")\n parser.add_argument(\"--trigger-threshold\", type=float, default=0.5, help=\"Trigger rate threshold\")\n parser.add_argument(\"--model\", default=None, help=\"Model to use for claude -p (default: user's configured model)\")\n parser.add_argument(\"--verbose\", action=\"store_true\", help=\"Print progress to stderr\")\n args = parser.parse_args()\n\n eval_set = json.loads(Path(args.eval_set).read_text())\n skill_path = Path(args.skill_path)\n\n if not (skill_path / \"SKILL.md\").exists():\n print(f\"Error: No SKILL.md found at {skill_path}\", file=sys.stderr)\n sys.exit(1)\n\n name, original_description, content = parse_skill_md(skill_path)\n description = args.description or original_description\n project_root = find_project_root()\n\n if args.verbose:\n print(f\"Evaluating: {description}\", file=sys.stderr)\n\n output = run_eval(\n eval_set=eval_set,\n skill_name=name,\n description=description,\n num_workers=args.num_workers,\n timeout=args.timeout,\n project_root=project_root,\n runs_per_query=args.runs_per_query,\n trigger_threshold=args.trigger_threshold,\n model=args.model,\n )\n\n if args.verbose:\n summary = output[\"summary\"]\n print(f\"Results: {summary['passed']}/{summary['total']} passed\", file=sys.stderr)\n for r in output[\"results\"]:\n status = \"PASS\" if r[\"pass\"] else \"FAIL\"\n rate_str = f\"{r['triggers']}/{r['runs']}\"\n print(f\" [{status}] rate={rate_str} expected={r['should_trigger']}: {r['query'][:70]}\", file=sys.stderr)\n\n print(json.dumps(output, indent=2))\n\n\nif __name__ == \"__main__\":\n main()\n" + }, + { + "name": 
"run_loop.py", + "node_type": "file", + "content": "#!/usr/bin/env python3\n\"\"\"Run the eval + improve loop until all pass or max iterations reached.\n\nCombines run_eval.py and improve_description.py in a loop, tracking history\nand returning the best description found. Supports train/test split to prevent\noverfitting.\n\"\"\"\n\nimport argparse\nimport json\nimport random\nimport sys\nimport tempfile\nimport time\nimport webbrowser\nfrom pathlib import Path\n\nfrom scripts.generate_report import generate_html\nfrom scripts.improve_description import improve_description\nfrom scripts.run_eval import find_project_root, run_eval\nfrom scripts.utils import parse_skill_md\n\n\ndef split_eval_set(eval_set: list[dict], holdout: float, seed: int = 42) -> tuple[list[dict], list[dict]]:\n \"\"\"Split eval set into train and test sets, stratified by should_trigger.\"\"\"\n random.seed(seed)\n\n # Separate by should_trigger\n trigger = [e for e in eval_set if e[\"should_trigger\"]]\n no_trigger = [e for e in eval_set if not e[\"should_trigger\"]]\n\n # Shuffle each group\n random.shuffle(trigger)\n random.shuffle(no_trigger)\n\n # Calculate split points\n n_trigger_test = max(1, int(len(trigger) * holdout))\n n_no_trigger_test = max(1, int(len(no_trigger) * holdout))\n\n # Split\n test_set = trigger[:n_trigger_test] + no_trigger[:n_no_trigger_test]\n train_set = trigger[n_trigger_test:] + no_trigger[n_no_trigger_test:]\n\n return train_set, test_set\n\n\ndef run_loop(\n eval_set: list[dict],\n skill_path: Path,\n description_override: str | None,\n num_workers: int,\n timeout: int,\n max_iterations: int,\n runs_per_query: int,\n trigger_threshold: float,\n holdout: float,\n model: str,\n verbose: bool,\n live_report_path: Path | None = None,\n log_dir: Path | None = None,\n) -> dict:\n \"\"\"Run the eval + improvement loop.\"\"\"\n project_root = find_project_root()\n name, original_description, content = parse_skill_md(skill_path)\n current_description = description_override or original_description\n\n # Split into train/test if holdout > 0\n if holdout > 0:\n train_set, test_set = split_eval_set(eval_set, holdout)\n if verbose:\n print(f\"Split: {len(train_set)} train, {len(test_set)} test (holdout={holdout})\", file=sys.stderr)\n else:\n train_set = eval_set\n test_set = []\n\n history = []\n exit_reason = \"unknown\"\n\n for iteration in range(1, max_iterations + 1):\n if verbose:\n print(f\"\\n{'='*60}\", file=sys.stderr)\n print(f\"Iteration {iteration}/{max_iterations}\", file=sys.stderr)\n print(f\"Description: {current_description}\", file=sys.stderr)\n print(f\"{'='*60}\", file=sys.stderr)\n\n # Evaluate train + test together in one batch for parallelism\n all_queries = train_set + test_set\n t0 = time.time()\n all_results = run_eval(\n eval_set=all_queries,\n skill_name=name,\n description=current_description,\n num_workers=num_workers,\n timeout=timeout,\n project_root=project_root,\n runs_per_query=runs_per_query,\n trigger_threshold=trigger_threshold,\n model=model,\n )\n eval_elapsed = time.time() - t0\n\n # Split results back into train/test by matching queries\n train_queries_set = {q[\"query\"] for q in train_set}\n train_result_list = [r for r in all_results[\"results\"] if r[\"query\"] in train_queries_set]\n test_result_list = [r for r in all_results[\"results\"] if r[\"query\"] not in train_queries_set]\n\n train_passed = sum(1 for r in train_result_list if r[\"pass\"])\n train_total = len(train_result_list)\n train_summary = {\"passed\": train_passed, \"failed\": 
train_total - train_passed, \"total\": train_total}\n train_results = {\"results\": train_result_list, \"summary\": train_summary}\n\n if test_set:\n test_passed = sum(1 for r in test_result_list if r[\"pass\"])\n test_total = len(test_result_list)\n test_summary = {\"passed\": test_passed, \"failed\": test_total - test_passed, \"total\": test_total}\n test_results = {\"results\": test_result_list, \"summary\": test_summary}\n else:\n test_results = None\n test_summary = None\n\n history.append({\n \"iteration\": iteration,\n \"description\": current_description,\n \"train_passed\": train_summary[\"passed\"],\n \"train_failed\": train_summary[\"failed\"],\n \"train_total\": train_summary[\"total\"],\n \"train_results\": train_results[\"results\"],\n \"test_passed\": test_summary[\"passed\"] if test_summary else None,\n \"test_failed\": test_summary[\"failed\"] if test_summary else None,\n \"test_total\": test_summary[\"total\"] if test_summary else None,\n \"test_results\": test_results[\"results\"] if test_results else None,\n # For backward compat with report generator\n \"passed\": train_summary[\"passed\"],\n \"failed\": train_summary[\"failed\"],\n \"total\": train_summary[\"total\"],\n \"results\": train_results[\"results\"],\n })\n\n # Write live report if path provided\n if live_report_path:\n partial_output = {\n \"original_description\": original_description,\n \"best_description\": current_description,\n \"best_score\": \"in progress\",\n \"iterations_run\": len(history),\n \"holdout\": holdout,\n \"train_size\": len(train_set),\n \"test_size\": len(test_set),\n \"history\": history,\n }\n live_report_path.write_text(generate_html(partial_output, auto_refresh=True, skill_name=name))\n\n if verbose:\n def print_eval_stats(label, results, elapsed):\n pos = [r for r in results if r[\"should_trigger\"]]\n neg = [r for r in results if not r[\"should_trigger\"]]\n tp = sum(r[\"triggers\"] for r in pos)\n pos_runs = sum(r[\"runs\"] for r in pos)\n fn = pos_runs - tp\n fp = sum(r[\"triggers\"] for r in neg)\n neg_runs = sum(r[\"runs\"] for r in neg)\n tn = neg_runs - fp\n total = tp + tn + fp + fn\n precision = tp / (tp + fp) if (tp + fp) > 0 else 1.0\n recall = tp / (tp + fn) if (tp + fn) > 0 else 1.0\n accuracy = (tp + tn) / total if total > 0 else 0.0\n print(f\"{label}: {tp+tn}/{total} correct, precision={precision:.0%} recall={recall:.0%} accuracy={accuracy:.0%} ({elapsed:.1f}s)\", file=sys.stderr)\n for r in results:\n status = \"PASS\" if r[\"pass\"] else \"FAIL\"\n rate_str = f\"{r['triggers']}/{r['runs']}\"\n print(f\" [{status}] rate={rate_str} expected={r['should_trigger']}: {r['query'][:60]}\", file=sys.stderr)\n\n print_eval_stats(\"Train\", train_results[\"results\"], eval_elapsed)\n if test_summary:\n print_eval_stats(\"Test \", test_results[\"results\"], 0)\n\n if train_summary[\"failed\"] == 0:\n exit_reason = f\"all_passed (iteration {iteration})\"\n if verbose:\n print(f\"\\nAll train queries passed on iteration {iteration}!\", file=sys.stderr)\n break\n\n if iteration == max_iterations:\n exit_reason = f\"max_iterations ({max_iterations})\"\n if verbose:\n print(f\"\\nMax iterations reached ({max_iterations}).\", file=sys.stderr)\n break\n\n # Improve the description based on train results\n if verbose:\n print(f\"\\nImproving description...\", file=sys.stderr)\n\n t0 = time.time()\n # Strip test scores from history so improvement model can't see them\n blinded_history = [\n {k: v for k, v in h.items() if not k.startswith(\"test_\")}\n for h in history\n ]\n 
new_description = improve_description(\n skill_name=name,\n skill_content=content,\n current_description=current_description,\n eval_results=train_results,\n history=blinded_history,\n model=model,\n log_dir=log_dir,\n iteration=iteration,\n )\n improve_elapsed = time.time() - t0\n\n if verbose:\n print(f\"Proposed ({improve_elapsed:.1f}s): {new_description}\", file=sys.stderr)\n\n current_description = new_description\n\n # Find the best iteration by TEST score (or train if no test set)\n if test_set:\n best = max(history, key=lambda h: h[\"test_passed\"] or 0)\n best_score = f\"{best['test_passed']}/{best['test_total']}\"\n else:\n best = max(history, key=lambda h: h[\"train_passed\"])\n best_score = f\"{best['train_passed']}/{best['train_total']}\"\n\n if verbose:\n print(f\"\\nExit reason: {exit_reason}\", file=sys.stderr)\n print(f\"Best score: {best_score} (iteration {best['iteration']})\", file=sys.stderr)\n\n return {\n \"exit_reason\": exit_reason,\n \"original_description\": original_description,\n \"best_description\": best[\"description\"],\n \"best_score\": best_score,\n \"best_train_score\": f\"{best['train_passed']}/{best['train_total']}\",\n \"best_test_score\": f\"{best['test_passed']}/{best['test_total']}\" if test_set else None,\n \"final_description\": current_description,\n \"iterations_run\": len(history),\n \"holdout\": holdout,\n \"train_size\": len(train_set),\n \"test_size\": len(test_set),\n \"history\": history,\n }\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Run eval + improve loop\")\n parser.add_argument(\"--eval-set\", required=True, help=\"Path to eval set JSON file\")\n parser.add_argument(\"--skill-path\", required=True, help=\"Path to skill directory\")\n parser.add_argument(\"--description\", default=None, help=\"Override starting description\")\n parser.add_argument(\"--num-workers\", type=int, default=10, help=\"Number of parallel workers\")\n parser.add_argument(\"--timeout\", type=int, default=30, help=\"Timeout per query in seconds\")\n parser.add_argument(\"--max-iterations\", type=int, default=5, help=\"Max improvement iterations\")\n parser.add_argument(\"--runs-per-query\", type=int, default=3, help=\"Number of runs per query\")\n parser.add_argument(\"--trigger-threshold\", type=float, default=0.5, help=\"Trigger rate threshold\")\n parser.add_argument(\"--holdout\", type=float, default=0.4, help=\"Fraction of eval set to hold out for testing (0 to disable)\")\n parser.add_argument(\"--model\", required=True, help=\"Model for improvement\")\n parser.add_argument(\"--verbose\", action=\"store_true\", help=\"Print progress to stderr\")\n parser.add_argument(\"--report\", default=\"auto\", help=\"Generate HTML report at this path (default: 'auto' for temp file, 'none' to disable)\")\n parser.add_argument(\"--results-dir\", default=None, help=\"Save all outputs (results.json, report.html, log.txt) to a timestamped subdirectory here\")\n args = parser.parse_args()\n\n eval_set = json.loads(Path(args.eval_set).read_text())\n skill_path = Path(args.skill_path)\n\n if not (skill_path / \"SKILL.md\").exists():\n print(f\"Error: No SKILL.md found at {skill_path}\", file=sys.stderr)\n sys.exit(1)\n\n name, _, _ = parse_skill_md(skill_path)\n\n # Set up live report path\n if args.report != \"none\":\n if args.report == \"auto\":\n timestamp = time.strftime(\"%Y%m%d_%H%M%S\")\n live_report_path = Path(tempfile.gettempdir()) / f\"skill_description_report_{skill_path.name}_{timestamp}.html\"\n else:\n live_report_path = 
Path(args.report)\n        # Open the report immediately so the user can watch\n        live_report_path.write_text(\"<html><head><meta http-equiv='refresh' content='2'></head><body><p>Starting optimization loop...</p></body></html>
\")\n webbrowser.open(str(live_report_path))\n else:\n live_report_path = None\n\n # Determine output directory (create before run_loop so logs can be written)\n if args.results_dir:\n timestamp = time.strftime(\"%Y-%m-%d_%H%M%S\")\n results_dir = Path(args.results_dir) / timestamp\n results_dir.mkdir(parents=True, exist_ok=True)\n else:\n results_dir = None\n\n log_dir = results_dir / \"logs\" if results_dir else None\n\n output = run_loop(\n eval_set=eval_set,\n skill_path=skill_path,\n description_override=args.description,\n num_workers=args.num_workers,\n timeout=args.timeout,\n max_iterations=args.max_iterations,\n runs_per_query=args.runs_per_query,\n trigger_threshold=args.trigger_threshold,\n holdout=args.holdout,\n model=args.model,\n verbose=args.verbose,\n live_report_path=live_report_path,\n log_dir=log_dir,\n )\n\n # Save JSON output\n json_output = json.dumps(output, indent=2)\n print(json_output)\n if results_dir:\n (results_dir / \"results.json\").write_text(json_output)\n\n # Write final HTML report (without auto-refresh)\n if live_report_path:\n live_report_path.write_text(generate_html(output, auto_refresh=False, skill_name=name))\n print(f\"\\nReport: {live_report_path}\", file=sys.stderr)\n\n if results_dir and live_report_path:\n (results_dir / \"report.html\").write_text(generate_html(output, auto_refresh=False, skill_name=name))\n\n if results_dir:\n print(f\"Results saved to: {results_dir}\", file=sys.stderr)\n\n\nif __name__ == \"__main__\":\n main()\n" + }, + { + "name": "utils.py", + "node_type": "file", + "content": "\"\"\"Shared utilities for skill-creator scripts.\"\"\"\n\nfrom pathlib import Path\n\n\n\ndef parse_skill_md(skill_path: Path) -> tuple[str, str, str]:\n \"\"\"Parse a SKILL.md file, returning (name, description, full_content).\"\"\"\n content = (skill_path / \"SKILL.md\").read_text()\n lines = content.split(\"\\n\")\n\n if lines[0].strip() != \"---\":\n raise ValueError(\"SKILL.md missing frontmatter (no opening ---)\")\n\n end_idx = None\n for i, line in enumerate(lines[1:], start=1):\n if line.strip() == \"---\":\n end_idx = i\n break\n\n if end_idx is None:\n raise ValueError(\"SKILL.md missing frontmatter (no closing ---)\")\n\n name = \"\"\n description = \"\"\n frontmatter_lines = lines[1:end_idx]\n i = 0\n while i < len(frontmatter_lines):\n line = frontmatter_lines[i]\n if line.startswith(\"name:\"):\n name = line[len(\"name:\"):].strip().strip('\"').strip(\"'\")\n elif line.startswith(\"description:\"):\n value = line[len(\"description:\"):].strip()\n # Handle YAML multiline indicators (>, |, >-, |-)\n if value in (\">\", \"|\", \">-\", \"|-\"):\n continuation_lines: list[str] = []\n i += 1\n while i < len(frontmatter_lines) and (frontmatter_lines[i].startswith(\" \") or frontmatter_lines[i].startswith(\"\\t\")):\n continuation_lines.append(frontmatter_lines[i].strip())\n i += 1\n description = \" \".join(continuation_lines)\n continue\n else:\n description = value.strip('\"').strip(\"'\")\n i += 1\n\n return name, description, content\n" } ] }