diff --git a/.github/labeler.yml b/.github/labeler.yml index 8af2931c63..2d9ba5b440 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -1,114 +1,131 @@ -# Configuration for GitHub Actions Labeler -# https://github.com/actions/labeler +# Configuration for fuxingloh/multi-labeler +# https://github.com/fuxingloh/multi-labeler -# Label for LangSmith documentation changes -langsmith: - - changed-files: - - any-glob-to-any-file: - - src/langsmith/** - - src/langsmith/**/* +version: v1 -# Label for LangGraph documentation changes -langgraph: - - changed-files: - - any-glob-to-any-file: - - src/oss/langgraph/** - - src/oss/langgraph/**/* +labels: + # Label for LangSmith documentation changes + - label: langsmith + sync: true + matcher: + files: + any: + - 'src/langsmith/**' -# Label for LangChain documentation changes -langchain: - - changed-files: - - any-glob-to-any-file: - - src/oss/langchain/** - - src/oss/langchain/**/* - - src/oss/python/** - - src/oss/python/**/* - - src/oss/javascript/** - - src/oss/javascript/**/* + # Label for LangGraph documentation changes + - label: langgraph + sync: true + matcher: + files: + any: + - 'src/oss/langgraph/**' -# Label for OSS documentation changes -oss: - - changed-files: - - any-glob-to-any-file: - - src/oss/** - - src/oss/**/* + # Label for LangChain documentation changes + - label: langchain + sync: true + matcher: + files: + any: + - 'src/oss/langchain/**' + - 'src/oss/python/**' + - 'src/oss/javascript/**' -# Label for Python-specific documentation -python: - - changed-files: - - any-glob-to-any-file: - - src/oss/python/** - - src/oss/python/**/* - - '**/*.py' + # Label for OSS documentation changes + - label: oss + sync: true + matcher: + files: + any: + - 'src/oss/**' -# Label for JavaScript-specific documentation -javascript: - - changed-files: - - any-glob-to-any-file: - - src/oss/javascript/** - - src/oss/javascript/**/* - - '**/*.js' - - '**/*.ts' + # Label for Python-specific documentation + - label: python + sync: true + matcher: + files: + any: + - 'src/oss/python/**' + - '**/*.py' -# Label for reference-specific documentation -reference: - - changed-files: - - any-glob-to-any-file: - - reference/** - - reference/**/* + # Label for JavaScript-specific documentation + - label: javascript + sync: true + matcher: + files: + any: + - 'src/oss/javascript/**' + - '**/*.js' + - '**/*.ts' -# Label for Python reference-specific documentation -python-reference: - - changed-files: - - any-glob-to-any-file: - - reference/python/** - - reference/python/**/* + # Label for reference-specific documentation + - label: reference + sync: true + matcher: + files: + any: + - 'reference/**' -# Label for JS reference-specific documentation -javascript-reference: - - changed-files: - - any-glob-to-any-file: - - reference/javascript/** - - reference/javascript/**/* + # Label for Python reference-specific documentation + - label: python-reference + sync: true + matcher: + files: + any: + - 'reference/python/**' -# Label for build/CI changes -ci: - - changed-files: - - any-glob-to-any-file: - - .github/workflows/** - - .github/actions/** - - Makefile - - pyproject.toml - - package.json - - uv.lock + # Label for JS reference-specific documentation + - label: javascript-reference + sync: true + matcher: + files: + any: + - 'reference/javascript/**' -# Label for documentation infrastructure changes -docs-infra: - - changed-files: - - any-glob-to-any-file: - - pipeline/** - - pipeline/**/* - - scripts/** - - scripts/**/* + # Label for build/CI changes + - 
label: ci + sync: true + matcher: + files: + any: + - '.github/workflows/**' + - '.github/actions/**' + - 'Makefile' + - 'pyproject.toml' + - 'package.json' + - 'uv.lock' -# Label for test changes -tests: - - changed-files: - - any-glob-to-any-file: - - tests/** - - tests/**/* - - '**/*test*.py' - - '**/*test*.js' - - '**/*test*.ts' + # Label for documentation infrastructure changes + - label: docs-infra + sync: true + matcher: + files: + any: + - 'pipeline/**' + - 'scripts/**' -# Protected labels - managed by tag-external-contributions.yml -# These rules never match, preventing sync-labels from removing them -internal: - - changed-files: - - any-glob-to-any-file: - - '.github/labeler-protected-internal-never-matches' + # Label for test changes + - label: tests + sync: true + matcher: + files: + any: + - 'tests/**' + - '**/*test*.py' + - '**/*test*.js' + - '**/*test*.ts' -external: - - changed-files: - - any-glob-to-any-file: - - '.github/labeler-protected-external-never-matches' + # Protected labels - managed by tag-external-contributions.yml + # sync: false means these labels will NEVER be removed by this action + - label: internal + sync: false + matcher: + files: + any: + - '.github/labeler-protected-internal-never-matches' + + - label: external + sync: false + matcher: + files: + any: + - '.github/labeler-protected-external-never-matches' diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index c445e1cdf6..0fc9598769 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -14,7 +14,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Label PR based on file paths - uses: actions/labeler@v6 + uses: fuxingloh/multi-labeler@v4 with: - repo-token: "${{ secrets.GITHUB_TOKEN }}" - sync-labels: true # Remove labels when files no longer match + github-token: ${{ secrets.GITHUB_TOKEN }} + config-path: .github/labeler.yml diff --git a/pipeline/preprocessors/handle_auto_links.py b/pipeline/preprocessors/handle_auto_links.py index 1df76bbf6c..8aed6ecac7 100644 --- a/pipeline/preprocessors/handle_auto_links.py +++ b/pipeline/preprocessors/handle_auto_links.py @@ -100,7 +100,7 @@ def _transform_link( CROSS_REFERENCE_PATTERN = re.compile( r""" (?: # Non-capturing group for two possible formats: - @\[ # @ symbol followed by opening bracket for title + (?<!\\)@\[ # @ symbol not preceded by an escaping backslash, then opening bracket for title (?P<title>[^\]]+) # Custom title - one or more non-bracket characters \] # Closing bracket for title \[ # Opening bracket for link name @@ -109,7 +109,7 @@ (?(backtick_with_title)`|) # Closing backtick if opening backtick present \] # Closing bracket for link name | # OR - @\[ # @ symbol followed by opening bracket + (?<!\\)@\[ # @ symbol not preceded by an escaping backslash, then opening bracket (?P<backtick>`)? # Optional backtick before link name (?P<link_name>[^`\]]+) # Link name - non-backtick/bracket characters (?(backtick)`|) # Closing backtick if opening backtick present @@ -161,6 +161,9 @@ def replace_autolinks( based on the current conditional fence scope. Conditional fences use the syntax :::language to define scope boundaries. + Escaped autolinks (with backslash) are preserved as literal text: + \@[StateGraph] will appear as @[StateGraph] in the output without transformation. + Args: markdown: The markdown content to process. file_path: The file path for error reporting. @@ -168,13 +171,19 @@ Returns: Processed markdown content with @[references] transformed to proper - markdown links or left unchanged if not found. + markdown links or left unchanged if not found. Escaped references are + unescaped.
Example: Input: "@[StateGraph]\\n:::python\\n@[Command]\\n:::\\n" Output: "[StateGraph](url)\\n:::python\\n[Command](url)\\n:::\\n" + + Input (escaped): + "\\@[StateGraph]" + Output: + "@[StateGraph]" """ # Track the current scope context current_scope = default_scope @@ -199,4 +208,7 @@ def replace_autolinks( ) processed_lines.append(transformed_line) - return "".join(processed_lines) + result = "".join(processed_lines) + + # Unescape escaped autolinks by removing the backslash + return re.sub(r"\\(@\[)", r"\1", result) diff --git a/pipeline/tools/partner_pkg_table.py b/pipeline/tools/partner_pkg_table.py index ab7a319994..cd6c577197 100644 --- a/pipeline/tools/partner_pkg_table.py +++ b/pipeline/tools/partner_pkg_table.py @@ -184,8 +184,8 @@ def table() -> str: def doc() -> str: return f"""\ --- -title: Integration packages -sidebarTitle: Overview +title: LangChain integrations packages +sidebarTitle: LangChain integrations mode: "wide" --- {{/* File generated automatically by pipeline/tools/partner_pkg_table.py */}} diff --git a/reference/packages.yml b/reference/packages.yml index f6e2dd8e80..f634c0838b 100644 --- a/reference/packages.yml +++ b/reference/packages.yml @@ -40,44 +40,44 @@ packages: integration: false repo: langchain-ai/langchain path: libs/cli - downloads: 58000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 53000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-core integration: false repo: langchain-ai/langchain path: libs/core - downloads: 77000000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 73000000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-classic integration: false repo: langchain-ai/langchain path: libs/langchain downloads: 5000000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain integration: false repo: langchain-ai/langchain path: libs/langchain_v1 - downloads: 92000000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 98000000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-model-profiles integration: false repo: langchain-ai/langchain path: libs/model-profiles - downloads: 9000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 6000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-tests integration: false repo: langchain-ai/langchain path: libs/standard-tests - downloads: 500000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 460000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-text-splitters integration: false repo: langchain-ai/langchain path: libs/text-splitters downloads: 29000000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" # monorepo partners (alphabetical by path) - name: langchain-anthropic @@ -87,194 +87,194 @@ packages: path: libs/partners/anthropic js: "@langchain/anthropic" downloads: 7000000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-chroma highlight: true repo: langchain-ai/langchain path: libs/partners/chroma js: "@langchain/community" downloads: 1000000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-deepseek highlight: true repo: 
langchain-ai/langchain path: libs/partners/deepseek js: "@langchain/deepseek" - downloads: 471000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 500000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-exa repo: langchain-ai/langchain path: libs/partners/exa provider_page: exa_search js: "@langchain/exa" - downloads: 24000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 30000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-fireworks highlight: true repo: langchain-ai/langchain path: libs/partners/fireworks js: "@langchain/community" - downloads: 634000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 498000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-groq highlight: true repo: langchain-ai/langchain path: libs/partners/groq js: "@langchain/groq" downloads: 1000000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-huggingface highlight: true repo: langchain-ai/langchain path: libs/partners/huggingface js: "@langchain/community" downloads: 1000000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-mistralai highlight: true repo: langchain-ai/langchain path: libs/partners/mistralai js: "@langchain/mistralai" - downloads: 677000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 692000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-nomic repo: langchain-ai/langchain path: libs/partners/nomic js: "@langchain/nomic" - downloads: 30000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 29000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-ollama highlight: true repo: langchain-ai/langchain path: libs/partners/ollama js: "@langchain/ollama" downloads: 2000000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-openai highlight: true repo: langchain-ai/langchain path: libs/partners/openai js: "@langchain/openai" - downloads: 36000000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 37000000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-perplexity highlight: true repo: langchain-ai/langchain path: libs/partners/perplexity js: "@langchain/community" - downloads: 500000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 523000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-prompty repo: langchain-ai/langchain path: libs/partners/prompty provider_page: microsoft - downloads: 5000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 4000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-qdrant highlight: true repo: langchain-ai/langchain path: libs/partners/qdrant js: "@langchain/qdrant" - downloads: 409000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 418000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-xai name_title: xAI (Grok) highlight: true repo: langchain-ai/langchain path: libs/partners/xai js: "@langchain/xai" - downloads: 257000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 345000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" # internal 
langchain-ai org repos (alphabetical by path) - name: langchain-community integration: false repo: langchain-ai/langchain-community path: libs/community - downloads: 36000000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 35000000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-experimental integration: false repo: langchain-ai/langchain-experimental path: libs/experimental downloads: 2000000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-mcp-adapters integration: false repo: langchain-ai/langchain-mcp-adapters downloads: 2000000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" # external langchain-ai org repos (alphabetical by path) - name: langchain-ai21 repo: langchain-ai/langchain-ai21 path: libs/ai21 downloads: 2000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-aws name_title: AWS highlight: true repo: langchain-ai/langchain-aws path: libs/aws js: "@langchain/aws" - downloads: 6000000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 7000000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-azure-ai highlight: true repo: langchain-ai/langchain-azure path: libs/azure-ai provider_page: azure_ai js: "@langchain/openai" - downloads: 155000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 166000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-azure-dynamic-sessions repo: langchain-ai/langchain-azure path: libs/azure-dynamic-sessions provider_page: microsoft js: "@langchain/azure-dynamic-sessions" - downloads: 29000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 26000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-sqlserver repo: langchain-ai/langchain-azure path: libs/sqlserver provider_page: microsoft downloads: 2000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-cerebras repo: langchain-ai/langchain-cerebras path: libs/cerebras js: "@langchain/cerebras" - downloads: 93000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 89000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-cohere highlight: true repo: langchain-ai/langchain-cohere path: libs/cohere js: "@langchain/cohere" - downloads: 933000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 856000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-astradb name_title: DataStax Astra DB highlight: true repo: langchain-ai/langchain-datastax path: libs/astradb js: "@langchain/community" - downloads: 163000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 166000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-elasticsearch highlight: true repo: langchain-ai/langchain-elastic path: libs/elasticsearch js: "@langchain/community" downloads: 309000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-google-community name_title: Google (Community) repo: langchain-ai/langchain-google path: libs/community provider_page: google downloads: 7000000 - downloads_updated_at: 
"2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-google-genai name_title: Google (GenAI) highlight: true @@ -282,8 +282,8 @@ packages: path: libs/genai provider_page: google js: "@langchain/google-genai" - downloads: 8000000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 7000000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-google-vertexai name_title: Google (Vertex AI) highlight: true @@ -292,495 +292,495 @@ packages: provider_page: google js: "@langchain/google-vertexai" downloads: 29000000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-ibm name_title: IBM highlight: true repo: langchain-ai/langchain-ibm path: libs/ibm js: "@langchain/ibm" - downloads: 485000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 478000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-db2 repo: langchain-ai/langchain-ibm path: libs/langchain-db2 provider_page: ibm downloads: 2000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-milvus highlight: true repo: langchain-ai/langchain-milvus path: libs/milvus js: "@langchain/community" - downloads: 382000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 360000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-mongodb highlight: true repo: langchain-ai/langchain-mongodb path: libs/langchain-mongodb provider_page: mongodb_atlas js: "@langchain/mongodb" - downloads: 487000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 523000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-neo4j repo: langchain-ai/langchain-neo4j path: libs/neo4j js: "@langchain/community" - downloads: 148000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 152000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-nvidia-ai-endpoints highlight: true repo: langchain-ai/langchain-nvidia path: libs/ai-endpoints provider_page: nvidia - downloads: 538000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 487000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-parallel repo: parallel-web/langchain-parallel provider_page: parallel - downloads: 772 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 553 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-pinecone highlight: true repo: langchain-ai/langchain-pinecone path: libs/pinecone js: "@langchain/pinecone" - downloads: 900000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 920000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-postgres highlight: true repo: langchain-ai/langchain-postgres provider_page: pgvector js: "@langchain/community" - downloads: 793000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 786000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-redis highlight: true repo: langchain-ai/langchain-redis path: libs/redis js: "@langchain/redis" - downloads: 139000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 137000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-sema4 repo: langchain-ai/langchain-sema4 
path: libs/sema4 provider_page: robocorp - downloads: 141 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 129 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-snowflake repo: langchain-ai/langchain-snowflake path: libs/snowflake downloads: 2000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-together highlight: true repo: langchain-ai/langchain-together path: libs/together js: "@langchain/community" - downloads: 117000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 111000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-unstructured repo: langchain-ai/langchain-unstructured path: libs/unstructured js: "@langchain/community" - downloads: 235000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 233000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-upstage repo: langchain-ai/langchain-upstage path: libs/upstage - downloads: 45000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 34000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-weaviate repo: langchain-ai/langchain-weaviate path: libs/weaviate js: "@langchain/weaviate" - downloads: 51000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 52000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" # external repos (not organized) - name: langchain-aimlapi repo: D1m7asis/langchain-aimlapi path: libs/aimlapi - downloads: 532 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 521 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: databricks-langchain name_title: Databricks highlight: true repo: databricks/databricks-ai-bridge path: integrations/langchain js: "@langchain/community" - downloads: 784000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 752000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-couchbase repo: Couchbase-Ecosystem/langchain-couchbase - downloads: 1000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 2000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-box repo: box-community/langchain-box path: libs/box - downloads: 680 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 638 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-linkup repo: LinkupPlatform/langchain-linkup - downloads: 8000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 7000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-oceanbase repo: oceanbase/langchain-oceanbase downloads: 2000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-predictionguard repo: predictionguard/langchain-predictionguard - downloads: 5000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 4000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-cratedb repo: crate/langchain-cratedb - downloads: 201 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 471 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-modelscope repo: modelscope/langchain-modelscope - downloads: 180 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 183 + 
downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-falkordb repo: kingtroga/langchain-falkordb - downloads: 175 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 178 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-dappier repo: DappierAI/langchain-dappier - downloads: 266 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 288 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-pull-md repo: chigwell/langchain-pull-md - downloads: 136 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 126 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-kuzu repo: kuzudb/langchain-kuzu - downloads: 428 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 524 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-docling repo: DS4SD/docling-langchain downloads: 51000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-lindorm-integration name_title: Lindorm repo: AlwaysBluer/langchain-lindorm-integration provider_page: lindorm - downloads: 71 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 66 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-hyperbrowser repo: hyperbrowserai/langchain-hyperbrowser - downloads: 2000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 1000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-fmp-data repo: MehdiZare/langchain-fmp-data - downloads: 158 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 182 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: tilores-langchain name_title: Tilores repo: tilotech/tilores-langchain - downloads: 293 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 207 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-pipeshift repo: pipeshift-org/langchain-pipeshift - downloads: 99 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 113 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-sambanova repo: sambanova/langchain-sambanova - downloads: 127000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 132000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-jenkins repo: Amitgb14/langchain_jenkins - downloads: 332 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 367 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-nimble repo: Nimbleway/langchain-nimble - downloads: 226 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 524 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-apify repo: apify/langchain-apify - downloads: 25000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 31000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langfair name_title: LangFair repo: cvs-health/langfair downloads: 2000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-abso repo: lunary-ai/langchain-abso - downloads: 168 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 190 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-graph-retriever 
name_title: Graph RAG repo: datastax/graph-rag path: packages/langchain-graph-retriever provider_page: graph_rag - downloads: 137000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 143000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-salesforce repo: colesmcintosh/langchain-salesforce downloads: 1000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-discord-shikenso name_title: Discord (Shikenso) repo: Shikenso-Analytics/langchain-discord - downloads: 143 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 157 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-vdms name_title: VDMS repo: IntelLabs/langchain-vdms downloads: 2000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-deeplake repo: activeloopai/langchain-deeplake - downloads: 514 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 511 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-cognee repo: topoteretes/langchain-cognee - downloads: 178 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 183 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-prolog repo: apisani1/langchain-prolog - downloads: 680 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 757 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-permit repo: permitio/langchain-permit - downloads: 157 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 177 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-pymupdf4llm repo: lakinduboteju/langchain-pymupdf4llm - downloads: 20000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 21000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-writer repo: writer/langchain-writer downloads: 7000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-taiga name_title: Taiga repo: Shikenso-Analytics/langchain-taiga - downloads: 499 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 557 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-tableau name_title: Tableau repo: Tab-SE/tableau_langchain - downloads: 505 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 451 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: ads4gpts-langchain name_title: ADS4GPTs repo: ADS4GPTs/ads4gpts path: libs/python-sdk/ads4gpts-langchain - downloads: 1000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 980 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-contextual name_title: Contextual AI repo: ContextualAI//langchain-contextual path: langchain-contextual - downloads: 568 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 579 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-valthera name_title: Valthera repo: valthera/langchain-valthera - downloads: 167 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 187 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-opengradient repo: OpenGradient/og-langchain - downloads: 151 - downloads_updated_at: 
"2025-12-08T05:43:13.772223+00:00" + downloads: 160 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: goat-sdk-adapter-langchain name_title: GOAT SDK repo: goat-sdk/goat path: python/src/adapters/langchain provider_page: goat - downloads: 305 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 203 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-netmind repo: protagolabs/langchain-netmind - downloads: 74 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 76 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-agentql repo: tinyfish-io/agentql-integrations path: langchain - downloads: 379 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 377 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-xinference repo: TheSongg/langchain-xinference - downloads: 237 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 226 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-tavily highlight: true repo: tavily-ai/langchain-tavily js: "@langchain/tavily" - downloads: 358000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 360000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-zotero-retriever name_title: Zotero repo: TimBMK/langchain-zotero-retriever provider_page: zotero - downloads: 77 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 76 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-naver name_title: Naver repo: NaverCloudPlatform/langchain-naver downloads: 2000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-naver-community name_title: Naver (Community) repo: e7217/langchain-naver-community provider_page: naver - downloads: 1000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 493 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-memgraph repo: memgraph/langchain-memgraph downloads: 21000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-vectara repo: vectara/langchain-vectara path: libs/vectara - downloads: 191 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 225 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-oxylabs repo: oxylabs/langchain-oxylabs - downloads: 354 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 188 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-runpod name_title: RunPod repo: runpod/langchain-runpod - downloads: 988 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 843 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-mariadb repo: mariadb-corporation/langchain-mariadb downloads: 3000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-qwq repo: yigit353/langchain-qwq provider_page: alibaba_cloud - downloads: 18000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 14000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-litellm name_title: LiteLLM highlight: true repo: akshay-dongare/langchain-litellm js: "n/a" - downloads: 184000 - downloads_updated_at: 
"2025-12-08T05:43:13.772223+00:00" + downloads: 163000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-cloudflare repo: cloudflare/langchain-cloudflare path: libs/langchain-cloudflare js: "@langchain/cloudflare" downloads: 2000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-ydb repo: ydb-platform/langchain-ydb - downloads: 491 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 476 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-singlestore name_title: SingleStore repo: singlestore-labs/langchain-singlestore - downloads: 760 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 765 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-galaxia-retriever repo: rrozanski-smabbler/galaxia-langchain provider_page: galaxia - downloads: 147 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 155 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-valyu repo: valyu-network/langchain-valyu - downloads: 1000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 811 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-hana name_title: SAP HANA Cloud repo: SAP/langchain-integration-for-sap-hana-cloud provider_page: sap - downloads: 21000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 19000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-gel repo: geldata/langchain-gel downloads: 142 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-brightdata repo: luminati-io/langchain-brightdata downloads: 1000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-featherless-ai repo: featherlessai/langchain-featherless-ai - downloads: 207 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 199 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-nebius repo: nebius/langchain-nebius path: libs/nebius - downloads: 1000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 2000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-surrealdb repo: surrealdb/langchain-surrealdb - downloads: 593 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 659 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-greennode repo: greennode-ai/langchain-greennode path: libs/greennode - downloads: 70 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 71 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-tensorlake repo: tensorlakeai/langchain-tensorlake - downloads: 239 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 257 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-gradient name_title: DigitalOcean Gradient AI Platform repo: digitalocean/langchain-gradient provider_page: gradientai - downloads: 411 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 751 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-anchorbrowser name_title: Anchor Browser repo: anchorbrowser/langchain-anchorbrowser provider_page: anchor_browser - downloads: 511 - 
downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 478 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: toolbox-langchain name_title: MCP Toolbox (Google) highlight: true repo: googleapis/mcp-toolbox-sdk-python path: packages/toolbox-langchain - downloads: 8000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 9000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-scrapeless repo: scrapeless-ai/langchain-scrapeless - downloads: 73 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 69 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-google-bigtable name_title: Bigtable (Google) repo: googleapis/langchain-google-bigtable-python provider_page: google - downloads: 363 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 480 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-oci name_title: Oracle Cloud Infrastructure (OCI) repo: oracle/langchain-oracle - downloads: 5000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 6000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-zeusdb repo: zeusdb/langchain-zeusdb path: libs/zeusdb - downloads: 547 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 559 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-scraperapi repo: scraperapi/langchain-scraperapi - downloads: 802 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 717 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-localai repo: mkhludnev/langchain-localai path: libs/localai downloads: 1000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-scrapegraph repo: ScrapeGraphAI/langchain-scrapegraph - downloads: 795 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 899 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-voyageai repo: voyage-ai/langchain-voyageai path: libs/voyageai - downloads: 41000 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 38000 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" - name: langchain-oracledb name_title: Oracle AI Vector Search repo: oracle/langchain-oracle provider_page: oracleai - downloads: 799 - downloads_updated_at: "2025-12-08T05:43:13.772223+00:00" + downloads: 820 + downloads_updated_at: "2025-12-15T00:06:38.115017+00:00" diff --git a/reference/python/docs/integrations/langchain_elasticsearch.md b/reference/python/docs/integrations/langchain_elasticsearch.md index 00150d72ac..bd79846feb 100644 --- a/reference/python/docs/integrations/langchain_elasticsearch.md +++ b/reference/python/docs/integrations/langchain_elasticsearch.md @@ -13,3 +13,4 @@ title: Elasticsearch ::: langchain_elasticsearch._async.retrievers.AsyncElasticsearchRetriever ::: langchain_elasticsearch._async.cache.AsyncElasticsearchCache ::: langchain_elasticsearch._async.cache.AsyncElasticsearchEmbeddingsCache +::: langchain_elasticsearch._async.chat_history.AsyncElasticsearchChatMessageHistory diff --git a/reference/python/docs/integrations/langchain_unstructured.md b/reference/python/docs/integrations/langchain_unstructured.md index 69fedab9e7..981d10c3d0 100644 --- a/reference/python/docs/integrations/langchain_unstructured.md +++ b/reference/python/docs/integrations/langchain_unstructured.md @@ -8,5 +8,4 
@@ title: Unstructured [![PyPI - License](https://img.shields.io/pypi/l/langchain-unstructured)](https://opensource.org/licenses/MIT) [![PyPI - Downloads](https://img.shields.io/pepy/dt/langchain-unstructured)](https://pypistats.org/packages/langchain-unstructured) -!!! note - This package ref has not yet been fully migrated to v1. +::: langchain_unstructured diff --git a/reference/python/docs/langchain/tools.md b/reference/python/docs/langchain/tools.md index f27ab578e7..63d8e35f57 100644 --- a/reference/python/docs/langchain/tools.md +++ b/reference/python/docs/langchain/tools.md @@ -13,6 +13,9 @@ - get_input_schema - get_output_schema - response_format + - args_schema + - return_direct + - extras ::: langchain.tools.InjectedState ::: langchain.tools.InjectedStore ::: langchain.tools.InjectedToolArg diff --git a/reference/python/mkdocs.yml b/reference/python/mkdocs.yml index ac00cd9680..cb9801896b 100644 --- a/reference/python/mkdocs.yml +++ b/reference/python/mkdocs.yml @@ -440,8 +440,8 @@ nav: - Anthropic: - integrations/langchain_anthropic/index.md - ChatAnthropic: integrations/langchain_anthropic/ChatAnthropic.md - - AnthropicLLM: integrations/langchain_anthropic/AnthropicLLM.md - Middleware: integrations/langchain_anthropic/middleware.md + - AnthropicLLM: integrations/langchain_anthropic/AnthropicLLM.md - AstraDB: integrations/langchain_astradb.md - AWS: integrations/langchain_aws.md - Azure (Microsoft): diff --git a/reference/python/pyproject.dev.toml b/reference/python/pyproject.dev.toml index 3a14bd0a66..e7deb35afa 100644 --- a/reference/python/pyproject.dev.toml +++ b/reference/python/pyproject.dev.toml @@ -76,7 +76,7 @@ dependencies = [ #"langchain-snowflake", #"langchain-sqlserver", #"langchain-together", - #"langchain-unstructured", + "langchain-unstructured", "langchain-upstage", "langchain-weaviate", "langchain-tavily", @@ -109,6 +109,9 @@ override-dependencies = [ # Override httpx version to resolve conflict between langchain-google-vertexai (needs >=0.28.0) # and langchain-parallel (needs <0.28.0) "httpx>=0.28.0,<1.0.0", + # Override onnxruntime version because langchain-unstructured requires <=1.19.2 + # but those versions don't have Python 3.13 wheels (cp313) + "onnxruntime>=1.20.0", ] [tool.uv.sources] diff --git a/reference/python/pyproject.prod.toml b/reference/python/pyproject.prod.toml index 00a165809a..e70cc1c737 100644 --- a/reference/python/pyproject.prod.toml +++ b/reference/python/pyproject.prod.toml @@ -76,7 +76,7 @@ dependencies = [ #"langchain-snowflake", #"langchain-sqlserver", #"langchain-together", - #"langchain-unstructured", + "langchain-unstructured", "langchain-upstage", "langchain-weaviate", "langchain-tavily", @@ -113,6 +113,9 @@ override-dependencies = [ # Override httpx version to resolve conflict between langchain-google-vertexai (needs >=0.28.0) # and langchain-parallel (needs <0.28.0) "httpx>=0.28.0,<1.0.0", + # Override onnxruntime version because langchain-unstructured requires <=1.19.2 + # but those versions don't have Python 3.13 wheels (cp313) + "onnxruntime>=1.20.0", ] [tool.uv.sources] diff --git a/reference/python/pyproject.toml b/reference/python/pyproject.toml index 00a165809a..e70cc1c737 100644 --- a/reference/python/pyproject.toml +++ b/reference/python/pyproject.toml @@ -76,7 +76,7 @@ dependencies = [ #"langchain-snowflake", #"langchain-sqlserver", #"langchain-together", - #"langchain-unstructured", + "langchain-unstructured", "langchain-upstage", "langchain-weaviate", "langchain-tavily", @@ -113,6 +113,9 @@ override-dependencies 
= [ # Override httpx version to resolve conflict between langchain-google-vertexai (needs >=0.28.0) # and langchain-parallel (needs <0.28.0) "httpx>=0.28.0,<1.0.0", + # Override onnxruntime version because langchain-unstructured requires <=1.19.2 + # but those versions don't have Python 3.13 wheels (cp313) + "onnxruntime>=1.20.0", ] [tool.uv.sources] diff --git a/reference/python/uv.lock b/reference/python/uv.lock index 3e5239351a..ca4354321d 100644 --- a/reference/python/uv.lock +++ b/reference/python/uv.lock @@ -12,10 +12,20 @@ supported-markers = [ overrides = [ { name = "httpx", specifier = ">=0.28.0,<1.0.0" }, { name = "langchain-core", git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fcore" }, + { name = "onnxruntime", specifier = ">=1.20.0" }, { name = "openai", specifier = ">=2.0.0,<3.0.0" }, { name = "pytest-codspeed", specifier = ">=3.1.0,<4.0.0" }, ] +[[package]] +name = "aiofiles" +version = "25.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/41/c3/534eac40372d8ee36ef40df62ec129bee4fdb5ad9706e58a29be53b2c970/aiofiles-25.1.0.tar.gz", hash = "sha256:a8d728f0a29de45dc521f18f07297428d56992a742f0cd2701ba86e44d23d5b2", size = 46354, upload-time = "2025-10-09T20:51:04.358Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bc/8a/340a1555ae33d7354dbca4faa54948d76d89a27ceef032c8c3bc661d003e/aiofiles-25.1.0-py3-none-any.whl", hash = "sha256:abe311e527c862958650f9438e859c1fa7568a141b22abcd015e120e86a85695", size = 14668, upload-time = "2025-10-09T20:51:03.174Z" }, +] + [[package]] name = "aiohappyeyeballs" version = "2.6.1" @@ -427,30 +437,30 @@ wheels = [ [[package]] name = "boto3" -version = "1.40.63" +version = "1.42.9" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "botocore", marker = "platform_python_implementation != 'PyPy'" }, { name = "jmespath", marker = "platform_python_implementation != 'PyPy'" }, { name = "s3transfer", marker = "platform_python_implementation != 'PyPy'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/5d/fb/db063c9600cbe2c52009edd436262c696b18bafaf49835a7f17ba1679a84/boto3-1.40.63.tar.gz", hash = "sha256:3bf4b034900c87a6a9b3b3b44c4aec26e96fc73bff2505f0766224b7295178ce", size = 111541, upload-time = "2025-10-30T19:32:52.081Z" } +sdist = { url = "https://files.pythonhosted.org/packages/8c/07/dfa651dbd57bfc34d952a101280928bab08ed6186f009c660a36c211ccff/boto3-1.42.9.tar.gz", hash = "sha256:cdd4cc3e5bb08ed8a0c5cc77eca78f98f0239521de0991f14e44b788b0c639b2", size = 112827, upload-time = "2025-12-12T20:33:20.236Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c6/d4/d977f678c60e05c19c857ad896f838152dc68e0cc28f0f026e224879d8ca/boto3-1.40.63-py3-none-any.whl", hash = "sha256:f15d4abf1a6283887c336f660cdfc2162a210d2d8f4d98dbcbcef983371c284d", size = 139322, upload-time = "2025-10-30T19:32:49.876Z" }, + { url = "https://files.pythonhosted.org/packages/7b/eb/97fdf6fbc8066fb1475b8ef260c1a58798b2b4f1e8839b501550de5d5ba1/boto3-1.42.9-py3-none-any.whl", hash = "sha256:d21d22af9aeb1bad8e9b670a221d6534c0120f7e7baf523dafaca83f1f5c3f90", size = 140561, upload-time = "2025-12-12T20:33:18.035Z" }, ] [[package]] name = "botocore" -version = "1.40.63" +version = "1.42.9" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "jmespath", marker = "platform_python_implementation != 'PyPy'" }, { name = "python-dateutil", marker = "platform_python_implementation != 'PyPy'" }, { name = "urllib3", marker = 
"platform_python_implementation != 'PyPy'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a2/08/62f4d332dd729d14190073eaf6db63803a5bc2d9b8f1248ae3cbc6c9cb64/botocore-1.40.63.tar.gz", hash = "sha256:0324552c3c800e258cbcb8c22b495a2e2e0260a7408d08016196e46fa0d1b587", size = 14400022, upload-time = "2025-10-30T19:32:40.81Z" } +sdist = { url = "https://files.pythonhosted.org/packages/fd/f3/2d2cfb500e2dc00b0e33e3c8743306e6330f3cf219d19e9260dab2f3d6c2/botocore-1.42.9.tar.gz", hash = "sha256:74f69bfd116cc7c8215481284957eecdb48580e071dd50cb8c64356a866abd8c", size = 14861916, upload-time = "2025-12-12T20:33:08.017Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/45/b0/17c1e8fa8617c588da33f6724909eef56e1745ddfe2f87972d9a8e9e6ca2/botocore-1.40.63-py3-none-any.whl", hash = "sha256:83657b3ee487268fccc9ba022cba572ba657b9ece8cddd1fa241e2c6a49c8c14", size = 14061984, upload-time = "2025-10-30T19:32:36.945Z" }, + { url = "https://files.pythonhosted.org/packages/1f/2a/e9275f40042f7a09915c4be86b092cb02dc4bd74e77ab8864f485d998af1/botocore-1.42.9-py3-none-any.whl", hash = "sha256:f99ba2ca34e24c4ebec150376c815646970753c032eb84f230874b2975a185a8", size = 14537810, upload-time = "2025-12-12T20:33:04.069Z" }, ] [[package]] @@ -597,7 +607,7 @@ wheels = [ [[package]] name = "chromadb" -version = "1.3.0" +version = "1.3.7" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "bcrypt", marker = "platform_python_implementation != 'PyPy'" }, @@ -628,13 +638,13 @@ dependencies = [ { name = "typing-extensions", marker = "platform_python_implementation != 'PyPy'" }, { name = "uvicorn", extra = ["standard"], marker = "platform_python_implementation != 'PyPy'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/50/b0/28fbd8985412ea903b0c43a0a50d2b49598242cadc38cac787637ed00973/chromadb-1.3.0.tar.gz", hash = "sha256:9fa223504e07477d019e7efd9e121ead89f9a177940bffabd31d5e473e4afafc", size = 1904155, upload-time = "2025-10-29T03:07:16.642Z" } +sdist = { url = "https://files.pythonhosted.org/packages/a9/b9/23eb242c0bad56bcac57d9f45a6cc85e016a44ae9baf763c0d040e45e2d7/chromadb-1.3.7.tar.gz", hash = "sha256:393b866b6ac60c12fc0f2a43d07b2884f2d02a68a1b2cb43c5ef87d141543571", size = 1960950, upload-time = "2025-12-12T21:03:13.941Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/80/bf/274f0922e72a3fc9180278e10b2d80763e35139d0b16b11c5f271cc0479c/chromadb-1.3.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:7996c5f389b5b63cbfec55dcd5982bddb8ceff6bb1de35cdf8daf7bff9a3ce3f", size = 20063503, upload-time = "2025-10-29T03:07:13.863Z" }, - { url = "https://files.pythonhosted.org/packages/3c/e4/4f4613f426ce1e4a96c2586478a67c91923f093e926560b3181ad51e80b7/chromadb-1.3.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:a6d301c9ef3e3ac52dccbfd544589142f5a2c6b746d035ac9b7c59440c6835ce", size = 19152851, upload-time = "2025-10-29T03:07:10.874Z" }, - { url = "https://files.pythonhosted.org/packages/5c/49/842e7bc60bd81e8fdec239999c4c05eece8fac283253c2feaca378571356/chromadb-1.3.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3199ccd8730560baa7b25a33993d2a3acb8791d5c935f98873f4cfcc2e2ac85b", size = 19717704, upload-time = "2025-10-29T03:07:05.268Z" }, - { url = "https://files.pythonhosted.org/packages/f5/e1/ca0e8fc1146718e41b5afb27dfdf9cc999900b5890814ffb3940a108030b/chromadb-1.3.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:720ec8e4edcd6fba56a7743569b46ed4ceaeb2050fc0000b674f17033d746ed4", size = 20828998, 
upload-time = "2025-10-29T03:07:08.074Z" }, - { url = "https://files.pythonhosted.org/packages/14/8e/1d52110b7f33d42b0d655f3ef2d6a4f6a10fe8229f0a4728a37e8e055eb8/chromadb-1.3.0-cp39-abi3-win_amd64.whl", hash = "sha256:b153b8d3293fe182f5937309f70ad9cd3c5c45171464cf6c9dbb2d70b7f0d4ba", size = 20802636, upload-time = "2025-10-29T03:07:18.741Z" }, + { url = "https://files.pythonhosted.org/packages/b6/9d/306e220cfb4382e9f29e645339826d1deec64c34cf905c344d0d7345dbdb/chromadb-1.3.7-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:74839c349a740b8e349fabc569f8f4becae9806fa8ff9ca186797bef1f54ee4c", size = 20816599, upload-time = "2025-12-12T21:03:11.173Z" }, + { url = "https://files.pythonhosted.org/packages/51/3e/0fbb4c6e7971019c976cf3dbef1c22c1a3089f74ef86c88e2e066edc47e4/chromadb-1.3.7-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:fe9c96f73450274d9f722572afc9d455b4f6f4cd960fa49e4bf489075ef30e6f", size = 20113076, upload-time = "2025-12-12T21:03:07.873Z" }, + { url = "https://files.pythonhosted.org/packages/69/78/2ae4064c9b194271b9c2bc66a26a7e11363d13ed2bd691a563fac1a7c5f2/chromadb-1.3.7-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:972cb168033db76a4bb1031bc38b6cc4e6d05ef716c1ffce8ae95a1a3b515dd2", size = 20738619, upload-time = "2025-12-12T21:03:01.409Z" }, + { url = "https://files.pythonhosted.org/packages/01/5d/3aa34cb02c3c0e4920a47da5d9092cab690fcbf6df13ec744eacf96891d6/chromadb-1.3.7-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e05190236e309b54165866dd11676c2702a35b73beaa29502741f22f333c51a", size = 21654395, upload-time = "2025-12-12T21:03:04.909Z" }, + { url = "https://files.pythonhosted.org/packages/00/36/7d2d7b6bb26e53214492d71ccb4e128fa2de4d98a215befb7787deaf2701/chromadb-1.3.7-cp39-abi3-win_amd64.whl", hash = "sha256:4618ba7bb5ef5dbf0d4fd9ce708b912d8cd1ab24d3c81e0e092841f325b2c94d", size = 21874973, upload-time = "2025-12-12T21:03:16.918Z" }, ] [[package]] @@ -753,11 +763,12 @@ wheels = [ [[package]] name = "deepagents" version = "0.3.0" -source = { git = "https://github.com/langchain-ai/deepagents.git?subdirectory=libs%2Fdeepagents#6a9074f58b4dd7b0302749b84b87f6537d15c4b6" } +source = { git = "https://github.com/langchain-ai/deepagents.git?subdirectory=libs%2Fdeepagents#e7b8b808f52deb98da3582b1100bb787c2ec2eb7" } dependencies = [ { name = "langchain", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-anthropic", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, + { name = "langchain-google-genai", marker = "platform_python_implementation != 'PyPy'" }, { name = "wcmatch", marker = "platform_python_implementation != 'PyPy'" }, ] @@ -1798,8 +1809,8 @@ wheels = [ [[package]] name = "langchain" -version = "1.1.3" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Flangchain_v1#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +version = "1.2.0" +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Flangchain_v1#b8a76cb6e9aff689f0113bbdaf0f15a0f5786e6a" } dependencies = [ { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "langgraph", marker = "platform_python_implementation != 'PyPy'" }, @@ -1808,8 +1819,8 @@ dependencies = [ [[package]] name = "langchain-anthropic" -version = "1.2.0" -source = { git = 
"https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fanthropic#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +version = "1.3.1" +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fanthropic#b8a76cb6e9aff689f0113bbdaf0f15a0f5786e6a" } dependencies = [ { name = "anthropic", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, @@ -1829,7 +1840,7 @@ dependencies = [ [[package]] name = "langchain-aws" version = "1.1.0" -source = { git = "https://github.com/langchain-ai/langchain-aws.git?subdirectory=libs%2Faws#6ae571c73131c9d61c1c02eb30a9874cb7ff5631" } +source = { git = "https://github.com/langchain-ai/langchain-aws.git?subdirectory=libs%2Faws#5bc8b867ef60356a8dd03f861a219f219ae029c2" } dependencies = [ { name = "boto3", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, @@ -1840,7 +1851,7 @@ dependencies = [ [[package]] name = "langchain-azure-ai" version = "1.0.4" -source = { git = "https://github.com/langchain-ai/langchain-azure.git?subdirectory=libs%2Fazure-ai#77d6d6f3bd89835c2db5fe5f3c0948d1f3ea0aaf" } +source = { git = "https://github.com/langchain-ai/langchain-azure.git?subdirectory=libs%2Fazure-ai#f553e3787ebe7b1faff5ff1ff850319d404e11c3" } dependencies = [ { name = "aiohttp", marker = "platform_python_implementation != 'PyPy'" }, { name = "azure-ai-agents", marker = "platform_python_implementation != 'PyPy'" }, @@ -1859,7 +1870,7 @@ dependencies = [ [[package]] name = "langchain-azure-postgresql" version = "1.0.0" -source = { git = "https://github.com/langchain-ai/langchain-azure.git?subdirectory=libs%2Fazure-postgresql#77d6d6f3bd89835c2db5fe5f3c0948d1f3ea0aaf" } +source = { git = "https://github.com/langchain-ai/langchain-azure.git?subdirectory=libs%2Fazure-postgresql#f553e3787ebe7b1faff5ff1ff850319d404e11c3" } dependencies = [ { name = "aiohttp", marker = "platform_python_implementation != 'PyPy'" }, { name = "azure-identity", marker = "platform_python_implementation != 'PyPy'" }, @@ -1874,7 +1885,7 @@ dependencies = [ [[package]] name = "langchain-azure-storage" version = "1.0.0" -source = { git = "https://github.com/langchain-ai/langchain-azure.git?subdirectory=libs%2Fazure-storage#77d6d6f3bd89835c2db5fe5f3c0948d1f3ea0aaf" } +source = { git = "https://github.com/langchain-ai/langchain-azure.git?subdirectory=libs%2Fazure-storage#f553e3787ebe7b1faff5ff1ff850319d404e11c3" } dependencies = [ { name = "azure-identity", marker = "platform_python_implementation != 'PyPy'" }, { name = "azure-storage-blob", extra = ["aio"], marker = "platform_python_implementation != 'PyPy'" }, @@ -1892,8 +1903,8 @@ dependencies = [ [[package]] name = "langchain-chroma" -version = "1.0.0" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fchroma#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +version = "1.1.0" +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fchroma#b8a76cb6e9aff689f0113bbdaf0f15a0f5786e6a" } dependencies = [ { name = "chromadb", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, @@ -1903,7 +1914,7 @@ dependencies = [ [[package]] name = "langchain-classic" version = "1.0.0" -source = { git = 
"https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Flangchain#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Flangchain#b8a76cb6e9aff689f0113bbdaf0f15a0f5786e6a" } dependencies = [ { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-text-splitters", marker = "platform_python_implementation != 'PyPy'" }, @@ -1946,8 +1957,8 @@ dependencies = [ [[package]] name = "langchain-core" -version = "1.1.3" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fcore#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +version = "1.2.1" +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fcore#b8a76cb6e9aff689f0113bbdaf0f15a0f5786e6a" } dependencies = [ { name = "jsonpatch", marker = "platform_python_implementation != 'PyPy'" }, { name = "langsmith", marker = "platform_python_implementation != 'PyPy'" }, @@ -1962,7 +1973,7 @@ dependencies = [ [[package]] name = "langchain-db2" version = "1.0.0" -source = { git = "https://github.com/langchain-ai/langchain-ibm.git?subdirectory=libs%2Flangchain-db2#c2eb7351e7631a0f2aa9e1f0f069fcab3fbe9607" } +source = { git = "https://github.com/langchain-ai/langchain-ibm.git?subdirectory=libs%2Flangchain-db2#96ba33cf5e5c091872fa6f606b2b735617b7e6e2" } dependencies = [ { name = "ibm-db", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-community", marker = "platform_python_implementation != 'PyPy'" }, @@ -1972,7 +1983,7 @@ dependencies = [ [[package]] name = "langchain-deepseek" version = "1.1.0" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fdeepseek#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fdeepseek#b8a76cb6e9aff689f0113bbdaf0f15a0f5786e6a" } dependencies = [ { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-openai", marker = "platform_python_implementation != 'PyPy'" }, @@ -1981,7 +1992,7 @@ dependencies = [ [[package]] name = "langchain-elasticsearch" version = "0.4.0" -source = { git = "https://github.com/langchain-ai/langchain-elastic.git?subdirectory=libs%2Felasticsearch#f9d82e15700ac26f4e3e409fa5ca16e712fb84b3" } +source = { git = "https://github.com/langchain-ai/langchain-elastic.git?subdirectory=libs%2Felasticsearch#a6383faccbc8e6353c9700c3aea5aaf2d5395691" } dependencies = [ { name = "elasticsearch", extra = ["vectorstore-mmr"], marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, @@ -1990,7 +2001,7 @@ dependencies = [ [[package]] name = "langchain-exa" version = "1.0.0" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fexa#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fexa#b8a76cb6e9aff689f0113bbdaf0f15a0f5786e6a" } dependencies = [ { name = "exa-py", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, @@ -1999,7 +2010,7 @@ dependencies = [ [[package]] name = "langchain-fireworks" version = "1.1.0" -source = { git = 
"https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Ffireworks#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Ffireworks#b8a76cb6e9aff689f0113bbdaf0f15a0f5786e6a" } dependencies = [ { name = "aiohttp", marker = "platform_python_implementation != 'PyPy'" }, { name = "fireworks-ai", marker = "platform_python_implementation != 'PyPy'" }, @@ -2011,7 +2022,7 @@ dependencies = [ [[package]] name = "langchain-google-community" version = "3.0.2" -source = { git = "https://github.com/langchain-ai/langchain-google.git?subdirectory=libs%2Fcommunity#5b245cb6d150761379b7e78b5d6c75d2b561c992" } +source = { git = "https://github.com/langchain-ai/langchain-google.git?subdirectory=libs%2Fcommunity#07a4266ec5d776950c9baa13ea8a900005abbf25" } dependencies = [ { name = "google-api-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "google-api-python-client", marker = "platform_python_implementation != 'PyPy'" }, @@ -2025,7 +2036,7 @@ dependencies = [ [[package]] name = "langchain-google-genai" version = "4.0.0" -source = { git = "https://github.com/langchain-ai/langchain-google.git?subdirectory=libs%2Fgenai#5b245cb6d150761379b7e78b5d6c75d2b561c992" } +source = { git = "https://github.com/langchain-ai/langchain-google.git?subdirectory=libs%2Fgenai#07a4266ec5d776950c9baa13ea8a900005abbf25" } dependencies = [ { name = "filetype", marker = "platform_python_implementation != 'PyPy'" }, { name = "google-genai", marker = "platform_python_implementation != 'PyPy'" }, @@ -2036,7 +2047,7 @@ dependencies = [ [[package]] name = "langchain-google-vertexai" version = "3.2.0" -source = { git = "https://github.com/langchain-ai/langchain-google.git?subdirectory=libs%2Fvertexai#5b245cb6d150761379b7e78b5d6c75d2b561c992" } +source = { git = "https://github.com/langchain-ai/langchain-google.git?subdirectory=libs%2Fvertexai#07a4266ec5d776950c9baa13ea8a900005abbf25" } dependencies = [ { name = "bottleneck", marker = "platform_python_implementation != 'PyPy'" }, { name = "google-cloud-aiplatform", marker = "platform_python_implementation != 'PyPy'" }, @@ -2052,8 +2063,8 @@ dependencies = [ [[package]] name = "langchain-groq" -version = "1.1.0" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fgroq#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +version = "1.1.1" +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fgroq#b8a76cb6e9aff689f0113bbdaf0f15a0f5786e6a" } dependencies = [ { name = "groq", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, @@ -2061,8 +2072,8 @@ dependencies = [ [[package]] name = "langchain-huggingface" -version = "1.1.0" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fhuggingface#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +version = "1.2.0" +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fhuggingface#b8a76cb6e9aff689f0113bbdaf0f15a0f5786e6a" } dependencies = [ { name = "huggingface-hub", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, @@ -2071,8 +2082,8 @@ dependencies = [ [[package]] name = "langchain-ibm" -version = "1.0.1" -source = { git = 
"https://github.com/langchain-ai/langchain-ibm.git?subdirectory=libs%2Fibm#c2eb7351e7631a0f2aa9e1f0f069fcab3fbe9607" } +version = "1.0.2" +source = { git = "https://github.com/langchain-ai/langchain-ibm.git?subdirectory=libs%2Fibm#96ba33cf5e5c091872fa6f606b2b735617b7e6e2" } dependencies = [ { name = "ibm-watsonx-ai", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, @@ -2091,7 +2102,7 @@ dependencies = [ [[package]] name = "langchain-milvus" version = "0.3.1" -source = { git = "https://github.com/langchain-ai/langchain-milvus.git?subdirectory=libs%2Fmilvus#01a4c6903d4893ffc635ba564c99af8a08d1a437" } +source = { git = "https://github.com/langchain-ai/langchain-milvus.git?subdirectory=libs%2Fmilvus#b3967136712df93673d2f488602ab0c7ec424e3b" } dependencies = [ { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "pymilvus", marker = "platform_python_implementation != 'PyPy'" }, @@ -2099,8 +2110,8 @@ dependencies = [ [[package]] name = "langchain-mistralai" -version = "1.1.0" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fmistralai#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +version = "1.1.1" +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fmistralai#b8a76cb6e9aff689f0113bbdaf0f15a0f5786e6a" } dependencies = [ { name = "httpx", marker = "platform_python_implementation != 'PyPy'" }, { name = "httpx-sse", marker = "platform_python_implementation != 'PyPy'" }, @@ -2123,7 +2134,7 @@ dependencies = [ [[package]] name = "langchain-nomic" version = "1.0.1" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fnomic#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fnomic#b8a76cb6e9aff689f0113bbdaf0f15a0f5786e6a" } dependencies = [ { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "nomic", marker = "platform_python_implementation != 'PyPy'" }, @@ -2133,7 +2144,7 @@ dependencies = [ [[package]] name = "langchain-nvidia-ai-endpoints" version = "1.0.0" -source = { git = "https://github.com/langchain-ai/langchain-nvidia.git?subdirectory=libs%2Fai-endpoints#db54227776cd8e8486d2e5900122a50c6b7fffc0" } +source = { git = "https://github.com/langchain-ai/langchain-nvidia.git?subdirectory=libs%2Fai-endpoints#2970b0b9857046bb7ac87aa9e00d32b4e90035d6" } dependencies = [ { name = "aiohttp", marker = "platform_python_implementation != 'PyPy'" }, { name = "filetype", marker = "platform_python_implementation != 'PyPy'" }, @@ -2142,8 +2153,8 @@ dependencies = [ [[package]] name = "langchain-ollama" -version = "1.0.0" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Follama#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +version = "1.0.1" +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Follama#b8a76cb6e9aff689f0113bbdaf0f15a0f5786e6a" } dependencies = [ { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "ollama", marker = "platform_python_implementation != 'PyPy'" }, @@ -2151,8 +2162,8 @@ dependencies = [ [[package]] name = "langchain-openai" -version = "1.1.1" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fopenai#ff6e3558d764ccfd34db9d008abd37411dc758b0" } 
+version = "1.1.3" +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fopenai#b8a76cb6e9aff689f0113bbdaf0f15a0f5786e6a" } dependencies = [ { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "openai", marker = "platform_python_implementation != 'PyPy'" }, @@ -2174,7 +2185,7 @@ dependencies = [ [[package]] name = "langchain-perplexity" version = "1.1.0" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fperplexity#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fperplexity#b8a76cb6e9aff689f0113bbdaf0f15a0f5786e6a" } dependencies = [ { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "openai", marker = "platform_python_implementation != 'PyPy'" }, @@ -2197,7 +2208,7 @@ dependencies = [ [[package]] name = "langchain-prompty" version = "1.0.0" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fprompty#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fprompty#b8a76cb6e9aff689f0113bbdaf0f15a0f5786e6a" } dependencies = [ { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "pyyaml", marker = "platform_python_implementation != 'PyPy'" }, @@ -2206,7 +2217,7 @@ dependencies = [ [[package]] name = "langchain-qdrant" version = "1.1.0" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fqdrant#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fqdrant#b8a76cb6e9aff689f0113bbdaf0f15a0f5786e6a" } dependencies = [ { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "pydantic", marker = "platform_python_implementation != 'PyPy'" }, @@ -2279,6 +2290,7 @@ dependencies = [ { name = "langchain-tavily", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-tests", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-text-splitters", marker = "platform_python_implementation != 'PyPy'" }, + { name = "langchain-unstructured", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-upstage", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-weaviate", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-xai", marker = "platform_python_implementation != 'PyPy'" }, @@ -2353,6 +2365,7 @@ requires-dist = [ { name = "langchain-tavily", git = "https://github.com/tavily-ai/langchain-tavily.git" }, { name = "langchain-tests", git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fstandard-tests" }, { name = "langchain-text-splitters", git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Ftext-splitters" }, + { name = "langchain-unstructured", git = "https://github.com/langchain-ai/langchain-unstructured.git?subdirectory=libs%2Funstructured" }, { name = "langchain-upstage", git = "https://github.com/langchain-ai/langchain-upstage.git?subdirectory=libs%2Fupstage" }, { name = "langchain-weaviate", git = "https://github.com/langchain-ai/langchain-weaviate.git?subdirectory=libs%2Fweaviate&branch=v1.0" }, { name = "langchain-xai", git = 
"https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fxai" }, @@ -2383,8 +2396,8 @@ requires-dist = [ [[package]] name = "langchain-tavily" -version = "0.2.13" -source = { git = "https://github.com/tavily-ai/langchain-tavily.git#3df3084a54da33efffa13f92f8ef95b5d55aa30e" } +version = "0.2.14" +source = { git = "https://github.com/tavily-ai/langchain-tavily.git#7f4756fc3022820b2f997fece62ac95c433009a4" } dependencies = [ { name = "aiohttp", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain", marker = "platform_python_implementation != 'PyPy'" }, @@ -2394,8 +2407,8 @@ dependencies = [ [[package]] name = "langchain-tests" -version = "1.0.2" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fstandard-tests#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +version = "1.1.0" +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fstandard-tests#b8a76cb6e9aff689f0113bbdaf0f15a0f5786e6a" } dependencies = [ { name = "httpx", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, @@ -2412,10 +2425,12 @@ dependencies = [ [[package]] name = "langchain-text-splitters" -version = "1.0.0" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Ftext-splitters#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +version = "1.1.0" +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Ftext-splitters#b8a76cb6e9aff689f0113bbdaf0f15a0f5786e6a" } dependencies = [ { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, + { name = "onnxruntime", marker = "platform_python_implementation != 'PyPy'" }, + { name = "unstructured-client", marker = "platform_python_implementation != 'PyPy'" }, ] [[package]] @@ -2444,7 +2459,7 @@ dependencies = [ [[package]] name = "langchain-xai" version = "1.1.0" -source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fxai#ff6e3558d764ccfd34db9d008abd37411dc758b0" } +source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fxai#b8a76cb6e9aff689f0113bbdaf0f15a0f5786e6a" } dependencies = [ { name = "aiohttp", marker = "platform_python_implementation != 'PyPy'" }, { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, @@ -2454,8 +2469,8 @@ dependencies = [ [[package]] name = "langgraph" -version = "1.0.4" -source = { git = "https://github.com/langchain-ai/langgraph?subdirectory=libs%2Flanggraph#4d01e69b823c4126f375cbed72fed7c5797d1b91" } +version = "1.0.5" +source = { git = "https://github.com/langchain-ai/langgraph?subdirectory=libs%2Flanggraph#87cb5095285ba809b547584d31bae26046fc844a" } dependencies = [ { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "langgraph-checkpoint", marker = "platform_python_implementation != 'PyPy'" }, @@ -2468,7 +2483,7 @@ dependencies = [ [[package]] name = "langgraph-checkpoint" version = "3.0.1" -source = { git = "https://github.com/langchain-ai/langgraph?subdirectory=libs%2Fcheckpoint#4d01e69b823c4126f375cbed72fed7c5797d1b91" } +source = { git = "https://github.com/langchain-ai/langgraph?subdirectory=libs%2Fcheckpoint#87cb5095285ba809b547584d31bae26046fc844a" } dependencies = [ { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "ormsgpack", marker = "platform_python_implementation != 'PyPy'" }, @@ -2476,8 +2491,8 
@@ dependencies = [ [[package]] name = "langgraph-checkpoint-aws" -version = "1.0.1" -source = { git = "https://github.com/langchain-ai/langchain-aws.git?subdirectory=libs%2Flanggraph-checkpoint-aws#6ae571c73131c9d61c1c02eb30a9874cb7ff5631" } +version = "1.0.2" +source = { git = "https://github.com/langchain-ai/langchain-aws.git?subdirectory=libs%2Flanggraph-checkpoint-aws#5bc8b867ef60356a8dd03f861a219f219ae029c2" } dependencies = [ { name = "boto3", marker = "platform_python_implementation != 'PyPy'" }, { name = "langgraph", marker = "platform_python_implementation != 'PyPy'" }, @@ -2487,7 +2502,7 @@ dependencies = [ [[package]] name = "langgraph-checkpoint-postgres" version = "3.0.2" -source = { git = "https://github.com/langchain-ai/langgraph?subdirectory=libs%2Fcheckpoint-postgres#4d01e69b823c4126f375cbed72fed7c5797d1b91" } +source = { git = "https://github.com/langchain-ai/langgraph?subdirectory=libs%2Fcheckpoint-postgres#87cb5095285ba809b547584d31bae26046fc844a" } dependencies = [ { name = "langgraph-checkpoint", marker = "platform_python_implementation != 'PyPy'" }, { name = "orjson", marker = "platform_python_implementation != 'PyPy'" }, @@ -2498,7 +2513,7 @@ dependencies = [ [[package]] name = "langgraph-checkpoint-sqlite" version = "3.0.1" -source = { git = "https://github.com/langchain-ai/langgraph?subdirectory=libs%2Fcheckpoint-sqlite#4d01e69b823c4126f375cbed72fed7c5797d1b91" } +source = { git = "https://github.com/langchain-ai/langgraph?subdirectory=libs%2Fcheckpoint-sqlite#87cb5095285ba809b547584d31bae26046fc844a" } dependencies = [ { name = "aiosqlite", marker = "platform_python_implementation != 'PyPy'" }, { name = "langgraph-checkpoint", marker = "platform_python_implementation != 'PyPy'" }, @@ -2508,7 +2523,7 @@ dependencies = [ [[package]] name = "langgraph-prebuilt" version = "1.0.5" -source = { git = "https://github.com/langchain-ai/langgraph?subdirectory=libs%2Fprebuilt#4d01e69b823c4126f375cbed72fed7c5797d1b91" } +source = { git = "https://github.com/langchain-ai/langgraph?subdirectory=libs%2Fprebuilt#87cb5095285ba809b547584d31bae26046fc844a" } dependencies = [ { name = "langchain-core", marker = "platform_python_implementation != 'PyPy'" }, { name = "langgraph-checkpoint", marker = "platform_python_implementation != 'PyPy'" }, @@ -2516,8 +2531,8 @@ dependencies = [ [[package]] name = "langgraph-sdk" -version = "0.2.15" -source = { git = "https://github.com/langchain-ai/langgraph?subdirectory=libs%2Fsdk-py#4d01e69b823c4126f375cbed72fed7c5797d1b91" } +version = "0.3.0" +source = { git = "https://github.com/langchain-ai/langgraph?subdirectory=libs%2Fsdk-py#87cb5095285ba809b547584d31bae26046fc844a" } dependencies = [ { name = "httpx", marker = "platform_python_implementation != 'PyPy'" }, { name = "orjson", marker = "platform_python_implementation != 'PyPy'" }, @@ -2544,8 +2559,8 @@ dependencies = [ [[package]] name = "langsmith" -version = "0.4.58" -source = { git = "https://github.com/langchain-ai/langsmith-sdk.git?subdirectory=python#97f8b9b9f2a30c426b6adb77f708bbc68b94a4e3" } +version = "0.4.59" +source = { git = "https://github.com/langchain-ai/langsmith-sdk.git?subdirectory=python#c45bf95e53c0c40001fd290dba23e648d8aff13c" } dependencies = [ { name = "httpx", marker = "platform_python_implementation != 'PyPy'" }, { name = "orjson", marker = "platform_python_implementation != 'PyPy'" }, @@ -3945,11 +3960,11 @@ wheels = [ [[package]] name = "pypdf" -version = "6.1.3" +version = "6.4.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/13/3d/b6ead84ee437444f96862beb68f9796da8c199793bed08e9397b77579f23/pypdf-6.1.3.tar.gz", hash = "sha256:8d420d1e79dc1743f31a57707cabb6dcd5b17e8b9a302af64b30202c5700ab9d", size = 5076271, upload-time = "2025-10-22T16:13:46.061Z" } +sdist = { url = "https://files.pythonhosted.org/packages/bd/c2/b59b02ff7f2dc006799d2c5dc3a8877686890abdd915176ef799070edf17/pypdf-6.4.2.tar.gz", hash = "sha256:c466ff1272ffb4712c2348d2bbc3019bc93f1c62ccfaf50808e3b9f13c3dc527", size = 5275502, upload-time = "2025-12-14T14:30:58.58Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/fa/ed/494fd0cc1190a7c335e6958eeaee6f373a281869830255c2ed4785dac135/pypdf-6.1.3-py3-none-any.whl", hash = "sha256:eb049195e46f014fc155f566fa20e09d70d4646a9891164ac25fa0cbcfcdbcb5", size = 323863, upload-time = "2025-10-22T16:13:44.174Z" }, + { url = "https://files.pythonhosted.org/packages/38/99/3147435e15ccd97c0451efc3d13495dc22602e9887f81e64f1b135bae821/pypdf-6.4.2-py3-none-any.whl", hash = "sha256:014dcff867fd99fc0b6fc90ed1f7e1347ef2317ae038a489c2caa64106d268f4", size = 328212, upload-time = "2025-12-14T14:30:56.701Z" }, ] [[package]] @@ -4371,14 +4386,14 @@ wheels = [ [[package]] name = "s3transfer" -version = "0.14.0" +version = "0.16.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "botocore", marker = "platform_python_implementation != 'PyPy'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/62/74/8d69dcb7a9efe8baa2046891735e5dfe433ad558ae23d9e3c14c633d1d58/s3transfer-0.14.0.tar.gz", hash = "sha256:eff12264e7c8b4985074ccce27a3b38a485bb7f7422cc8046fee9be4983e4125", size = 151547, upload-time = "2025-09-09T19:23:31.089Z" } +sdist = { url = "https://files.pythonhosted.org/packages/05/04/74127fc843314818edfa81b5540e26dd537353b123a4edc563109d8f17dd/s3transfer-0.16.0.tar.gz", hash = "sha256:8e990f13268025792229cd52fa10cb7163744bf56e719e0b9cb925ab79abf920", size = 153827, upload-time = "2025-12-01T02:30:59.114Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/48/f0/ae7ca09223a81a1d890b2557186ea015f6e0502e9b8cb8e1813f1d8cfa4e/s3transfer-0.14.0-py3-none-any.whl", hash = "sha256:ea3b790c7077558ed1f02a3072fb3cb992bbbd253392f4b6e9e8976941c7d456", size = 85712, upload-time = "2025-09-09T19:23:30.041Z" }, + { url = "https://files.pythonhosted.org/packages/fc/51/727abb13f44c1fcf6d145979e1535a35794db0f6e450a0cb46aa24732fe2/s3transfer-0.16.0-py3-none-any.whl", hash = "sha256:18e25d66fed509e3868dc1572b3f427ff947dd2c56f844a5bf09481ad3f3b2fe", size = 86830, upload-time = "2025-12-01T02:30:57.729Z" }, ] [[package]] @@ -4785,6 +4800,24 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5c/23/c7abc0ca0a1526a0774eca151daeb8de62ec457e77262b66b359c3c7679e/tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8", size = 347839, upload-time = "2025-03-23T13:54:41.845Z" }, ] +[[package]] +name = "unstructured-client" +version = "0.42.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiofiles", marker = "platform_python_implementation != 'PyPy'" }, + { name = "cryptography", marker = "platform_python_implementation != 'PyPy'" }, + { name = "httpcore", marker = "platform_python_implementation != 'PyPy'" }, + { name = "httpx", marker = "platform_python_implementation != 'PyPy'" }, + { name = "pydantic", marker = "platform_python_implementation != 'PyPy'" }, + { name = "pypdf", marker = "platform_python_implementation != 'PyPy'" }, + { name = 
"requests-toolbelt", marker = "platform_python_implementation != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a4/8f/43c9a936a153e62f18e7629128698feebd81d2cfff2835febc85377b8eb8/unstructured_client-0.42.4.tar.gz", hash = "sha256:144ecd231a11d091cdc76acf50e79e57889269b8c9d8b9df60e74cf32ac1ba5e", size = 91404, upload-time = "2025-11-14T16:59:25.131Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5e/6c/7c69e4353e5bdd05fc247c2ec1d840096eb928975697277b015c49405b0f/unstructured_client-0.42.4-py3-none-any.whl", hash = "sha256:fc6341344dd2f2e2aed793636b5f4e6204cad741ff2253d5a48ff2f2bccb8e9a", size = 207863, upload-time = "2025-11-14T16:59:23.674Z" }, +] + [[package]] name = "uritemplate" version = "4.2.0" diff --git a/src/docs.json b/src/docs.json index f8379607fb..b9a3e234f2 100644 --- a/src/docs.json +++ b/src/docs.json @@ -305,7 +305,7 @@ "tab": "Integrations", "pages": [ "oss/python/integrations/providers/overview", - "oss/python/integrations/providers/all_providers", + "oss/python/langgraph/integrations","oss/python/integrations/providers/all_providers", { "group": "Popular Providers", "icon": "user-group", @@ -625,6 +625,7 @@ "tab": "Integrations", "pages": [ "oss/javascript/integrations/providers/overview", + "oss/javascript/langgraph/integrations", "oss/javascript/integrations/providers/all_providers", { "group": "Popular Providers", @@ -635,14 +636,16 @@ "pages": [ "oss/javascript/integrations/providers/openai", "oss/javascript/integrations/chat/openai", - "oss/javascript/integrations/text_embedding/openai" + "oss/javascript/integrations/text_embedding/openai", + "oss/javascript/integrations/tools/openai" ] }, { "group": "Anthropic", "pages": [ "oss/javascript/integrations/providers/anthropic", - "oss/javascript/integrations/chat/anthropic" + "oss/javascript/integrations/chat/anthropic", + "oss/javascript/integrations/tools/anthropic" ] }, { @@ -845,13 +848,6 @@ "group": "Additional resources", "pages": [ "langsmith/polly", - { - "group": "Releases & changelogs", - "pages": [ - "langsmith/agent-server-changelog", - "langsmith/release-versions" - ] - }, { "group": "Data management", "pages": [ @@ -1445,7 +1441,7 @@ { "group": "LangSmith Deployment", "pages": [ - { + { "group": "Agent Server API", "pages": [ "langsmith/server-api-ref" @@ -1455,11 +1451,23 @@ "directory": "langsmith/agent-server-api" } }, - "langsmith/api-ref-control-plane", + { + "group": "Control Plane API", + "openapi": { + "source": "https://api.host.langchain.com/openapi.json" + } + }, "langsmith/cli", "langsmith/remote-graph", "langsmith/env-var" ] + }, + { + "group": "Releases", + "pages": [ + "langsmith/agent-server-changelog", + "langsmith/release-versions" + ] } ] } diff --git a/src/langsmith/agent-server-changelog.mdx b/src/langsmith/agent-server-changelog.mdx index aee77a3a94..713ecadead 100644 --- a/src/langsmith/agent-server-changelog.mdx +++ b/src/langsmith/agent-server-changelog.mdx @@ -1,140 +1,231 @@ --- title: Agent Server changelog sidebarTitle: Agent Server changelog +rss: true --- + +**Subscribe**: Our changelog includes an [RSS feed](https://docs.langchain.com/langsmith/agent-server-changelog/rss.xml) that can integrate with [Slack](https://slack.com/help/articles/218688467-Add-RSS-feeds-to-Slack), [email](https://zapier.com/apps/email/integrations/rss/1441/send-new-rss-feed-entries-via-email), Discord bots like [Readybot](https://readybot.io/) or [RSS Feeds to Discord Bot](https://rss.app/en/bots/rssfeeds-discord-bot), and other subscription tools. 
+ + + [Agent Server](/langsmith/agent-server) is an API platform for creating and managing agent-based applications. It provides built-in persistence, a task queue, and supports deploying, configuring, and running assistants (agentic workflows) at scale. This changelog documents all notable updates, features, and fixes to Agent Server releases. - + +## v0.5.42 + +- Modified the Go server to rely solely on the CLI `-service` flag for determining service mode, ignoring the globally set `FF_USE_CORE_API` for better deployment specificity. + + + +## v0.5.41 + +Fixed an issue with cron jobs in hybrid mode by ensuring proper initialization of the ENTERPRISE_SAAS global flag. + + + +## v0.5.39 + +- Completed the implementation of custom encryption for runs and crons, and simplified the encryption process. +- Introduced support for streaming subgraph events in both `values` and `updates` stream modes. + + + +## v0.5.38 + +- Implemented complete custom encryption for threads, ensuring all thread data is properly secured and encrypted. +- Ensured Redis attempt flags are consistently expired to prevent stale data. +- Added core authentication and support for OR/AND filters, enhancing security and flexibility. + + + ## v0.5.37 -- Added a `name` parameter to the assistants count API for improved search flexibility. - +Added a `name` parameter to the assistants count API for improved search flexibility. + + + ## v0.5.36 + - Introduced configurable webhook support, allowing users to customize submitted webhooks and headers. - Added an `/ok` endpoint at the root for easier health checks and simplified configuration. + - + ## v0.5.34 -- Introduced custom encryption middleware, allowing users to define their own encryption methods for enhanced data protection. - +Introduced custom encryption middleware, allowing users to define their own encryption methods for enhanced data protection. + + + ## v0.5.33 -- Set Uvicorn's keep-alive timeout to 75 seconds to prevent occasional 502 errors and improve connection handling. - +Set Uvicorn's keep-alive timeout to 75 seconds to prevent occasional 502 errors and improve connection handling. + + + ## v0.5.32 -- Introduced OpenTelemetry telemetry agent with support for New Relic integration. - +Introduced an OpenTelemetry telemetry agent with support for New Relic integration. + + + ## v0.5.31 -- Added Py-Spy profiling for improved analysis of deployment performance, with some limitations on coverage. - +Added Py-Spy profiling for improved analysis of deployment performance, with some limitations on coverage. + + + ## v0.5.30 + - Always configure loopback transport clients to enhance reliability. - Ensured authentication headers are passed for remote non-stream methods in JS. + - + ## v0.5.28 + - Introduced a faster, Rust-based implementation of uuid7 to improve performance, now used in langsmith and langchain-core. - Added support for `$or` and `$and` in PostgreSQL auth filters to enable complex logic in authentication checks (see the filter sketch below). - Capped psycopg and psycopg-pool versions to prevent infinite waiting on startup. + - + ## v0.5.27 + - Ensured `runs.list` with filters returns only run fields, preventing incorrect status data from being included. - (JS) Updated `uuid` from version 10.0.0 to 13.0.0 and `exit-hook` from version 4.0.0 to 5.0.1. + - + ## v0.5.26 -- Resolved issues with `store.put` when used without AsyncBatchedStore in the JavaScript environment. - +Resolved issues with `store.put` when used without AsyncBatchedStore in the JavaScript environment.
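+
+A minimal sketch of the `$or`/`$and` filters, assuming a custom-auth handler built with the `langgraph_sdk` `Auth` API; the resource and field names here are illustrative, not prescribed by these release notes:
+
+```python
+from langgraph_sdk import Auth
+
+auth = Auth()
+
+@auth.on.threads.read
+async def authorize_thread_read(ctx: Auth.types.AuthContext, value: dict):
+    # Returning a filter dict scopes reads; $or matches either sub-filter.
+    return {"$or": [{"owner": ctx.user.identity}, {"visibility": "public"}]}
+```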
+ + + ## v0.5.25 + - Introduced the ability to search assistants by their `name` using a new endpoint. - Cast store_get return types to tuple in JavaScript to ensure type consistency. + - + ## v0.5.24 + - Added executor metrics for Datadog and enhanced core stream API metrics for better performance tracking. - Disabled Redis Go maintenance notifications to prevent startup errors with unsupported commands in Redis versions below 8. + - + ## v0.5.20 -- Resolved an error in the executor service that occurred when handling large messages. - +Resolved an error in the executor service that occurred when handling large messages. + + + ## v0.5.19 -- Upgraded built-in langchain-core to version 1.0.7 to address a prompt formatting vulnerability. - +Upgraded built-in langchain-core to version 1.0.7 to address a prompt formatting vulnerability. + + + ## v0.5.18 -- Introduced persistent cron threads with `on_run_completed: {keep,delete}` for enhanced cron management and retrieval options. - +Introduced persistent cron threads with `on_run_completed: {keep,delete}` for enhanced cron management and retrieval options (see the usage sketch below). + + + ## v0.5.17 -- Enhanced task handling to support multiple interrupts, aligning with open-source functionality. - +Enhanced task handling to support multiple interrupts, aligning with open-source functionality. + + + ## v0.5.15 -- Added custom JSON unmarshalling for `Resume` and `Goto` commands to fix map-style null resume interpretation issues. - +Added custom JSON unmarshalling for `Resume` and `Goto` commands to fix map-style null resume interpretation issues. + + + ## v0.5.14 -- Ensured `pg make start` command functions correctly with core-api enabled. - +Ensured the `pg make start` command functions correctly with core-api enabled. + + + ## v0.5.13 -- Support `include` and `exclude` (plural form key for `includes` and `excludes`) since a doc incorrectly claimed support for that. Now the server accepts either. - +Added support for the `include` and `exclude` keys (singular forms of `includes` and `excludes`), which the docs had incorrectly claimed were supported. The server now accepts either form. + + + ## v0.5.11 + - Ensured auth handlers are applied consistently when streaming threads, aligning with recent security practices. - Bumped `undici` dependency from version 6.21.3 to 7.16.0, introducing various performance improvements and bug fixes. - Updated `p-queue` from version 8.0.1 to 9.0.0, introducing new features and breaking changes, including the removal of the `throwOnTimeout` option. + - + ## v0.5.10 -- Implemented healthcheck calls in the queue /ok handler to improve Kubernetes liveness and readiness probe compatibility. - +Implemented healthcheck calls in the queue /ok handler to improve Kubernetes liveness and readiness probe compatibility. + + + ## v0.5.9 + - Resolved an issue causing an "unbound local error" for the `elapsed` variable during a SIGINT interruption. - Mapped the "interrupted" status to A2A's "input-required" status for better task status alignment. + - + ## v0.5.8 + - Ensured environment variables are passed as a dictionary when starting langgraph-ui for compatibility with `uvloop`. - Implemented CRUD operations for runs in Go, simplifying JSON merges and improving transaction readability, with PostgreSQL as a reference. + - + ## v0.5.7 -- Replaced no-retry Redis client with a retry client to handle connection errors more effectively and reduced corresponding logging severity.
- +Replaced no-retry Redis client with a retry client to handle connection errors more effectively and reduced corresponding logging severity. + + + ## v0.5.6 + - Added pending time metrics to provide better insights into task waiting times. - Replaced `pb.Value` with `ChannelValue` to streamline code structure. + - + ## v0.5.5 -- Made the Redis `health_check_interval` more frequent and configurable for better handling of idle connections. - +Made the Redis `health_check_interval` more frequent and configurable for better handling of idle connections. + + + ## v0.5.4 -- Implemented `ormsgpack` with `OPT_REPLACE_SURROGATES` and updated for compatibility with the latest FastAPI release affecting custom authentication dependencies. - +Implemented `ormsgpack` with `OPT_REPLACE_SURROGATES` and updated for compatibility with the latest FastAPI release affecting custom authentication dependencies. + + + ## v0.5.2 -- Added retry logic for PostgreSQL connections during startup to enhance deployment reliability and improved error logging for easier debugging. - +Added retry logic for PostgreSQL connections during startup to enhance deployment reliability and improved error logging for easier debugging. + + + ## v0.5.1 + - Resolved an issue where persistence was not functioning correctly with LangChain.js's createAgent feature. - Optimized assistants CRUD performance by improving database connection pooling and gRPC client reuse, reducing latency for large payloads. + - + ## v0.5.0 This minor version now requires langgraph-checkpoint versions later than 3.0 to prevent a deserialization vulnerability in earlier versions of the langgraph-checkpoint library. @@ -154,9 +245,11 @@ By default, objects are serialized using msgpack. Under certain uncommon situati } } ``` + - + ## v0.4.47 + - Validated and auto-corrected environment configuration types using TypeAdapter. - Added support for LangChain.js and LangGraph.js version 1.x, ensuring compatibility. - Updated hono library from version 4.9.7 to 4.10.3, addressing a CORS middleware security issue and enhancing JWT audience validation. @@ -165,88 +258,120 @@ By default, objects are serialized using msgpack. Under certain uncommon situati - Updated `hono` package from version 4.9.7 to 4.10.2, including security improvements for JWT audience validation. - Updated `hono` dependency from version 4.9.7 to 4.10.3 to fix a security issue and improve CORS middleware handling. - Introduced basic CRUD operations for threads, including create, get, patch, delete, search, count, and copy, with support for Go, gRPC server, and Python and TypeScript clients. + - + ## v0.4.46 -- Added an option to enable message streaming from subgraph events, giving users more control over event notifications. - +Added an option to enable message streaming from subgraph events, giving users more control over event notifications. + + + ## v0.4.45 + - Implemented support for authorization on custom routes, controlled by the `enable_custom_route_auth` flag. - Set default tracing to off for improved performance and simplified debugging. + - + ## v0.4.44 -- Used Redis key prefix for license-related keys to prevent conflicts with existing setups. - +Used Redis key prefix for license-related keys to prevent conflicts with existing setups. + + + ## v0.4.43 -- Implemented a health check for Redis connections to prevent them from idling out. - +Implemented a health check for Redis connections to prevent them from idling out. 
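+
+A usage sketch for the persistent cron threads introduced in v0.5.18, assuming the `langgraph_sdk` Python client; the `on_run_completed` values come from that entry, while the assistant name, schedule, and URL are hypothetical:
+
+```python
+import asyncio
+from langgraph_sdk import get_client
+
+async def main() -> None:
+    client = get_client(url="http://localhost:2024")
+    cron = await client.crons.create(
+        assistant_id="agent",  # hypothetical assistant
+        schedule="0 9 * * *",  # every day at 09:00 UTC
+        input={"messages": [{"role": "user", "content": "Daily summary"}]},
+        on_run_completed="keep",  # assumed kwarg; "delete" drops the thread after each run
+    )
+    print(cron["cron_id"])
+
+asyncio.run(main())
+```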
+ + + ## v0.4.40 + - Prevented duplicate messages in resumable run and thread streams by addressing a race condition and adding tests to ensure consistent behavior. - Ensured that runs don't start until the pubsub subscription is confirmed to prevent message drops on startup. - Renamed the platform from langgraph to improve clarity and branding. - Reset PostgreSQL connections after use to prevent lock holding and improved error reporting for transaction issues. + - + ## v0.4.39 + - Upgraded `hono` from version 4.7.6 to 4.9.7, addressing a security issue related to the `bodyLimit` middleware. - Allowed customization of the base authentication URL to enhance flexibility. - Pinned the 'ty' dependency to a stable version using 'uv' to prevent unexpected linting failures. + - + ## v0.4.38 + - Replaced `LANGSMITH_API_KEY` with `LANGSMITH_CONTROL_PLANE_API_KEY` to support hybrid deployments requiring license verification. - Introduced self-hosted log ingestion support, configurable via `SELF_HOSTED_LOGS_ENABLED` and `SELF_HOSTED_LOGS_ENDPOINT` environment variables. + - + ## v0.4.37 -- Required create permissions for copying threads to ensure proper authorization. - +Required create permissions for copying threads to ensure proper authorization. + + + ## v0.4.36 + - Improved error handling and added a delay to the sweep loop for smoother operation during Redis downtime or cancellation errors. - Updated the queue entrypoint to start the core-api gRPC server when `FF_USE_CORE_API` is enabled. - Introduced checks for invalid configurations in assistant endpoints to ensure consistency with other endpoints. + - + ## v0.4.35 + - Resolved a timezone issue in the core API, ensuring accurate time data retrieval. - Introduced a new `middleware_order` setting to apply authentication middleware before custom middleware, allowing finer control over protected route configurations. - Logged the Redis URL when errors occur during Redis client creation. - Improved Go engine/runtime context propagation to ensure consistent execution flow. - Removed the unnecessary `assistants.put` call from the executor entrypoint to streamline the process. + - + ## v0.4.34 -- Blocked unauthorized users from updating thread TTL settings to enhance security. - +Blocked unauthorized users from updating thread TTL settings to enhance security. + + + ## v0.4.33 + - Improved error handling for Redis locks by logging `LockNotOwnedError` and extending initial pool migration lock timeout to 60 seconds. - Updated the BaseMessage schema to align with the latest langchain-core version and synchronized build dependencies for consistent local development. + - + ## v0.4.32 + - Added a Go persistence layer to the API image, enabling gRPC server operation with PostgreSQL support and enhancing configurability. - Set the status to error when a timeout occurs to improve error handling. + - + ## v0.4.30 + - Added support for context when using `stream_mode="events"` and included new tests for this functionality (see the sketch below). - Added support for overriding the server port using `$LANGGRAPH_SERVER_PORT` and removed an unnecessary Dockerfile `ARG` for cleaner configuration. - Applied authorization filters to all table references in thread delete CTE to enhance security. - Introduced self-hosted metrics ingestion capability, allowing metrics to be sent to an OTLP collector every minute when the corresponding environment variables are set. - Ensured that the `set_latest` function properly updates the name and description of the version.
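+
+A sketch of the v0.4.30 behavior, assuming the `langgraph_sdk` async client; the `context` keyword mirrors the LangGraph context API and, like the thread and assistant IDs, is an assumption rather than a documented signature:
+
+```python
+async for chunk in client.runs.stream(
+    thread_id,
+    "agent",
+    input={"messages": [{"role": "user", "content": "hi"}]},
+    stream_mode="events",
+    context={"user_id": "user-123"},  # assumed: runtime context for the run
+):
+    print(chunk.event, chunk.data)
+```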
+ - + ## v0.4.29 -- Ensured proper cleanup of redis pubsub connections in all scenarios. - +Ensured proper cleanup of redis pubsub connections in all scenarios. + + + ## v0.4.28 + - Added a format parameter to the queue metrics server for enhanced customization. - Corrected `MOUNT_PREFIX` environment variable usage in CLI for consistency with documentation and to prevent confusion. - Added a feature to log warnings when messages are dropped due to no subscribers, controllable via a feature flag. @@ -255,104 +380,145 @@ By default, objects are serialized using msgpack. Under certain uncommon situati - Ensured correct response headers are sent for a2a, improving compatibility and communication. - Consolidated PostgreSQL checkpoint implementation, added CI testing for the `/core` directory, fixed RemoteStore test errors, and enhanced the Store implementation with transactions. - Added PostgreSQL migrations to the queue server to prevent errors from graphs being added before migrations are performed. + - + ## v0.4.27 -- Replaced `coredis` with `redis-py` to improve connection handling and reliability under high traffic loads. - +Replaced `coredis` with `redis-py` to improve connection handling and reliability under high traffic loads. + + + ## v0.4.24 + - Added functionality to return full message history for A2A calls in accordance with the A2A spec. - Added a `LANGGRAPH_SERVER_HOST` environment variable to Dockerfiles to support custom host settings for dual stack mode. + - + ## v0.4.23 -- Use a faster message codec for redis streaming. - +Used a faster message codec for Redis streaming. + + + ## v0.4.22 -- Ported long-stream handling to the run stream, join, and cancel endpoints for improved stream management. - +Ported long-stream handling to the run stream, join, and cancel endpoints for improved stream management (see the join sketch below). + + + ## v0.4.21 + - Added A2A streaming functionality and enhanced testing with the A2A SDK. - Added Prometheus metrics to track language usage in graphs, middleware, and authentication for improved insights. - Fixed bugs in the open-source library related to message conversion for chunks. - Removed await from pubsub subscribes to reduce flakiness in cluster tests and added retries in the shutdown suite to enhance API stability. + - + ## v0.4.20 -- Optimized Pubsub initialization to prevent overhead and address subscription timing issues, ensuring smoother run execution. - +Optimized Pubsub initialization to prevent overhead and address subscription timing issues, ensuring smoother run execution. + + + ## v0.4.19 -- Removed warnings from psycopg by addressing function checks introduced in version 3.2.10. - +Removed warnings from psycopg by addressing function checks introduced in version 3.2.10. + + + ## v0.4.17 -- Filtered out logs with mount prefix to reduce noise in logging output. - +Filtered out logs with mount prefix to reduce noise in logging output. + + + ## v0.4.16 + - Added support for implicit thread creation in a2a to streamline operations. - Improved error serialization and emission in distributed runtime streams, enabling more comprehensive testing. + - + ## v0.4.13 + - Monitored queue status in the health endpoint to ensure correct behavior when PostgreSQL fails to initialize. - Addressed an issue with unequal swept ID lengths to improve log clarity. - Enhanced streaming outputs by avoiding re-serialization of DR payloads, using msgpack byte inspection for json-like parsing.
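+
+A sketch of joining an in-flight run's stream (v0.4.22), assuming the `langgraph_sdk` async client:
+
+```python
+# Start a background run, then attach (or re-attach) to its stream later;
+# join_stream replays events, which is what makes the stream resumable.
+run = await client.runs.create(thread_id, "agent", input={"messages": []})
+async for chunk in client.runs.join_stream(thread_id, run["run_id"]):
+    print(chunk.event, chunk.data)
+```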
+ - + ## v0.4.12 + - Ensured metrics are returned even when experiencing database connection issues. - Optimized update streams to prevent unnecessary data transmission. - Upgraded `hono` from version 4.9.2 to 4.9.6 in the `storage_postgres/langgraph-api-server` for improved URL path parsing security. - Added retries and an in-memory cache for LangSmith access calls to improve resilience against single failures. + - + ## v0.4.11 -- Added support for TTL (time-to-live) in thread updates. - +Added support for TTL (time-to-live) in thread updates. + + + ## v0.4.10 -- In distributed runtime, update serde logic for final checkpoint -> thread setting. - +In the distributed runtime, updated the serde logic for the final checkpoint -> thread setting. + + + ## v0.4.9 + - Added support for filtering search results by IDs in the search endpoint for more precise queries. - Included configurable headers for assistant endpoints to enhance request customization. - Implemented a simple A2A endpoint with support for agent card retrieval, task creation, and task management. + - + ## v0.4.7 -- Stopped the inclusion of x-api-key to enhance security. - +Stopped the inclusion of x-api-key to enhance security. + + + ## v0.4.6 -- Fixed a race condition when joining streams, preventing duplicate start events. - +Fixed a race condition when joining streams, preventing duplicate start events. + + + ## v0.4.5 + - Ensured the checkpointer starts and stops correctly before and after the queue to improve shutdown and startup efficiency. - Resolved an issue where workers were being prematurely cancelled when the queue was cancelled. - Prevented queue termination by adding a fallback for cases when Redis fails to wake a worker. + - + ## v0.4.4 + - Set the custom auth thread_id to None for stateless runs to prevent conflicts. - Improved Redis signaling in the Go runtime by adding a wakeup worker and Redis lock implementation, and updated sweep logic. + - + ## v0.4.3 + - Added stream mode to thread stream for improved data processing. - Added a durability parameter to runs for improved data persistence. + - + ## v0.4.2 -- Ensured pubsub is initialized before creating a run to prevent errors from missing messages. - +Ensured pubsub is initialized before creating a run to prevent errors from missing messages. + + + ## v0.4.0 Minor version 0.4 comes with a number of improvements as well as some breaking changes. @@ -366,65 +532,89 @@ This minor version also includes a couple of breaking changes to improve the usa - In this minor version, we stop the practice of automatically including headers as configurable values in your runs. You can opt-in to specific patterns by setting **configurable_headers** in your agent server config. - Run stream event IDs (for resumable streams) are now in the format of `ms-seq` instead of the previous format. We retain backwards compatibility for the old format, but we recommend using the new format for new code. + - + ## v0.3.4 + - Added custom Prometheus metrics for Redis/PG connection pools and switched the queue server to Uvicorn/Starlette for improved monitoring. - Restored Wolfi image build by correcting shell command formatting and added a Makefile target for testing with nginx. + - + ## v0.3.3 + - Added timeouts to specific Redis calls to prevent workers from being left active. - Updated the Golang runtime and added pytest skips for unsupported functionalities, including initial support for passing store to node and message streaming.
- Introduced a reverse proxy setup for serving combined Python and Node.js graphs, with nginx handling server routing, to facilitate a Postgres/Redis backend for the Node.js API server. + - + ## v0.3.1 -- Added a statement timeout to the pool to prevent long-running queries. - +Added a statement timeout to the pool to prevent long-running queries. + + + ## v0.3.0 + - Set a default 15-minute statement timeout and implemented monitoring for long-running queries to ensure system efficiency. - Stopped propagating run configurable values to the thread configuration, because this can cause issues on subsequent runs if you are specifying a checkpoint_id. This is a **slight breaking change** in behavior, since the thread value will no longer automatically reflect the unioned configuration of the most recent run. We believe this behavior is more intuitive, however; see the run-scoped config sketch below. - Enhanced compatibility with older worker versions by handling event data in channel names within ops.py. + - + ## v0.2.137 -- Fixed an unbound local error and improved logging for thread interruptions or errors, along with type updates. - +Fixed an unbound local error and improved logging for thread interruptions or errors, along with type updates. + + + ## v0.2.136 + - Added enhanced logging to aid in debugging metaview issues. - Upgraded executor and runtime to the latest version for improved performance and stability. + - + ## v0.2.135 -- Ensured async coroutines are properly awaited to prevent potential runtime errors. - +Ensured async coroutines are properly awaited to prevent potential runtime errors. + + + ## v0.2.134 -- Enhanced search functionality to improve performance by allowing users to select specific columns for query results. - +Enhanced search functionality to improve performance by allowing users to select specific columns for query results. + + + ## v0.2.133 + - Added count endpoints for crons, threads, and assistants to enhance data tracking (#1132). - Improved SSH functionality for better reliability and stability. - Updated @langchain/langgraph-api to version 0.0.59 to fix an invalid state schema issue. + - + ## v0.2.132 + - Added Go language images to enhance project compatibility and functionality. - Printed internal PIDs for JS workers to facilitate process inspection via SIGUSR1 signal. - Resolved a `run_pkey` error that occurred when attempting to insert duplicate runs. - Added `ty run` command and switched to using uuid7 for generating run IDs. - Implemented the initial Golang runtime to expand language support. + - + ## v0.2.131 -- Added support for `object agent spec` with descriptions in JS. - +Added support for `object agent spec` with descriptions in JS. + + + ## v0.2.130 + - Added a feature flag (FF_RICH_THREADS=false) to disable thread updates on run creation, reducing lock contention and simplifying thread status handling. - Utilized existing connections for `aput` and `apwrite` operations to improve performance. - Improved error handling for decoding issues to enhance data processing reliability. @@ -434,398 +624,499 @@ This minor version also includes a couple of breaking changes to improve the usa - Changed the default multitask strategy to enqueue, improving throughput by eliminating the need to fetch inflight runs during new run insertions. - Optimized database operations for `Runs.next` and `Runs.sweep` to reduce redundant queries and improve efficiency. - Improved run creation speed by skipping unnecessary inflight runs queries.
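+
+A run-scoped config sketch for the v0.3.0 change, assuming the `langgraph_sdk` async client; after v0.3.0 the `checkpoint_id` below applies to this run only and is no longer copied onto the thread's configuration:
+
+```python
+result = await client.runs.wait(
+    thread_id,
+    "agent",
+    input=None,  # resume from the checkpoint rather than sending new input
+    config={"configurable": {"checkpoint_id": checkpoint_id}},
+)
+```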
+ - + ## v0.2.129 + - Stopped passing internal LGP fields to context to prevent breaking type checks. - Exposed content-location headers to ensure correct resumability behavior in the API. + - + ## v0.2.128 -- Ensured synchronized updates between `configurable` and `context` in assistants, preventing setup errors and supporting smoother version transitions. - +Ensured synchronized updates between `configurable` and `context` in assistants, preventing setup errors and supporting smoother version transitions. + + + ## v0.2.127 -- Excluded unrequested stream modes from the resumable stream to optimize functionality. - +Excluded unrequested stream modes from the resumable stream to optimize functionality. + + + ## v0.2.126 + - Made access logger headers configurable to enhance logging flexibility. - Debounced the Runs.stats function to reduce the frequency of expensive calls and improve performance. - Introduced debouncing for sweepers to enhance performance and efficiency (#1147). - Acquired a lock for TTL sweeping to prevent database spamming during scale-out operations. + - + ## v0.2.125 -- Updated tracing context replicas to use the new format, ensuring compatibility. - +Updated tracing context replicas to use the new format, ensuring compatibility. + + + ## v0.2.123 -- Added an entrypoint to the queue replica for improved deployment management. - +Added an entrypoint to the queue replica for improved deployment management. + + + ## v0.2.122 -- Utilized persisted interrupt status in `join` to ensure correct handling of user's interrupt state after completion. - +Utilized persisted interrupt status in `join` to ensure correct handling of user's interrupt state after completion. + + + ## v0.2.121 + - Consolidated events to a single channel to prevent race conditions and optimize startup performance. - Ensured custom lifespans are invoked on queue workers for proper setup, and added tests. + - + ## v0.2.120 + - Restored the original streaming behavior of runs, ensuring consistent inclusion of interrupt events based on `stream_mode` settings. - Optimized `Runs.next` query to reduce average execution time from ~14.43ms to ~2.42ms, improving performance. - Added support for stream mode "tasks" and "checkpoints", normalized the UI namespace, and upgraded `@langchain/langgraph-api` for enhanced functionality. + - + ## v0.2.117 -- Added a composite index on threads for faster searches with owner-based authentication and updated the default sort order to `updated_at` for improved query performance. - +Added a composite index on threads for faster searches with owner-based authentication and updated the default sort order to `updated_at` for improved query performance. + + + ## v0.2.116 -- Reduced the default number of history checkpoints from 10 to 1 to optimize performance. - +Reduced the default number of history checkpoints from 10 to 1 to optimize performance. + + + ## v0.2.115 -- Optimized cache re-use to enhance application performance and efficiency. - +Optimized cache re-use to enhance application performance and efficiency. + + + ## v0.2.113 -- Improved thread search pagination by updating response headers with `X-Pagination-Total` and `X-Pagination-Next` for better navigation. - +Improved thread search pagination by updating response headers with `X-Pagination-Total` and `X-Pagination-Next` for better navigation. + + + ## v0.2.112 + - Ensured sync logging methods are awaited and added a linter to prevent future occurrences. 
- Fixed an issue where JavaScript tasks were not being populated correctly for JS graphs. + - + ## v0.2.111 -- Fixed JS graph streaming failure by starting the heartbeat as soon as the connection opens. - +Fixed JS graph streaming failure by starting the heartbeat as soon as the connection opens. + + + ## v0.2.110 -- Added interrupts as default values for join operations while preserving stream behavior. - +Added interrupts as default values for join operations while preserving stream behavior. + + + ## v0.2.109 -- Fixed an issue where config schema was missing when `config_type` was not set, ensuring more reliable configurations. - +Fixed an issue where config schema was missing when `config_type` was not set, ensuring more reliable configurations. + + + ## v0.2.108 -- Prepared for LangGraph v0.6 compatibility with new context API support and bug fixes. - +Prepared for LangGraph v0.6 compatibility with new context API support and bug fixes. + + + ## v0.2.107 + - Implemented caching for authentication processes to enhance performance and efficiency. - Optimized database performance by merging count and select queries. + - + ## v0.2.106 -- Made log streams resumable, enhancing reliability and improving user experience when reconnecting. - +Made log streams resumable, enhancing reliability and improving user experience when reconnecting. + + + ## v0.2.105 -- Added a heapdump endpoint to save memory heap information to a file. - +Added a heapdump endpoint to save memory heap information to a file. + + + ## v0.2.103 -- Used the correct metadata endpoint to resolve issues with data retrieval. - +Used the correct metadata endpoint to resolve issues with data retrieval. + + + ## v0.2.102 + - Captured interrupt events in the wait method to preserve previous behavior from langgraph 0.5.0. - Added support for SDK structlog in the JavaScript environment for enhanced logging capabilities. + - + ## v0.2.101 -- Corrected the metadata endpoint for self-hosted deployments. - +Corrected the metadata endpoint for self-hosted deployments. + + + ## v0.2.99 + - Improved license check by adding an in-memory cache and handling Redis connection errors more effectively. - Reloaded assistants to preserve manually created ones while discarding those removed from the configuration file. - Reverted changes to ensure the UI namespace for gen UI is a valid JavaScript property name. - Ensured that the UI namespace for generated UI is a valid JavaScript property name, improving API compliance. - Enhanced error handling to return a 422 status code for unprocessable entity requests. + - + ## v0.2.98 -- Added context to langgraph nodes to improve log filtering and trace visibility. - +Added context to langgraph nodes to improve log filtering and trace visibility. + + + ## v0.2.97 + - Improved interoperability with the ckpt ingestion worker on the main loop to prevent task scheduling issues. - Delayed queue worker startup until after migrations are completed to prevent premature execution. - Enhanced thread state error handling by adding specific metadata and improved response codes for better clarity when state updates fail during creation. - Exposed the interrupt ID when retrieving the thread state to improve API transparency (see the sketch below). + - + ## v0.2.96 -* Added a fallback mechanism for configurable header patterns to handle exclude/include settings more effectively. +Added a fallback mechanism for configurable header patterns to handle exclude/include settings more effectively.
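+
+A sketch of reading interrupt IDs from thread state (v0.2.97), assuming the `langgraph_sdk` async client and the usual thread-state shape; the exact field layout is an assumption:
+
+```python
+state = await client.threads.get_state(thread_id)
+for task in state["tasks"]:
+    for interrupt in task.get("interrupts", []):
+        # Each interrupt now carries an "id" alongside its payload.
+        print(interrupt["id"], interrupt["value"])
+```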
+ - + ## v0.2.95 -* Avoided setting the future if it is already done to prevent redundant operations. -* Resolved compatibility errors in CI by switching from `typing.TypedDict` to `typing_extensions.TypedDict` for Python versions below 3.12. +- Avoided setting the future if it is already done to prevent redundant operations. +- Resolved compatibility errors in CI by switching from `typing.TypedDict` to `typing_extensions.TypedDict` for Python versions below 3.12. + - + ## v0.2.94 -* Improved performance by omitting pending sends for langgraph versions 0.5 and above. -* Improved server startup logs to provide clearer warnings when the DD_API_KEY environment variable is set. +- Improved performance by omitting pending sends for langgraph versions 0.5 and above. +- Improved server startup logs to provide clearer warnings when the DD_API_KEY environment variable is set. + - + ## v0.2.93 -* Removed the GIN index for run metadata to improve performance. +Removed the GIN index for run metadata to improve performance. + - + ## v0.2.92 -* Enabled copying functionality for blobs and checkpoints, improving data management flexibility. +Enabled copying functionality for blobs and checkpoints, improving data management flexibility. + - + ## v0.2.91 -* Reduced writes to the `checkpoint_blobs` table by inlining small values (null, numeric, str, etc.). This means we don't need to store extra values for channels that haven't been updated. +Reduced writes to the `checkpoint_blobs` table by inlining small values (null, numeric, str, etc.). This means we don't need to store extra values for channels that haven't been updated. + - + ## v0.2.90 -* Improve checkpoint writes via node-local background queueing. +Improved checkpoint writes via node-local background queueing. + - + ## v0.2.89 -* Decoupled checkpoint writing from thread/run state by removing foreign keys and updated logger to prevent timeout-related failures. +Decoupled checkpoint writing from thread/run state by removing foreign keys and updated logger to prevent timeout-related failures. + - + ## v0.2.88 -* Removed the foreign key constraint for `thread` in the `run` table to simplify database schema. +Removed the foreign key constraint for `thread` in the `run` table to simplify database schema. + - + ## v0.2.87 -* Added more detailed logs for Redis worker signaling to improve debugging. +Added more detailed logs for Redis worker signaling to improve debugging. + - + ## v0.2.86 -* Honored tool descriptions in the `/mcp` endpoint to align with expected functionality. +Honored tool descriptions in the `/mcp` endpoint to align with expected functionality. + - + ## v0.2.85 -* Added support for the `on_disconnect` field to `runs/wait` and included disconnect logs for better debugging. +Added support for the `on_disconnect` field to `runs/wait` and included disconnect logs for better debugging. + - + ## v0.2.84 -* Removed unnecessary status updates to streamline thread handling and updated version to 0.2.84. +Removed unnecessary status updates to streamline thread handling and updated version to 0.2.84. + - + ## v0.2.83 -* Reduced the default time-to-live for resumable streams to 2 minutes. -* Enhanced data submission logic to send data to both Beacon and LangSmith instance based on license configuration. -* Enabled submission of self-hosted data to a LangSmith instance when the endpoint is configured. +- Reduced the default time-to-live for resumable streams to 2 minutes.
+- Enhanced data submission logic to send data to both Beacon and the LangSmith instance based on license configuration. +- Enabled submission of self-hosted data to a LangSmith instance when the endpoint is configured. + - + ## v0.2.82 -* Addressed a race condition in background runs by implementing a lock using join, ensuring reliable execution across CTEs. +Addressed a race condition in background runs by implementing a lock using join, ensuring reliable execution across CTEs. + - + ## v0.2.81 -* Optimized run streams by reducing initial wait time to improve responsiveness for older or non-existent runs. +Optimized run streams by reducing initial wait time to improve responsiveness for older or non-existent runs. + - + ## v0.2.80 -* Corrected parameter passing in the `logger.ainfo()` API call to resolve a TypeError. +Corrected parameter passing in the `logger.ainfo()` API call to resolve a TypeError. + - + ## v0.2.79 -* Fixed a JsonDecodeError in checkpointing with remote graph by correcting JSON serialization to handle trailing slashes properly. -* Introduced a configuration flag to disable webhooks globally across all routes. +- Fixed a JsonDecodeError in checkpointing with remote graph by correcting JSON serialization to handle trailing slashes properly. +- Introduced a configuration flag to disable webhooks globally across all routes. + - + ## v0.2.78 -* Added timeout retries to webhook calls to improve reliability. -* Added HTTP request metrics, including a request count and latency histogram, for enhanced monitoring capabilities. +- Added timeout retries to webhook calls to improve reliability. +- Added HTTP request metrics, including a request count and latency histogram, for enhanced monitoring capabilities. + - + ## v0.2.77 -* Added HTTP metrics to improve performance monitoring. -* Changed the Redis cache delimiter to reduce conflicts with subgraph message names and updated caching behavior. +- Added HTTP metrics to improve performance monitoring. +- Changed the Redis cache delimiter to reduce conflicts with subgraph message names and updated caching behavior. + - + ## v0.2.76 -* Updated Redis cache delimiter to prevent conflicts with subgraph messages. +Updated Redis cache delimiter to prevent conflicts with subgraph messages. + - + ## v0.2.74 -* Scheduled webhooks in an isolated loop to ensure thread-safe operations and prevent errors with PYTHONASYNCIODEBUG=1. +Scheduled webhooks in an isolated loop to ensure thread-safe operations and prevent errors with PYTHONASYNCIODEBUG=1. + - + ## v0.2.73 -* Fixed an infinite frame loop issue and removed the dict_parser due to structlog's unexpected behavior. -* Throw a 409 error on deadlock occurrence during run cancellations to handle lock conflicts gracefully. +- Fixed an infinite frame loop issue and removed the dict_parser due to structlog's unexpected behavior. +- Now throws a 409 error on deadlock occurrence during run cancellations to handle lock conflicts gracefully. + - + ## v0.2.72 -* Ensured compatibility with future langgraph versions. -* Implemented a 409 response status to handle deadlock issues during cancellation. +- Ensured compatibility with future langgraph versions. +- Implemented a 409 response status to handle deadlock issues during cancellation. + - + ## v0.2.71 -* Improved logging for better clarity and detail regarding log types. +Improved logging for better clarity and detail regarding log types.
+ - + ## v0.2.70 -* Improved error handling to better distinguish and log TimeoutErrors caused by users from internal run timeouts. +Improved error handling to better distinguish and log TimeoutErrors caused by users from internal run timeouts. + - + ## v0.2.69 -* Added sorting and pagination to the crons API and updated schema definitions for improved accuracy. +Added sorting and pagination to the crons API and updated schema definitions for improved accuracy. + - + ## v0.2.66 -* Fixed a 404 error when creating multiple runs with the same thread_id using `on_not_exist="create"`. +Fixed a 404 error when creating multiple runs with the same thread_id using `on_not_exist="create"`. + - + ## v0.2.65 -* Ensured that only fields from `assistant_versions` are returned when necessary. -* Ensured consistent data types for in-memory and PostgreSQL users, improving internal authentication handling. +- Ensured that only fields from `assistant_versions` are returned when necessary. +- Ensured consistent data types for in-memory and PostgreSQL users, improving internal authentication handling. + - + ## v0.2.64 -* Added descriptions to version entries for better clarity. +Added descriptions to version entries for better clarity. + - + ## v0.2.62 -* Improved user handling for custom authentication in the JS Studio. -* Added Prometheus-format run statistics to the metrics endpoint for better monitoring. -* Added run statistics in Prometheus format to the metrics endpoint. +- Improved user handling for custom authentication in the JS Studio. +- Added Prometheus-format run statistics to the metrics endpoint for better monitoring. +- Added run statistics in Prometheus format to the metrics endpoint. + - + ## v0.2.61 -* Set a maximum idle time for Redis connections to prevent unnecessary open connections. +Set a maximum idle time for Redis connections to prevent unnecessary open connections. + - + ## v0.2.60 -* Enhanced error logging to include traceback details for dictionary operations. -* Added a `/metrics` endpoint to expose queue worker metrics for monitoring. +- Enhanced error logging to include traceback details for dictionary operations. +- Added a `/metrics` endpoint to expose queue worker metrics for monitoring. + - + ## v0.2.57 -* Removed CancelledError from retriable exceptions to allow local interrupts while maintaining retriability for workers. -* Introduced middleware to gracefully shut down the server after completing in-flight requests upon receiving a SIGINT. -* Reduced metadata stored in checkpoint to only include necessary information. -* Improved error handling in join runs to return error details when present. +- Removed CancelledError from retriable exceptions to allow local interrupts while maintaining retriability for workers. +- Introduced middleware to gracefully shut down the server after completing in-flight requests upon receiving a SIGINT. +- Reduced metadata stored in checkpoint to only include necessary information. +- Improved error handling in join runs to return error details when present. + - + ## v0.2.56 -* Improved application stability by adding a handler for SIGTERM signals. +Improved application stability by adding a handler for SIGTERM signals. + - + ## v0.2.55 -* Improved the handling of cancellations in the queue entrypoint. -* Improved cancellation handling in the queue entry point. +- Improved the handling of cancellations in the queue entrypoint. +- Improved cancellation handling in the queue entry point. 
+ - + ## v0.2.54 -* Enhanced error message for LuaLock timeout during license validation. -* Fixed the $contains filter in custom auth by requiring an explicit ::text cast and updated tests accordingly. -* Ensured project and tenant IDs are formatted as UUIDs for consistency. +- Enhanced error message for LuaLock timeout during license validation. +- Fixed the $contains filter in custom auth by requiring an explicit ::text cast and updated tests accordingly. +- Ensured project and tenant IDs are formatted as UUIDs for consistency. + - + ## v0.2.53 -* Resolved a timing issue to ensure the queue starts only after the graph is registered. -* Improved performance by setting thread and run status in a single query and enhanced error handling during checkpoint writes. -* Reduced the default background grace period to 3 minutes. +- Resolved a timing issue to ensure the queue starts only after the graph is registered. +- Improved performance by setting thread and run status in a single query and enhanced error handling during checkpoint writes. +- Reduced the default background grace period to 3 minutes. + - + ## v0.2.52 -* Now logging expected graphs when one is omitted to improve traceability. -* Implemented a time-to-live (TTL) feature for resumable streams. -* Improved query efficiency and consistency by adding a unique index and optimizing row locking. +- Now logging expected graphs when one is omitted to improve traceability. +- Implemented a time-to-live (TTL) feature for resumable streams. +- Improved query efficiency and consistency by adding a unique index and optimizing row locking. + - + ## v0.2.51 -* Handled `CancelledError` by marking tasks as ready to retry, improving error management in worker processes. -* Added LG API version and request ID to metadata and logs for better tracking. -* Added LG API version and request ID to metadata and logs to improve traceability. -* Improved database performance by creating indexes concurrently. -* Ensured postgres write is committed only after the Redis running marker is set to prevent race conditions. -* Enhanced query efficiency and reliability by adding a unique index on thread_id/running, optimizing row locks, and ensuring deterministic run selection. -* Resolved a race condition by ensuring Postgres updates only occur after the Redis running marker is set. +- Handled `CancelledError` by marking tasks as ready to retry, improving error management in worker processes. +- Added LG API version and request ID to metadata and logs for better tracking. +- Added LG API version and request ID to metadata and logs to improve traceability. +- Improved database performance by creating indexes concurrently. +- Ensured postgres write is committed only after the Redis running marker is set to prevent race conditions. +- Enhanced query efficiency and reliability by adding a unique index on thread_id/running, optimizing row locks, and ensuring deterministic run selection. +- Resolved a race condition by ensuring Postgres updates only occur after the Redis running marker is set. + - + ## v0.2.46 -* Introduced a new connection for each operation while preserving transaction characteristics in Threads state `update()` and `bulk()` commands. +Introduced a new connection for each operation while preserving transaction characteristics in Threads state `update()` and `bulk()` commands. + - + ## v0.2.45 -* Enhanced streaming feature by incorporating tracing contexts. -* Removed an unnecessary query from the Crons.search function. 
-* Resolved connection reuse issue when scheduling next run for multiple cron jobs. -* Removed an unnecessary query in the Crons.search function to improve efficiency. -* Resolved an issue with scheduling the next cron run by improving connection reuse. +- Enhanced streaming feature by incorporating tracing contexts. +- Removed an unnecessary query from the Crons.search function. +- Resolved connection reuse issue when scheduling next run for multiple cron jobs. +- Removed an unnecessary query in the Crons.search function to improve efficiency. +- Resolved an issue with scheduling the next cron run by improving connection reuse. + - + ## v0.2.44 -* Enhanced the worker logic to exit the pipeline before continuing when the Redis message limit is reached. -* Introduced a ceiling for Redis message size with an option to skip messages larger than 128 MB for improved performance. -* Ensured the pipeline always closes properly to prevent resource leaks. +- Enhanced the worker logic to exit the pipeline before continuing when the Redis message limit is reached. +- Introduced a ceiling for Redis message size with an option to skip messages larger than 128 MB for improved performance. +- Ensured the pipeline always closes properly to prevent resource leaks. + - + ## v0.2.43 -* Improved performance by omitting logs in metadata calls and ensuring output schema compliance in value streaming. -* Ensured the connection is properly closed after use. -* Aligned output format to strictly adhere to the specified schema. -* Stopped sending internal logs in metadata requests to improve privacy. +- Improved performance by omitting logs in metadata calls and ensuring output schema compliance in value streaming. +- Ensured the connection is properly closed after use. +- Aligned output format to strictly adhere to the specified schema. +- Stopped sending internal logs in metadata requests to improve privacy. + - + ## v0.2.42 -* Added timestamps to track the start and end of a request's run. -* Added tracer information to the configuration settings. -* Added support for streaming with tracing contexts. +- Added timestamps to track the start and end of a request's run. +- Added tracer information to the configuration settings. +- Added support for streaming with tracing contexts. + - + ## v0.2.41 -* Added locking mechanism to prevent errors in pipelined executions. +Added locking mechanism to prevent errors in pipelined executions. + diff --git a/src/langsmith/agent-server-openapi.json b/src/langsmith/agent-server-openapi.json index 413392479b..ef63c7b89c 100644 --- a/src/langsmith/agent-server-openapi.json +++ b/src/langsmith/agent-server-openapi.json @@ -4411,7 +4411,7 @@ "enum": ["cancel", "continue"], "title": "On Disconnect", "description": "The disconnect mode to use. Must be one of 'cancel' or 'continue'.", - "default": "cancel" + "default": "continue" }, "feedback_keys": { "items": { @@ -4630,7 +4630,7 @@ "enum": ["cancel", "continue"], "title": "On Disconnect", "description": "The disconnect mode to use. 
Must be one of 'cancel' or 'continue'.", - "default": "cancel" + "default": "continue" }, "after_seconds": { "type": "number", diff --git a/src/langsmith/application-structure.mdx b/src/langsmith/application-structure.mdx index c77e65f47f..f8b5071e48 100644 --- a/src/langsmith/application-structure.mdx +++ b/src/langsmith/application-structure.mdx @@ -3,8 +3,7 @@ title: Application structure sidebarTitle: Application structure --- -import FrameworkAgnosticPy from '/snippets/langsmith/framework-agnostic-py.mdx'; -import FrameworkAgnosticJS from '/snippets/langsmith/framework-agnostic-js.mdx'; +import FrameworkAgnostic from '/snippets/langsmith/framework-agnostic.mdx'; To deploy on LangSmith, an application must consist of one or more graphs, a configuration file (`langgraph.json`), a file that specifies dependencies, and an optional `.env` file that specifies environment variables. @@ -21,12 +20,7 @@ To deploy using LangSmith, provide the following information: **Framework agnostic** -:::python - -::: -:::js - -::: + ## File structure @@ -155,6 +149,94 @@ Use the `graphs` key in the [configuration file](#configuration-file-concepts) t You can specify one or more graphs in the configuration file. Each graph is identified by a unique name and a path to either (1) a compiled graph or (2) a function that defines a graph. +### Use any framework with LangSmith Deployment + +While LangSmith Deployment requires applications to be structured as a LangGraph graph, individual nodes within that graph can contain arbitrary code. This means you can use any framework or library within your nodes while still benefiting from LangSmith's deployment infrastructure. + +The graph structure serves as a deployment interface, but your core application logic can use whichever tools and frameworks best suit your needs. + +To deploy with LangSmith, you need: + + + + + 1. **A LangGraph graph structure**: Define a graph using @[`StateGraph`] with @[`add_node`] and @[`add_edge`]. + 1. **Node functions with arbitrary logic**: Your node functions can call any framework or library. + 1. **A compiled graph**: @[Compile][StateGraph.compile] the graph to create a deployable application. + + The following example shows how to wrap your existing application logic within a minimal LangGraph structure: + + ```python + from langgraph.graph import StateGraph, START, END + from typing import TypedDict + + # Your existing application logic using any framework + from app_logic import process_data + from app_logic import fetch_data + + class State(TypedDict): + input: str + result: str + + def my_app_node(state: State) -> State: + """Node containing arbitrary framework code.""" + # Use any framework or library here + raw_data = fetch_data(state["input"]) + processed = process_data(raw_data) + return {"result": processed} + + # Define the graph structure + graph = StateGraph(State) + graph.add_node("process", my_app_node) # Add node with your logic + graph.add_edge(START, "process") # Connect start to your node + graph.add_edge("process", END) # Connect your node to end + + # Compile for deployment + app = graph.compile() + ``` + + + + 1. **A LangGraph graph structure**: Define a graph using [`StateGraph`](https://reference.langchain.com/javascript/classes/_langchain_langgraph.index.StateGraph.html) with [`addNode`](https://reference.langchain.com/javascript/classes/_langchain_langgraph.index.StateGraph.html#addnode) and [`addEdge`](https://reference.langchain.com/javascript/classes/_langchain_langgraph.index.StateGraph.html#addedge). 
+ 1. **Node functions with arbitrary logic**: Your node functions can call any framework or library. + 1. **A compiled graph**: [Compile](https://reference.langchain.com/javascript/classes/_langchain_langgraph.index.StateGraph.html#compile) the graph to create a deployable application. + + The following example shows how to wrap your existing application logic within a minimal LangGraph structure: + + ```typescript + import { StateGraph, START, END } from "@langchain/langgraph"; + import { Annotation } from "@langchain/langgraph"; + + // Your existing application logic using any framework + import { processData } from "./app-logic"; + import { fetchData } from "./app-logic"; + + const State = Annotation.Root({ + input: Annotation<string>, + result: Annotation<string> + }); + + async function myAppNode(state: typeof State.State) { + // Use any framework or library here + const rawData = await fetchData(state.input); + const processed = await processData(rawData); + return { result: processed }; + } + + // Define the graph structure + const graph = new StateGraph(State) + .addNode("process", myAppNode) // Add node with your logic + .addEdge(START, "process") // Connect start to your node + .addEdge("process", END); // Connect your node to end + + // Compile for deployment + export const app = graph.compile(); + ``` + + + +In this example, the node functions (`my_app_node` for Python and `myAppNode` for JavaScript) can contain calls to any framework or library. The LangGraph structure simply provides the deployment interface and orchestration layer. + ## Environment variables If you're working with a deployed LangGraph application [locally](/langsmith/local-server), you can configure environment variables in the `env` key of the [configuration file](#configuration-file-concepts). diff --git a/src/langsmith/cicd-pipeline-example.mdx b/src/langsmith/cicd-pipeline-example.mdx index 6448e187db..907f3b4ab4 100644 --- a/src/langsmith/cicd-pipeline-example.mdx +++ b/src/langsmith/cicd-pipeline-example.mdx @@ -1,9 +1,9 @@ --- -title: Implement a CI/CD pipeline using LangSmith Deployments and Evaluation +title: Implement a CI/CD pipeline using LangSmith Deployment and Evaluation sidebarTitle: Implement a CI/CD pipeline --- -This guide demonstrates how to implement a comprehensive CI/CD pipeline for AI agent applications deployed in LangSmith Deployments. In this example, you'll use the [LangGraph](/oss/langgraph/overview) open source framework for orchestrating and building the agent, [LangSmith](/langsmith/home) for observability and evaluations. This pipeline is based on the [cicd-pipeline-example repository](https://github.com/langchain-ai/cicd-pipeline-example). +This guide demonstrates how to implement a comprehensive CI/CD pipeline for AI agent applications deployed in LangSmith Deployment. In this example, you'll use the [LangGraph](/oss/langgraph/overview) open source framework for orchestrating and building the agent, and [LangSmith](/langsmith/home) for observability and evaluations. This pipeline is based on the [cicd-pipeline-example repository](https://github.com/langchain-ai/cicd-pipeline-example). ## Overview @@ -87,7 +87,7 @@ The CI/CD pipeline uses GitHub Actions with the [Control Plane API](/langsmith/a The workflow includes: -- **New agent deployment**: When a new PR is opened and tests pass, a new preview deployment is created in LangSmith Deployments using the [Control Plane API](/langsmith/api-ref-control-plane).
This allows you to test the agent in a staging environment before promoting to production. +- **New agent deployment**: When a new PR is opened and tests pass, a new preview deployment is created in LangSmith Deployment using the [Control Plane API](/langsmith/api-ref-control-plane). This allows you to test the agent in a staging environment before promoting to production. - **Agent deployment revision**: A revision happens when an existing deployment with the same ID is found, or when the PR is merged into main. In the case of merging to main, the preview deployment is deleted and a production deployment is created. This ensures that any updates to the agent are properly deployed and integrated into the production infrastructure. @@ -137,7 +137,7 @@ LangSmith supports multiple deployment methods, depending on how your [LangSmith - **Cloud LangSmith**: Direct GitHub integration or Docker image deployment. - **Self-Hosted/Hybrid**: Container registry-based deployments. -The deployment flow starts by modifying your agent implementation. At minimum, you must have a [`langgraph.json`](/langsmith/application-structure) and dependency file in your project (`requirements.txt` or `pyproject.toml`). Use the `langgraph dev` CLI tool to check for errors—fix any errors; otherwise, the deployment will succeed when deployed to LangSmith Deployments. +The deployment flow starts by modifying your agent implementation. At minimum, you must have a [`langgraph.json`](/langsmith/application-structure) and a dependency file in your project (`requirements.txt` or `pyproject.toml`). Use the `langgraph dev` CLI tool to check for errors and fix any that surface; once the app runs locally without errors, deploying it to LangSmith Deployment will succeed. ```mermaid graph TD @@ -283,7 +283,7 @@ See the [LangGraph CLI build documentation](/langsmith/cli#build) for more detai #### Database & cache configuration -By default, LangSmith Deployments create PostgreSQL and Redis instances for you. To use external services, set the following environment variables in your new deployment or revision: +By default, LangSmith Deployment creates PostgreSQL and Redis instances for you. To use external services, set the following environment variables in your new deployment or revision: ```bash # Set environment variables for external services @@ -314,9 +314,9 @@ For self-hosted LangSmith instances, use `http(s)://<hostname>/api` where ` If you're setting the endpoint in the `LANGSMITH_ENDPOINT` environment variable, you need to add `/v1` at the end (e.g., `https://api.smith.langchain.com/v1` or `http(s)://<hostname>/api/v1` if self-hosted). -#### LangSmith Deployments API (Deployments) +#### LangSmith Deployment API (Deployments) -For LangSmith Deployments operations (deployments, revisions): +For LangSmith Deployment operations (deployments, revisions): | Region | Endpoint | |--------|----------| diff --git a/src/langsmith/deploy-to-cloud.mdx b/src/langsmith/deploy-to-cloud.mdx index 9d58bac27f..70079652af 100644 --- a/src/langsmith/deploy-to-cloud.mdx +++ b/src/langsmith/deploy-to-cloud.mdx @@ -1,5 +1,5 @@ --- -title: LangSmith on Cloud +title: Deploy on Cloud sidebarTitle: Cloud icon: "cloud" iconType: "solid" @@ -8,7 +8,7 @@ iconType: "solid" This is the comprehensive setup and management guide for deploying applications to LangSmith Cloud. -**Looking for a quick setup?** Try the [quickstart guide](/langsmith/deployment-quickstart) first. +**If you're looking for a quick setup**, try the [quickstart guide](/langsmith/deployment-quickstart) first.
@@ -16,8 +16,8 @@ Before setting up, review the [Cloud overview page](/langsmith/cloud) to underst ## Prerequisites -1. LangSmith applications are deployed from GitHub repositories. Configure and upload a LangSmith application to a GitHub repository in order to deploy it to LangSmith. -2. [Verify that the LangGraph API runs locally](/langsmith/local-server). If the API does not run successfully (i.e. `langgraph dev`), deploying to LangSmith will fail as well. +- Applications are deployed from GitHub repositories. Configure and upload an application to a GitHub repository. +- [Verify that the LangGraph API runs locally](/langsmith/local-server). If the API does not run successfully (i.e., `langgraph dev`), deploying to LangSmith will fail as well. **One-Time Setup Required**: A GitHub organization owner or admin must complete the OAuth flow in the LangSmith UI to authorize the `hosted-langserve` GitHub app. This only needs to be done once per workspace. After the initial OAuth authorization, all developers with deployment permissions can create and manage deployments without requiring GitHub admin access. @@ -25,49 +25,44 @@ Before setting up, review the [Cloud overview page](/langsmith/cloud) to underst ## Create new deployment -Starting from the LangSmith UI: - -1. In the left-hand navigation panel, select **Deployments**, which contains a list of existing deployments. -2. In the top-right corner, select **+ New Deployment** to create a new deployment. -3. In the `Create New Deployment` panel, fill out the required fields. - 1. `Deployment details` - 1. Select `Import from GitHub` and follow the GitHub OAuth workflow to install and authorize LangChain's `hosted-langserve` GitHub app to access the selected repositories. After installation is complete, return to the `Create New Deployment` panel and select the GitHub repository to deploy from the dropdown menu. - The GitHub user installing LangChain's `hosted-langserve` GitHub app must be an [owner](https://docs.github.com/en/organizations/managing-peoples-access-to-your-organization-with-roles/roles-in-an-organization#organization-owners) of the organization or account. This authorization only needs to be completed once per LangSmith workspace - subsequent deployments can be created by any user with deployment permissions. - 2. Specify a name for the deployment. - 3. Specify the desired `Git Branch`. A deployment is linked to a branch. When a new revision is created, code for the linked branch will be deployed. The branch can be updated later in the [Deployment Settings](#deployment-settings). - 4. Specify the full path to the [LangGraph API config file](/langsmith/cli#configuration-file) including the file name. For example, if the file `langgraph.json` is in the root of the repository, simply specify `langgraph.json`. - 5. Use the checkbox to `Automatically update deployment on push to branch`. If checked, the deployment will automatically be updated when changes are pushed to the specified `Git Branch`. This setting can be enabled/disabled later in the [Deployment Settings](#deployment-settings). - 2. Select the desired `Deployment Type`. - 1. `Development` deployments are meant for non-production use cases and are provisioned with minimal resources. - 2. `Production` deployments can serve up to 500 requests/second and are provisioned with highly available storage with automatic backups. - 3. Determine if the deployment should be `Shareable through Studio`. - 1. 
If unchecked, the deployment will only be accessible with a valid LangSmith API key for the workspace. - 2. If checked, the deployment will be accessible through Studio to any LangSmith user. A direct URL to Studio for the deployment will be provided to share with other LangSmith users. - 4. Specify `Environment Variables` and secrets. See the [Environment Variables reference](/langsmith/env-var) to configure additional variables for the deployment. - 1. Sensitive values such as API keys (e.g. `OPENAI_API_KEY`) should be specified as secrets. - 2. Additional non-secret environment variables can be specified as well. - 5. A new LangSmith `Tracing Project` is automatically created with the same name as the deployment. -4. In the top-right corner, select `Submit`. After a few seconds, the `Deployment` view appears and the new deployment will be queued for provisioning. +Starting from the [LangSmith UI](https://smith.langchain.com), select **Deployments** in the left-hand navigation panel. In the top-right corner, select **+ New Deployment** to create a new deployment: + +1. In the **Create New Deployment** panel, fill out the required fields. For **Deployment details**: + 1. Select **Import from GitHub** and follow the GitHub OAuth workflow to install and authorize LangChain's `hosted-langserve` GitHub app to access the selected repositories. After installation is complete, return to the **Create New Deployment** panel and select the GitHub repository to deploy from the dropdown menu. + The GitHub user installing LangChain's `hosted-langserve` GitHub app must be an [owner](https://docs.github.com/en/organizations/managing-peoples-access-to-your-organization-with-roles/roles-in-an-organization#organization-owners) of the organization or account. This authorization only needs to be completed once per LangSmith workspace—subsequent deployments can be created by any user with deployment permissions. + 1. Specify a name for the deployment. + 1. Specify the desired **Git Branch**. A deployment is linked to a branch. When a new revision is created, code for the linked branch will be deployed. The branch can be updated later in the [Deployment Settings](#deployment-settings). + 1. Specify the full path to the [LangGraph API config file](/langsmith/cli#configuration-file) including the file name. For example, if the file `langgraph.json` is in the root of the repository, specify `langgraph.json`. + 1. Use the checkbox to **Automatically update deployment on push to branch**. If checked, the deployment will automatically be updated when changes are pushed to the specified **Git Branch**. You can enable or disable this setting later in the [Deployment Settings](#deployment-settings) in the [LangSmith UI](https://smith.langchain.com). + For **Deployment Type**: + - Development deployments are meant for non-production use cases and are provisioned with minimal resources. + - Production deployments can serve up to 500 requests/second and are provisioned with highly available storage with automatic backups. + 1. Determine if the deployment should be **Shareable through Studio**. + 1. If unchecked, the deployment will only be accessible with a valid LangSmith API key for the [workspace](/langsmith/administration-overview#workspaces). + 1. If checked, the deployment will be accessible through [Studio](/langsmith/studio) to any LangSmith user. A direct URL to Studio for the deployment will be provided to share with other LangSmith users. + 1. Specify **Environment Variables** and secrets.
To configure additional variables for the deployment, refer to the [Environment Variables reference](/langsmith/env-var). + 1. Sensitive values such as API keys (e.g., `OPENAI_API_KEY`) should be specified as secrets. + 1. Additional non-secret environment variables can be specified as well. + 1. A new LangSmith [tracing project](/langsmith/observability) is automatically created with the same name as the deployment. +1. In the top-right corner, select **Submit**. After a few seconds, the **Deployment** view appears and the new deployment will be queued for provisioning. ## Create new revision -When [creating a new deployment](#create-new-deployment), a new revision is created by default. Subsequent revisions can be created to deploy new code changes. +When [creating a new deployment](#create-new-deployment), a new revision is created by default. You can create subsequent revisions to deploy new code changes. -Starting from the LangSmith UI... +Starting from the [LangSmith UI](https://smith.langchain.com), select **Deployments** in the left-hand navigation panel. Select an existing deployment to create a new revision for. -1. In the left-hand navigation panel, select **Deployments**, which contains a list of existing deployments. -2. Select an existing deployment to create a new revision for. -3. In the `Deployment` view, in the top-right corner, select `+ New Revision`. -4. In the `New Revision` modal, fill out the required fields. - 1. Specify the full path to the [LangGraph API config file](/langsmith/cli#configuration-file) including the file name. For example, if the file `langgraph.json` is in the root of the repository, simply specify `langgraph.json`. - 2. Determine if the deployment should be `Shareable through Studio`. - 1. If unchecked, the deployment will only be accessible with a valid LangSmith API key for the workspace. - 2. If checked, the deployment will be accessible through Studio to any LangSmith user. A direct URL to Studio for the deployment will be provided to share with other LangSmith users. - 3. Specify `Environment Variables` and secrets. Existing secrets and environment variables are prepopulated. See the [Environment Variables reference](/langsmith/env-var) to configure additional variables for the revision. +1. In the **Deployment** view, in the top-right corner, select **+ New Revision**. +1. In the **New Revision** modal, fill out the required fields. + 1. Specify the full path to the [API config file](/langsmith/cli#configuration-file) including the file name. For example, if the file `langgraph.json` is in the root of the repository, specify `langgraph.json`. + 1. Determine if the deployment should be **Shareable through Studio**. + - If unchecked, the deployment will only be accessible with a valid LangSmith API key for the [workspace](/langsmith/administration-overview#workspaces). + - If checked, the deployment will be accessible through [Studio](/langsmith/studio) to any LangSmith user. A direct URL to Studio for the deployment will be provided to share with other LangSmith users. + 1. Specify **Environment Variables** and secrets. Existing secrets and environment variables are prepopulated. To configure additional variables for the revision, refer to the [Environment Variables reference](/langsmith/env-var). 1. Add new secrets or environment variables. - 2. Remove existing secrets or environment variables. - 3. Update the value of existing secrets or environment variables. -5. Select `Submit`. 
After a few seconds, the `New Revision` modal will close and the new revision will be queued for deployment. + 1. Remove existing secrets or environment variables. + 1. Update the value of existing secrets or environment variables. +1. Select **Submit**. After a few seconds, the **New Revision** modal will close and the new revision will be queued for deployment. ## View build and server logs @@ -75,18 +70,18 @@ Build and server logs are available for each revision. Starting from the **Deployments** view: -1. Select the desired revision from the `Revisions` table. A panel slides open from the right-hand side and the `Build` tab is selected by default, which displays build logs for the revision. -2. In the panel, select the `Server` tab to view server logs for the revision. Server logs are only available after a revision has been deployed. -3. Within the `Server` tab, adjust the date/time range picker as needed. By default, the date/time range picker is set to the `Last 7 days`. +1. Select the desired revision from the **Revisions** table. A panel slides open from the right-hand side and the **Build** tab is selected by default, which displays build logs for the revision. +1. In the panel, select the **Server** tab to view server logs for the revision. Server logs are only available after a revision has been deployed. +1. Within the **Server** tab, adjust the date/time range picker as needed. By default, the date/time range picker is set to the **Last 7 days**. ## View deployment metrics -Starting from the LangSmith UI... -1. In the left-hand navigation panel, select **Deployments**, which contains a list of existing deployments. -2. Select an existing deployment to monitor. -3. Select the `Monitoring` tab to view the deployment metrics. See a list of [all available metrics](/langsmith/control-plane#monitoring). -4. Within the `Monitoring` tab, use the date/time range picker as needed. By default, the date/time range picker is set to the `Last 15 minutes`. +Starting from the [LangSmith UI](https://smith.langchain.com): +1. In the left-hand navigation panel, select **Deployments**. +1. Select an existing deployment to monitor. +1. Select the **Monitoring** tab to view the deployment metrics. Refer to the list of [all available metrics](/langsmith/control-plane#monitoring). +1. Within the **Monitoring** tab, use the date/time range picker as needed. By default, the date/time range picker is set to the **Last 15 minutes**. ## Interrupt revision @@ -99,38 +94,38 @@ Interrupted revisions have undefined behavior. This is only useful if you need t Starting from the **Deployments** view: -1. Select the menu icon (three dots) on the right-hand side of the row for the desired revision from the `Revisions` table. -2. Select `Interrupt` from the menu. -3. A modal will appear. Review the confirmation message. Select `Interrupt revision`. +1. Select the menu icon (three dots) on the right-hand side of the row for the desired revision from the **Revisions** table. +1. Select **Interrupt** from the menu. +1. A modal will appear. Review the confirmation message. Select **Interrupt revision**. ## Delete deployment -Starting from the LangSmith UI... +Starting from the [LangSmith UI](https://smith.langchain.com): 1. In the left-hand navigation panel, select **Deployments**, which contains a list of existing deployments. -2. Select the menu icon (three dots) on the right-hand side of the row for the desired deployment and select `Delete`. -3. A `Confirmation` modal will appear. Select `Delete`. +1. 
Select the menu icon (three dots) on the right-hand side of the row for the desired deployment and select **Delete**. +1. A **Confirmation** modal will appear. Select **Delete**. ## Deployment settings Starting from the **Deployments** view: -1. In the top-right corner, select the gear icon (`Deployment Settings`). -2. Update the `Git Branch` to the desired branch. -3. Check/uncheck checkbox to `Automatically update deployment on push to branch`. +1. In the top-right corner, select the gear icon (**Deployment Settings**). +1. Update the **Git Branch** to the desired branch. +1. Check or uncheck the checkbox for **Automatically update deployment on push to branch**. 1. Branch creation/deletion and tag creation/deletion events will not trigger an update. Only pushes to an existing branch will trigger an update. - 2. Pushes in quick succession to a branch will queue subsequent updates. Once a build completes, the most recent commit will begin building and the other queued builds will be skipped. + 1. Pushes in quick succession to a branch will queue subsequent updates. Once a build completes, the most recent commit will begin building and the other queued builds will be skipped. ## Add or remove GitHub repositories After installing and authorizing LangChain's `hosted-langserve` GitHub app, repository access for the app can be modified to add new repositories or remove existing repositories. If a new repository is created, it may need to be added explicitly. -1. From the GitHub profile, navigate to `Settings` > `Applications` > `hosted-langserve` > click `Configure`. -2. Under `Repository access`, select `All repositories` or `Only select repositories`. If `Only select repositories` is selected, new repositories must be explicitly added. -3. Click `Save`. -4. When creating a new deployment, the list of GitHub repositories in the dropdown menu will be updated to reflect the repository access changes. +1. From the GitHub profile, navigate to **Settings** > **Applications** > `hosted-langserve` > click **Configure**. +1. Under **Repository access**, select **All repositories** or **Only select repositories**. If **Only select repositories** is selected, new repositories must be explicitly added. +1. Click **Save**. +1. When creating a new deployment, the list of GitHub repositories in the dropdown menu will be updated to reflect the repository access changes. -## Allowlisting IP addresses +## Allowlist IP addresses All traffic from LangSmith deployments created after January 6th 2025 will come through a NAT gateway. This NAT gateway will have several static ip addresses depending on the region you are deploying in. Refer to the table below for the list of IP addresses to allowlist: diff --git a/src/langsmith/observability-studio.mdx b/src/langsmith/observability-studio.mdx index 26405b953c..6e1ac4550b 100644 --- a/src/langsmith/observability-studio.mdx +++ b/src/langsmith/observability-studio.mdx @@ -155,6 +155,15 @@ Before running an experiment, ensure you have the following: - An application deployed on [LangSmith](/langsmith/deployments). - A locally running application started via the [langgraph-cli](/langsmith/local-server). + +Studio experiments follow the same [data retention](/langsmith/administration-overview#data-retention) rules as other experiments. By default, traces have base tier retention (14 days). However, traces will automatically upgrade to extended tier retention (400 days) if feedback is added to them.
Feedback can be added in one of two ways: + +- The [dataset has evaluators configured](/langsmith/bind-evaluator-to-dataset). +- [Feedback](/langsmith/observability-concepts#feedback) is manually added to a trace. + +This auto-upgrade increases both the retention period and the cost of the trace. For more details, refer to [Data retention auto-upgrades](/langsmith/administration-overview#how-it-works). + + ### Experiment setup 1. Launch the experiment. Click the **Run experiment** button in the top right corner of the Studio page. diff --git a/src/langsmith/quick-start-studio.mdx b/src/langsmith/quick-start-studio.mdx index f0ce428eac..4d0e9eb87f 100644 --- a/src/langsmith/quick-start-studio.mdx +++ b/src/langsmith/quick-start-studio.mdx @@ -3,7 +3,7 @@ title: Get started with Studio sidebarTitle: Quickstart --- -[Studio](/langsmith/studio) in the [LangSmith Deployments UI](https://smith.langchain.com) supports connecting to two types of graphs: +[Studio](/langsmith/studio) in the [LangSmith Deployment UI](https://smith.langchain.com) supports connecting to two types of graphs: - Graphs deployed on [cloud or self-hosted](#deployed-graphs). - Graphs running locally with [Agent Server](#local-development-server). diff --git a/src/langsmith/setup-app-requirements-txt.mdx b/src/langsmith/setup-app-requirements-txt.mdx index 565346f861..9d3aeafb4a 100644 --- a/src/langsmith/setup-app-requirements-txt.mdx +++ b/src/langsmith/setup-app-requirements-txt.mdx @@ -3,8 +3,7 @@ title: How to set up an application with requirements.txt sidebarTitle: With requirements.txt --- -import FrameworkAgnosticPy from '/snippets/langsmith/framework-agnostic-py.mdx'; -import FrameworkAgnosticJS from '/snippets/langsmith/framework-agnostic-js.mdx'; +import FrameworkAgnostic from '/snippets/langsmith/framework-agnostic.mdx'; import PrereleaseBehavior from '/snippets/langsmith/pre-release-behavior.mdx'; An application must be configured with a [configuration file](/langsmith/cli#configuration-file) in order to be deployed to LangSmith (or to be self-hosted). This how-to guide discusses the basic steps to set up an application for deployment using `requirements.txt` to specify project dependencies. @@ -29,12 +28,7 @@ my-app/ ``` -:::python - -::: -:::js - -::: + You can also set up with: diff --git a/src/langsmith/setup-javascript.mdx b/src/langsmith/setup-javascript.mdx index fa20dd2652..839e7211ca 100644 --- a/src/langsmith/setup-javascript.mdx +++ b/src/langsmith/setup-javascript.mdx @@ -3,8 +3,7 @@ title: How to set up a JavaScript application sidebarTitle: Set up a JavaScript application --- -import FrameworkAgnosticPy from '/snippets/langsmith/framework-agnostic-py.mdx'; -import FrameworkAgnosticJS from '/snippets/langsmith/framework-agnostic-js.mdx'; +import FrameworkAgnostic from '/snippets/langsmith/framework-agnostic.mdx'; An application must be configured with a [configuration file](/langsmith/cli#configuration-file) in order to be deployed to LangSmith (or to be self-hosted). This how-to guide discusses the basic steps to set up a JavaScript application for deployment using `package.json` to specify project dependencies. @@ -26,12 +25,7 @@ my-app/ ``` -:::python - -::: -:::js - -::: + After each step, an example file directory is provided to demonstrate how code can be organized. 
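To make the configuration file these setup guides revolve around concrete, here is a minimal sketch of the kind of `langgraph.json` a JavaScript project like the `my-app/` example above might use. The graph name, source path, and export name are illustrative assumptions, not fixed requirements:

```json
{
  "node_version": "20",
  "dependencies": ["."],
  "graphs": {
    "agent": "./src/agent.ts:graph"
  },
  "env": ".env"
}
```

Here `"agent"` is the name the server will expose for the graph, and `./src/agent.ts:graph` points at the exported compiled graph; adjust both to match your own project layout.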
diff --git a/src/langsmith/setup-pyproject.mdx b/src/langsmith/setup-pyproject.mdx index c29ed5bc4f..6a931bb3d9 100644 --- a/src/langsmith/setup-pyproject.mdx +++ b/src/langsmith/setup-pyproject.mdx @@ -3,8 +3,7 @@ title: How to set up an application with pyproject.toml sidebarTitle: With pyproject.toml --- -import FrameworkAgnosticPy from '/snippets/langsmith/framework-agnostic-py.mdx'; -import FrameworkAgnosticJS from '/snippets/langsmith/framework-agnostic-js.mdx'; +import FrameworkAgnostic from '/snippets/langsmith/framework-agnostic.mdx'; import PrereleaseBehavior from '/snippets/langsmith/pre-release-behavior.mdx'; An application must be configured with a [configuration file](/langsmith/cli#configuration-file) in order to be deployed to LangSmith (or to be self-hosted). This how-to guide discusses the basic steps to set up an application for deployment using `pyproject.toml` to define your package's dependencies. @@ -28,12 +27,7 @@ my-app/ └── pyproject.toml # dependencies for your project -:::python - -::: -:::js - -::: + You can also set up with: diff --git a/src/langsmith/use-stream-react.mdx b/src/langsmith/use-stream-react.mdx index 1b5157f9ff..36ebc16c02 100644 --- a/src/langsmith/use-stream-react.mdx +++ b/src/langsmith/use-stream-react.mdx @@ -8,7 +8,7 @@ sidebarTitle: Integrate LangGraph into your React application * [Agent Server](/langsmith/agent-server) -The [`useStream()`](https://langchain-ai.github.io/langgraphjs/reference/modules/sdk.html) React hook provides a seamless way to integrate LangGraph into your React applications. It handles all the complexities of streaming, state management, and branching logic, letting you focus on building great chat experiences. +The [`useStream()`](https://reference.langchain.com/javascript/functions/_langchain_langgraph-sdk.react.useStream.html) React hook provides a seamless way to integrate LangGraph into your React applications. It handles all the complexities of streaming, state management, and branching logic, letting you focus on building great chat experiences. Key features: @@ -17,11 +17,11 @@ Key features: * Conversation branching: Create alternate conversation paths from any point in the chat history * UI-agnostic design: bring your own components and styling -Let's explore how to use [`useStream()`](https://langchain-ai.github.io/langgraphjs/reference/modules/sdk.html) in your React application. +Let's explore how to use [`useStream()`](https://reference.langchain.com/javascript/functions/_langchain_langgraph-sdk.react.useStream.html) in your React application. -The [`useStream()`](https://langchain-ai.github.io/langgraphjs/reference/modules/sdk.html) provides a solid foundation for creating bespoke chat experiences. For pre-built chat components and interfaces, we also recommend checking out [CopilotKit](https://docs.copilotkit.ai/coagents/quickstart/langgraph) and [assistant-ui](https://www.assistant-ui.com/docs/runtimes/langgraph). +The [`useStream()`](https://reference.langchain.com/javascript/functions/_langchain_langgraph-sdk.react.useStream.html) hook provides a solid foundation for creating bespoke chat experiences. For pre-built chat components and interfaces, we also recommend checking out [CopilotKit](https://docs.copilotkit.ai/coagents/quickstart/langgraph) and [assistant-ui](https://www.assistant-ui.com/docs/runtimes/langgraph).
-## Installation +## Install the SDK ```bash npm install @langchain/langgraph-sdk @langchain/core @@ -76,9 +76,9 @@ export default function App() { } ``` -## Customizing Your UI +## Customizing your UI -The [`useStream()`](https://langchain-ai.github.io/langgraphjs/reference/modules/sdk.html) hook takes care of all the complex state management behind the scenes, providing you with simple interfaces to build your UI. Here's what you get out of the box: +The [`useStream()`](https://reference.langchain.com/javascript/functions/_langchain_langgraph-sdk.react.useStream.html) hook takes care of all the complex state management behind the scenes, providing you with simple interfaces to build your UI. Here's what you get out of the box: * Thread state management * Loading and error states @@ -88,7 +88,7 @@ The [`useStream()`](https://langchain-ai.github.io/langgraphjs/reference/modules Here are some examples on how to use these features effectively: -### Loading States +### Loading states The `isLoading` property tells you when a stream is active, enabling you to: @@ -118,7 +118,7 @@ export default function App() { ### Resume a stream after page refresh -The [`useStream()`](https://langchain-ai.github.io/langgraphjs/reference/modules/sdk.html) hook can automatically resume an ongoing run upon mounting by setting `reconnectOnMount: true`. This is useful for continuing a stream after a page refresh, ensuring no messages and events generated during the downtime are lost. +The [`useStream()`](https://reference.langchain.com/javascript/functions/_langchain_langgraph-sdk.react.useStream.html) hook can automatically resume an ongoing run upon mounting by setting `reconnectOnMount: true`. This is useful for continuing a stream after a page refresh, ensuring no messages and events generated during the downtime are lost. ```tsx const thread = useStream<{ messages: Message[] }>({ @@ -225,7 +225,7 @@ function useSearchParam(key: string) { } ``` -### Thread Management +### Thread management Keep track of conversations with built-in thread management. You can access the current thread ID and get notified when new threads are created: @@ -243,9 +243,9 @@ const thread = useStream<{ messages: Message[] }>({ We recommend storing the `threadId` in your URL's query parameters to let users resume conversations after page refreshes. -### Messages Handling +### Messages handling -The [`useStream()`](https://langchain-ai.github.io/langgraphjs/reference/modules/sdk.html) hook will keep track of the message chunks received from the server and concatenate them together to form a complete message. The completed message chunks can be retrieved via the `messages` property. +The [`useStream()`](https://reference.langchain.com/javascript/functions/_langchain_langgraph-sdk.react.useStream.html) hook will keep track of the message chunks received from the server and concatenate them together to form a complete message. The completed message chunks can be retrieved via the `messages` property. By default, the `messagesKey` is set to `messages`, where it will append the new messages chunks to `values["messages"]`. If you store messages in a different key, you can change the value of `messagesKey`. @@ -270,11 +270,29 @@ export default function HomePage() { } ``` -Under the hood, the [`useStream()`](https://langchain-ai.github.io/langgraphjs/reference/modules/sdk.html) hook will use the `streamMode: "messages-tuple"` to receive a stream of messages (i.e. 
individual LLM tokens) from any LangChain chat model invocations inside your graph nodes. Learn more about messages streaming in the [streaming](/langsmith/streaming#messages) guide. +Under the hood, [`useStream()`](https://reference.langchain.com/javascript/functions/_langchain_langgraph-sdk.react.useStream.html) automatically subscribes to multiple [stream modes](/langsmith/streaming#supported-stream-modes) to provide a complete picture of your graph's execution. The `messages` property specifically uses `messages-tuple` mode to receive individual LLM tokens from chat model invocations. Learn more about messages streaming in the [streaming](/langsmith/streaming#messages) guide. + +### Accessing full graph state + +Beyond messages, you can access the complete graph state via the `values` property. This includes any state your graph maintains, not just the conversation history: + +```tsx +const thread = useStream<{ messages: Message[]; context: string; metadata: Record<string, unknown> }>({ + apiUrl: "http://localhost:2024", + assistantId: "agent", + messagesKey: "messages", +}); + +// Access the full state +console.log(thread.values); +// { messages: [...], context: "...", metadata: {...} } +``` + +This is powered by the `values` stream mode under the hood, which streams the full state after each graph step. ### Interrupts -The [`useStream()`](https://langchain-ai.github.io/langgraphjs/reference/modules/sdk.html) hook exposes the `interrupt` property, which will be filled with the last interrupt from the thread. You can use interrupts to: +The [`useStream()`](https://reference.langchain.com/javascript/functions/_langchain_langgraph-sdk.react.useStream.html) hook exposes the `interrupt` property, which will be filled with the last interrupt from the thread. You can use interrupts to: * Render a confirmation UI before executing a node * Wait for human input, allowing agent to ask the user with clarifying questions @@ -533,7 +551,7 @@ const CachedThreadExample = ({ threadId, cachedThreadData }) => { }; -### Optimistic Thread Creation +### Optimistic thread creation Use the `threadId` option in `submit` function to enable optimistic UI patterns where you need to know the thread ID before the thread is actually created. @@ -576,7 +594,7 @@ const OptimisticThreadExample = () => { ### TypeScript -The [`useStream()`](https://langchain-ai.github.io/langgraphjs/reference/modules/sdk.html) hook is friendly for apps written in TypeScript and you can specify types for the state to get better type safety and IDE support. +The [`useStream()`](https://reference.langchain.com/javascript/functions/_langchain_langgraph-sdk.react.useStream.html) hook is TypeScript-friendly: you can specify types for the state to get better type safety and IDE support. ```tsx // Define your types @@ -651,14 +669,54 @@ const thread = useStream< ## Event Handling -The [`useStream()`](https://langchain-ai.github.io/langgraphjs/reference/modules/sdk.html) hook provides several callback options to help you respond to different events: +The [`useStream()`](https://reference.langchain.com/javascript/functions/_langchain_langgraph-sdk.react.useStream.html) hook provides callback options that give you access to different types of streaming events beyond just messages.
You don't need to explicitly configure stream modes— just pass callbacks for the event types you want to handle: + +```tsx +const thread = useStream<{ messages: Message[] }>({ + apiUrl: "http://localhost:2024", + assistantId: "agent", + messagesKey: "messages", + + // Handle state updates after each graph step + onUpdateEvent: (update, options) => { + console.log("Graph update:", update); + // Access which node produced this update, the new state values, etc. + }, + + // Handle custom events streamed from your graph + onCustomEvent: (event, options) => { + console.log("Custom event:", event); + // React to progress updates, debug info, or any custom data + }, + + // Handle metadata events with run/thread info + onMetadataEvent: (metadata) => { + console.log("Run ID:", metadata.run_id); + console.log("Thread ID:", metadata.thread_id); + }, + + onError: (error) => { + console.error("Stream error:", error); + }, + + onFinish: (state, options) => { + console.log("Stream finished with final state:", state); + }, +}); +``` + +### Available callbacks + +| Callback | Description | Stream mode | +|----------|-------------|-------------| +| `onUpdateEvent` | Called when a state update is received after each graph step | `updates` | +| `onCustomEvent` | Called when a custom event is received from your graph. See the [streaming](/oss/langgraph/streaming#stream-custom-data) guide. | `custom` | +| `onMetadataEvent` | Called with run and thread metadata | `metadata` | +| `onError` | Called when an error occurs | - | +| `onFinish` | Called when the stream completes | - | -* `onError`: Called when an error occurs. -* `onFinish`: Called when the stream is finished. -* `onUpdateEvent`: Called when an update event is received. -* `onCustomEvent`: Called when a custom event is received. See the [streaming](/oss/langgraph/streaming#stream-custom-data) guide to learn how to stream custom events. -* `onMetadataEvent`: Called when a metadata event is received, which contains the Run ID and Thread ID. +This design means you can access rich streaming data (state updates, custom events, metadata) without manually configuring stream modes—`useStream` handles the subscription for you. ## Learn More -* [JS/TS SDK Reference](https://langchain-ai.github.io/langgraphjs/reference/modules/sdk.html) +* [useStream API Reference](https://reference.langchain.com/javascript/functions/_langchain_langgraph-sdk.react.useStream.html) diff --git a/src/oss/contributing/code.mdx b/src/oss/contributing/code.mdx index 60f70fd9eb..f91a1f9603 100644 --- a/src/oss/contributing/code.mdx +++ b/src/oss/contributing/code.mdx @@ -556,7 +556,102 @@ Contributions must adhere to the following quality requirements: :::python - **Required**: [Google-style docstrings](https://google.github.io/styleguide/pyguide.html) for all public functions + **Required**: [Google-style docstrings](https://google.github.io/styleguide/pyguide.html) for all public functions. + + **Guiding principle**: Docstrings describe "what"; docs on this site explain the "how" and "why." 
+ + | Content type | Location | Purpose | + |--------------|----------|---------| + | Parameter descriptions | Docstrings | Auto-generates into API reference | + | Return types and exceptions | Docstrings | API reference | + | Minimal usage example | Docstrings | Show basic instantiation pattern | + | Feature tutorials | This site | In-depth walkthroughs | + | End-to-end examples | This site | Real-world usage patterns | + | Conceptual explanations | This site | Understanding and context | + + **Docstrings should contain:** + + 1. One-line summary of what the class/function does + 2. Link to this site for tutorials, guides, and usage patterns + 3. Parameter documentation with types and descriptions + 4. Return value description + 5. Exceptions that may be raised + 6. Single minimal example showing basic instantiation/usage as necessary + + + + ````python + class ChatAnthropic(BaseChatModel): + """Interface to Claude chat models. + + See the [usage guide](https://docs.langchain.com/oss/python/integrations/chat/anthropic) + for tutorials, feature walkthroughs, and examples. + + Args: + model: Model identifier (e.g., `'claude-sonnet-4-5-20250929'`). + temperature: Sampling temperature between `0` and `1`. + max_tokens: Maximum number of tokens to generate. + api_key: Anthropic API key. + + If not provided, reads from the `ANTHROPIC_API_KEY` + environment variable. + timeout: Request timeout in seconds. + max_retries: Maximum number of retries for failed requests. + + Returns: + A chat model instance that can be invoked with messages. + + Raises: + ValueError: If the model identifier is not recognized. + AuthenticationError: If the API key is invalid. + + Example: + ```python + from langchain_anthropic import ChatAnthropic + + model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + response = model.invoke("Hello!") + ``` + """ + ```` + + + Avoid duplicating content that belongs in OSS docs: + + - **Feature tutorials**: Don't include extended walkthroughs. Instead, link to this site: + + ```python + """ + ... + + See the [extended thinking guide](https://docs.langchain.com/oss/integrations/chat/anthropic#extended-thinking) + for configuration options. + + ... + """ + ``` + + - **Multiple example variations**: Include one minimal example, then link to comprehensive guides: + + ```python + """ + Example: + \`\`\`python + message = HumanMessage(content=[ + {"type": "image", "url": "https://example.com/image.jpg"} + ]) + \`\`\` + + See the [multimodal guide](https://docs.langchain.com/oss/integrations/chat/anthropic#multimodal) + for all supported input formats. + """ + ``` + + - **Conceptual explanations**: Keep to factual parameter descriptions. Link to docs for deeper context. + + - **MkDocs-specific syntax**: Avoid `???+`, accordions, or tabs in docstrings. They don't render in IDEs. + + ::: :::js **Required**: [JSDocs](https://jsdoc.app/about-getting-started) for all exported functions and interfaces @@ -592,11 +687,6 @@ Contributions must adhere to the following quality requirements: } ``` ::: - - - Document all parameters and return values - - Include usage examples for complex functions - - Document raised exceptions - - Focus on "why" rather than "what" diff --git a/src/oss/contributing/documentation.mdx b/src/oss/contributing/documentation.mdx index b76864a53f..891bf4642f 100644 --- a/src/oss/contributing/documentation.mdx +++ b/src/oss/contributing/documentation.mdx @@ -22,20 +22,20 @@ For quick changes like fixing typos or changing a link, you can edit directly on 1. 
At the bottom of the page you want to edit, click the link **Edit the source of this page on GitHub**. -1. GitHub will prompt you to fork the repository to your account. Make sure to fork into your personal account. +1. GitHub will prompt you to fork the repository to your account. Make sure to fork into your personal account. 1. Make the changes directly in GitHub's web editor. 1. Click **Commit changes...** and give your commit a descriptive title like `fix(docs): summary of change`. If applicable, add an [extended description](https://www.gitkraken.com/learn/git/best-practices/git-commit-message#git-commit-message-structure). -1. GitHub will redirect you to create a pull request. Give it a title (often the same as the commit) and follow the PR template checklist, if present. +1. GitHub will redirect you to create a pull request. Give it a title (often the same as the commit) and follow the PR template checklist. Docs PRs are typically reviewed within a few days. Keep an eye on your PR to address any feedback from maintainers. - Do not bump the PR unless you have new information to provide - maintainers will address it as their availability permits. + Do not bump the PR unless you have new information to provide – maintainers will address it as their availability permits. ### Larger edits and additions -For larger changes, additions, or ongoing contributions, it's important to set up a local development environment on your machine. Our documentation build pipeline offers local preview and live reload as you edit, important for ensuring your changes appear as intended before submitting. +For larger changes, additions, or ongoing contributions, it's necessary to set up a local development environment on your machine. Our documentation build pipeline offers local preview, important for ensuring your changes appear as intended before submitting. #### Set up local environment @@ -43,22 +43,26 @@ Before you can work on this project, ensure you have the following installed: **Required:** -- **Python** >= 3.13.0, < 4.0.0 -- **[`uv`](https://docs.astral.sh/uv/)** - Python package manager (used for dependency management) -- **Node.js** and **npm** - For Mintlify CLI and reference documentation builds -- **Make** - For running build commands (usually pre-installed on macOS/Linux) -- **Git** - For version control +- `python >= 3.13, < 4.0` +- [**`uv`**](https://docs.astral.sh/uv/) - Python package manager (used for dependency management) +- [**Node.js**](https://nodejs.org/en) and [**`npm`**](https://www.npmjs.com/) - For Mintlify CLI and reference documentation builds +- [**Make**](https://www.gnu.org/software/make/) - For running build commands +- [**Git**](https://git-scm.com/) - For version control **Optional but recommended:** - **[`markdownlint-cli`](https://github.com/igorshubovych/markdownlint-cli)** - For linting markdown files - ```bash - npm install -g markdownlint-cli - ``` + + ```bash + npm install -g markdownlint-cli + ``` - **[`pnpm`](https://pnpm.io/)** - Required only if you're working on reference documentation - ```bash - npm install -g pnpm@10.14.0 - ``` + + ```bash + npm install -g pnpm@10.14.0 + ``` + +- **[Mintlify MDX VSCode extension](https://www.mintlify.com/blog/mdx-vscode-extension)** **Setup steps:** @@ -98,12 +102,12 @@ See [Available commands](#available-commands) for more details. #### Edit documentation -**Only edit files in `src/`** – The `build/` directory is automatically generated. 
+ **Only edit files in `src/`** – The `build/` directory is automatically generated. -1. Ensure your [dev environment is set up](#set-up-local-environment) and that you have followed the steps in [IDE_SETUP.md](https://github.com/langchain-ai/docs/blob/main/IDE_SETUP.md) to configure your IDE/editor to automatically apply the correct settings. +1. Ensure your [environment is set up](#set-up-local-environment) and that you have followed the steps in [`IDE_SETUP.md`](https://github.com/langchain-ai/docs/blob/main/IDE_SETUP.md) to configure your IDE/editor to automatically apply the correct settings. -1. Edit files in `src/`: +1. Edit files in `src/` * Make changes to markdown files and the build system will automatically detect changes and rebuild affected files. * If OSS content varies between Python and JavaScript/TypeScript, add content for [both in the same file](#co-locate-python-and-javascripttypescript-oss-content). Otherwise, content will be identical for both languages. * Use [Mintlify syntax](https://mintlify.com/docs) for formatting. @@ -116,33 +120,40 @@ See [Available commands](#available-commands) for more details. This starts a development server with hot reload at `http://localhost:3000`. -1. Iterate: +1. Iterate + * Continue editing and see changes reflected immediately. * The development server rebuilds only changed files for faster feedback. 1. Run the [quality checks](#run-quality-checks) to ensure your changes are valid. 1. Get approval from the relevant reviewers. - To generate a preview build, [create a sharable preview build](#create-a-sharable-preview-build) (internal team members only). + LangChain team members can [generate a sharable preview build](#create-a-sharable-preview-build) -1. [Publish to production](#publish-to-prod) (internal team members only). +1. [Publish to production](#publish-to-prod) (team members only). #### Create a sharable preview build - Only internal team members can create sharable preview builds. + Only LangChain team members can create sharable preview builds. -When you create or update a PR, a [preview branch/ID](https://github.com/langchain-ai/docs/actions/workflows/create-preview-branch.yml) is automatically generated for you. A comment will be left on the PR with the ID, which you can then use to generate a preview. You can also run this workflow manually if needed. Previews are useful for sharing work-in-progress changes with others. + + Previews are useful for sharing work-in-progress changes with others. + + When you create or update a PR, a [preview branch/ID](https://github.com/langchain-ai/docs/actions/workflows/create-preview-branch.yml) is automatically generated for you. A comment will be left on the PR with the ID, which you can then use to generate a preview. (You can also run this workflow manually if needed.) -1. Copy the preview branch's ID from the comment. -1. In the [Mintlify dashboard](https://dashboard.mintlify.com/langchain-5e9cc07a/langchain-5e9cc07a?section=previews), click **Create preview deployment**. -1. Enter the preview branch's ID. -1. Click **Create deployment**. - A **Manual update** will display in the **Previews** table. -1. Select the preview and click **Visit** to view the preview build. + 1. Copy the preview branch's ID from the comment. + 1. In the [Mintlify dashboard](https://dashboard.mintlify.com/langchain-5e9cc07a/langchain-5e9cc07a?section=previews), click **Create preview deployment**. + 1. Enter the preview branch's ID. + 1. Click **Create deployment**. 
+ A **Manual update** will display in the **Previews** table. + 1. Select the preview and click **Visit** to view the preview build. -To redeploy the preview build, click **Redeploy** on the Mintlify dashboard. + To redeploy the preview build with the latest changes, click **Redeploy** on the Mintlify dashboard. + #### Run quality checks @@ -165,7 +176,7 @@ make lint_md_fix make test ``` -For more details, see the [available commands](https://github.com/langchain-ai/docs?tab=readme-ov-file#available-commands) section in the README. +For more details, see the [available commands](https://github.com/langchain-ai/docs?tab=readme-ov-file#available-commands) section in the `README`. All pull requests are automatically checked by CI/CD. The same linting and formatting standards will be enforced, and PRs cannot be merged if these checks fail. @@ -177,12 +188,16 @@ For more details, see the [available commands](https://github.com/langchain-ai/d Only internal team members can publish to production. -Once your branch has been merged into `main`, you need to push the changes to `prod` for them to render on the live docs site. Use the [Publish documentation GH action](https://github.com/langchain-ai/docs/actions/workflows/publish.yml): + + Once your branch has been merged into `main`, you need to push the changes to `prod` for them to render on the live docs site. Use the [Publish documentation GH action](https://github.com/langchain-ai/docs/actions/workflows/publish.yml): -1. Go to [Publish documentation](https://github.com/langchain-ai/docs/actions/workflows/publish.yml). -2. Click the **Run workflow** button. -3. Select the **main** branch to deploy. -4. Click **Run workflow**. + 1. Go to [Publish documentation](https://github.com/langchain-ai/docs/actions/workflows/publish.yml). + 2. Click the **Run workflow** button. + 3. Select the **main** branch to deploy. + 4. Click **Run workflow**. + ## Documentation types @@ -429,6 +444,51 @@ Ensure documentation is accessible to all users: - Use specific, actionable link text instead of "click here" - Include descriptive alt text for all images and diagrams +### Cross-referencing + +Use consistent cross-references to connect docs with API reference documentation. + +**From docs to API reference:** + +Use the `@[]` syntax to link to API reference pages: + +```mdx +See \@[`ChatAnthropic`] for all configuration options. + +The \@[`bind_tools`][ChatAnthropic.bind_tools] method accepts... +``` + +The build pipeline transforms these into proper markdown links based on the current language scope (Python or JavaScript). For example, `\@[ChatAnthropic]` becomes a link to the Python or JS API reference page depending on which version of the docs is being built, **but only if an entry exists in the `link_map.py` file!** See below for details. + + + +The `@[]` syntax is processed by [`handle_auto_links.py`](https://github.com/langchain-ai/docs/blob/main/pipeline/preprocessors/handle_auto_links.py). It looks up link keys in [`link_map.py`](https://github.com/langchain-ai/docs/blob/main/pipeline/preprocessors/link_map.py), which contains dictionary mappings for both Python and JavaScript scopes. 
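For intuition, here is a minimal sketch of what an entry might look like (the structure and paths below are assumptions for illustration, not the real contents of `link_map.py`):

```python
# Hypothetical sketch of pipeline/preprocessors/link_map.py.
# Keys are the autolink names; values are paths relative to the
# reference host for each language scope. All entries here are assumed.
LINK_MAPS = {
    "python": {
        "ChatAnthropic": "langchain_anthropic/ChatAnthropic/",  # assumed path
    },
    "js": {
        "ChatAnthropic": "classes/_langchain_anthropic.ChatAnthropic.html",  # assumed path
    },
}
```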
+ +**Supported formats:** + +| Syntax | Result | +|--------|--------| +| `\@[ChatAnthropic]` | Link with "ChatAnthropic" as the displayed text | +| `` \@[`ChatAnthropic`] `` | Link with `` `ChatAnthropic` `` (code formatted) as text | +| `\@[text][ChatAnthropic]` | Link with "text" as text and `ChatAnthropic` as the key in the link map | +| `\\@[ChatAnthropic]` | Escaped: renders as literal `\@[ChatAnthropic]` (no link – what's being used on this page!) | + +**Adding new links:** + +If a link isn't found in the map, it will be left unchanged in the output. To add a new autolink: + +1. Open `pipeline/preprocessors/link_map.py` +2. Add an entry to the appropriate scope (`python` or `js`) in `LINK_MAPS` +3. The key is the link name used in `\@[key]` or `\@[text][key]`, the value is the path relative to the reference host + + + +:::python +**From API reference stubs to OSS docs:** + +See the [`README`](https://github.com/langchain-ai/docs/blob/main/reference/python/README.md) for more information on linking from API reference stubs to Python OSS docs. Specifically see the `mkdocstrings` cross-reference [linking syntax](https://github.com/langchain-ai/docs/blob/main/reference/python/README.md#mkdocsmkdocstrings-python-cross-reference-linking-syntax). +::: + ## Get help Our goal is to have the simplest developer setup possible. Should you experience any difficulty getting setup, please ask in the [community slack](https://www.langchain.com/join-community) or open a [forum post](https://forum.langchain.com/). Internal team members can reach out in the [#documentation](https://langchain.slack.com/archives/C04GWPE38LV) Slack channel. diff --git a/src/oss/deepagents/backends.mdx b/src/oss/deepagents/backends.mdx index df93256c80..acbf77ee0b 100644 --- a/src/oss/deepagents/backends.mdx +++ b/src/oss/deepagents/backends.mdx @@ -139,7 +139,7 @@ const agent = createDeepAgent({ **Best for:** - When you already run with a configured LangGraph store (for example, Redis, Postgres, or cloud implementations behind `BaseStore`). -- When you're deploying your agent through LangSmith Deployments (a store is automatically provisioned for your agent). +- When you're deploying your agent through LangSmith Deployment (a store is automatically provisioned for your agent). ### CompositeBackend (router) diff --git a/src/oss/javascript/integrations/providers/all_providers.mdx b/src/oss/javascript/integrations/providers/all_providers.mdx index f913fe87b5..374a913873 100644 --- a/src/oss/javascript/integrations/providers/all_providers.mdx +++ b/src/oss/javascript/integrations/providers/all_providers.mdx @@ -49,6 +49,28 @@ Browse the complete collection of integrations available for JavaScript/TypeScri +## LangGraph integrations + +Connect LangGraph agents to front ends. See the [LangGraph integrations](/oss/langgraph/integrations) page for more details. + + + + Open event-based protocol for connecting LangGraph agents to any frontend. + + + + React framework with pre-built UI components for AI copilots. + + + ## Chat Models diff --git a/src/oss/javascript/integrations/tools/anthropic.mdx b/src/oss/javascript/integrations/tools/anthropic.mdx new file mode 100644 index 0000000000..854e18bafd --- /dev/null +++ b/src/oss/javascript/integrations/tools/anthropic.mdx @@ -0,0 +1,614 @@ +--- +title: Tools +--- + +The `@langchain/anthropic` package provides LangChain-compatible wrappers for Anthropic's built-in tools. These tools can be bound to `ChatAnthropic` using `bindTools()` or @[`createAgent`]. 
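For example, here is a minimal sketch of passing a built-in tool to @[`createAgent`] (the model choice and prompt are illustrative):

```typescript
import { ChatAnthropic, tools } from "@langchain/anthropic";
import { createAgent } from "langchain";

const agent = createAgent({
  model: new ChatAnthropic({ model: "claude-sonnet-4-5-20250929" }),
  // Server-side tools such as web search are executed by Anthropic,
  // so no execute callback is required here.
  tools: [tools.webSearch_20250305({ maxUses: 3 })],
});

const result = await agent.invoke({
  messages: [{ role: "user", content: "What happened in AI news this week?" }],
});
```

The sections below cover each built-in tool and bind them directly with `bindTools()`; either approach works.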
+ +### Memory Tool + +The memory tool (`memory_20250818`) enables Claude to store and retrieve information across conversations through a memory file directory. Claude can create, read, update, and delete files that persist between sessions, allowing it to build knowledge over time without keeping everything in the context window. + +```typescript +import { ChatAnthropic, tools } from "@langchain/anthropic"; + +// Create a simple in-memory file store (or use your own persistence layer) +const files = new Map(); + +const memory = tools.memory_20250818({ + execute: async (command) => { + switch (command.command) { + case "view": + if (!command.path || command.path === "/") { + return Array.from(files.keys()).join("\n") || "Directory is empty."; + } + return ( + files.get(command.path) ?? `Error: File not found: ${command.path}` + ); + case "create": + files.set(command.path!, command.file_text ?? ""); + return `Successfully created file: ${command.path}`; + case "str_replace": + const content = files.get(command.path!); + if (content && command.old_str) { + files.set( + command.path!, + content.replace(command.old_str, command.new_str ?? "") + ); + } + return `Successfully replaced text in: ${command.path}`; + case "delete": + files.delete(command.path!); + return `Successfully deleted: ${command.path}`; + // Handle other commands: insert, rename + default: + return `Unknown command`; + } + }, +}); + +const llm = new ChatAnthropic({ + model: "claude-sonnet-4-5-20250929", +}); + +const llmWithMemory = llm.bindTools([memory]); + +const response = await llmWithMemory.invoke( + "Remember that my favorite programming language is TypeScript" +); +``` + +For more information, see [Anthropic's Memory Tool documentation](https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/memory-tool). + +### Web Search Tool + +The web search tool (`webSearch_20250305`) gives Claude direct access to real-time web content, allowing it to answer questions with up-to-date information beyond its knowledge cutoff. Claude automatically cites sources from search results as part of its answer. + +```typescript +import { ChatAnthropic, tools } from "@langchain/anthropic"; + +const llm = new ChatAnthropic({ + model: "claude-sonnet-4-5-20250929", +}); + +// Basic usage +const response = await llm.invoke("What is the weather in NYC?", { + tools: [tools.webSearch_20250305()], +}); +``` + +The web search tool supports several configuration options: + +```typescript +const response = await llm.invoke("Latest news about AI?", { + tools: [ + tools.webSearch_20250305({ + // Maximum number of times the tool can be used in the API request + maxUses: 5, + // Only include results from these domains + allowedDomains: ["reuters.com", "bbc.com"], + // Or block specific domains (cannot be used with allowedDomains) + // blockedDomains: ["example.com"], + // Provide user location for more relevant results + userLocation: { + type: "approximate", + city: "San Francisco", + region: "California", + country: "US", + timezone: "America/Los_Angeles", + }, + }), + ], +}); +``` + +For more information, see [Anthropic's Web Search Tool documentation](https://docs.anthropic.com/en/docs/build-with-claude/tool-use/web-search-tool). + +### Web Fetch Tool + +The web fetch tool (`webFetch_20250910`) allows Claude to retrieve full content from specified web pages and PDF documents. Claude can only fetch URLs that have been explicitly provided by the user or that come from previous web search or web fetch results. 
+ +> **⚠️ Security Warning:** Enabling the web fetch tool in environments where Claude processes untrusted input alongside sensitive data poses data exfiltration risks. We recommend only using this tool in trusted environments or when handling non-sensitive data. + +```typescript +import { ChatAnthropic, tools } from "@langchain/anthropic"; + +const llm = new ChatAnthropic({ + model: "claude-sonnet-4-5-20250929", +}); + +// Basic usage - fetch content from a URL +const response = await llm.invoke( + "Please analyze the content at https://example.com/article", + { tools: [tools.webFetch_20250910()] } +); +``` + +The web fetch tool supports several configuration options: + +```typescript +const response = await llm.invoke( + "Summarize this research paper: https://arxiv.org/abs/2024.12345", + { + tools: [ + tools.webFetch_20250910({ + // Maximum number of times the tool can be used in the API request + maxUses: 5, + // Only fetch from these domains + allowedDomains: ["arxiv.org", "example.com"], + // Or block specific domains (cannot be used with allowedDomains) + // blockedDomains: ["example.com"], + // Enable citations for fetched content (optional, unlike web search) + citations: { enabled: true }, + // Maximum content length in tokens (helps control token usage) + maxContentTokens: 50000, + }), + ], + } +); +``` + +You can combine web fetch with web search for comprehensive information gathering: + +```typescript +import { tools } from "@langchain/anthropic"; + +const response = await llm.invoke( + "Find recent articles about quantum computing and analyze the most relevant one", + { + tools: [ + tools.webSearch_20250305({ maxUses: 3 }), + tools.webFetch_20250910({ maxUses: 5, citations: { enabled: true } }), + ], + } +); +``` + +For more information, see [Anthropic's Web Fetch Tool documentation](https://docs.anthropic.com/en/docs/build-with-claude/tool-use/web-fetch-tool). + +### Tool Search Tools + +The tool search tools enable Claude to work with hundreds or thousands of tools by dynamically discovering and loading them on-demand. This is useful when you have a large number of tools but don't want to load them all into the context window at once. 
+ +There are two variants: + +- **`toolSearchRegex_20251119`** - Claude constructs regex patterns (using Python's `re.search()` syntax) to search for tools +- **`toolSearchBM25_20251119`** - Claude uses natural language queries to search for tools using the BM25 algorithm + +```typescript +import { ChatAnthropic, tools } from "@langchain/anthropic"; +import { tool } from "langchain"; +import { z } from "zod"; + +const llm = new ChatAnthropic({ + model: "claude-sonnet-4-5-20250929", +}); + +// Create tools with defer_loading to make them discoverable via search +const getWeather = tool( + async (input: { location: string }) => { + return `Weather in ${input.location}: Sunny, 72°F`; + }, + { + name: "get_weather", + description: "Get the weather at a specific location", + schema: z.object({ + location: z.string(), + }), + extras: { defer_loading: true }, + } +); + +const getNews = tool( + async (input: { topic: string }) => { + return `Latest news about ${input.topic}...`; + }, + { + name: "get_news", + description: "Get the latest news about a topic", + schema: z.object({ + topic: z.string(), + }), + extras: { defer_loading: true }, + } +); + +// Claude will search and discover tools as needed +const response = await llm.invoke("What is the weather in San Francisco?", { + tools: [tools.toolSearchRegex_20251119(), getWeather, getNews], +}); +``` + +Using the BM25 variant for natural language search: + +```typescript +import { tools } from "@langchain/anthropic"; + +const response = await llm.invoke("What is the weather in San Francisco?", { + tools: [tools.toolSearchBM25_20251119(), getWeather, getNews], +}); +``` + +For more information, see [Anthropic's Tool Search documentation](https://docs.anthropic.com/en/docs/build-with-claude/tool-use/tool-search-tool). + +### Text Editor Tool + +The text editor tool (`textEditor_20250728`) enables Claude to view and modify text files, helping debug, fix, and improve code or other text documents. Claude can directly interact with files, providing hands-on assistance rather than just suggesting changes. 
+ +Available commands: + +- `view` - Examine file contents or list directory contents +- `str_replace` - Replace specific text in a file +- `create` - Create a new file with specified content +- `insert` - Insert text at a specific line number + +```typescript +import fs from "node:fs"; +import { ChatAnthropic, tools } from "@langchain/anthropic"; + +const llm = new ChatAnthropic({ + model: "claude-sonnet-4-5-20250929", +}); + +const textEditor = tools.textEditor_20250728({ + async execute(args) { + switch (args.command) { + case "view": + const content = fs.readFileSync(args.path, "utf-8"); + // Return with line numbers for Claude to reference + return content + .split("\n") + .map((line, i) => `${i + 1}: ${line}`) + .join("\n"); + case "str_replace": + let fileContent = fs.readFileSync(args.path, "utf-8"); + fileContent = fileContent.replace(args.old_str, args.new_str); + fs.writeFileSync(args.path, fileContent); + return "Successfully replaced text."; + case "create": + fs.writeFileSync(args.path, args.file_text); + return `Successfully created file: ${args.path}`; + case "insert": + const lines = fs.readFileSync(args.path, "utf-8").split("\n"); + lines.splice(args.insert_line, 0, args.new_str); + fs.writeFileSync(args.path, lines.join("\n")); + return `Successfully inserted text at line ${args.insert_line}`; + default: + return "Unknown command"; + } + }, + // Optional: limit file content length when viewing + maxCharacters: 10000, +}); + +const llmWithEditor = llm.bindTools([textEditor]); + +const response = await llmWithEditor.invoke( + "There's a syntax error in my primes.py file. Can you help me fix it?" +); +``` + +For more information, see [Anthropic's Text Editor Tool documentation](https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/text-editor-tool). + +### Computer Use Tool + +The computer use tools enable Claude to interact with desktop environments through screenshot capture, mouse control, and keyboard input for autonomous desktop interaction. + +> **⚠️ Security Warning:** Computer use is a beta feature with unique risks. Use a dedicated virtual machine or container with minimal privileges. Avoid giving access to sensitive data. + +There are two variants: + +- **`computer_20251124`** - For Claude Opus 4.5 (includes zoom capability) +- **`computer_20250124`** - For Claude 4 and Claude 3.7 models + +Available actions: + +- `screenshot` - Capture the current screen +- `left_click`, `right_click`, `middle_click` - Mouse clicks at coordinates +- `double_click`, `triple_click` - Multi-click actions +- `left_click_drag` - Click and drag operations +- `left_mouse_down`, `left_mouse_up` - Granular mouse control +- `scroll` - Scroll the screen +- `type` - Type text +- `key` - Press keyboard keys/shortcuts +- `mouse_move` - Move the cursor +- `hold_key` - Hold a key while performing other actions +- `wait` - Wait for a specified duration +- `zoom` - View specific screen regions at full resolution (Claude Opus 4.5 only) + +```typescript +import { ChatAnthropic, tools } from "@langchain/anthropic"; + +const llm = new ChatAnthropic({ + model: "claude-sonnet-4-5-20250929", +}); + +const computer = tools.computer_20250124({ + // Required: specify display dimensions + displayWidthPx: 1024, + displayHeightPx: 768, + // Optional: X11 display number + displayNumber: 1, + execute: async (action) => { + switch (action.action) { + case "screenshot": + // Capture and return base64-encoded screenshot + // ... + case "left_click": + // Click at the specified coordinates + // ... + // ... 
+ } + }, +}); + +const llmWithComputer = llm.bindTools([computer]); + +const response = await llmWithComputer.invoke( + "Save a picture of a cat to my desktop." +); +``` + +For Claude Opus 4.5 with zoom support: + +```typescript +import { tools } from "@langchain/anthropic"; + +const computer = tools.computer_20251124({ + displayWidthPx: 1920, + displayHeightPx: 1080, + // Enable zoom for detailed screen region inspection + enableZoom: true, + execute: async (action) => { + // Handle actions including "zoom" for Claude Opus 4.5 + // ... + }, +}); +``` + +For more information, see [Anthropic's Computer Use documentation](https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/computer-use). + +### Code Execution Tool + +The code execution tool (`codeExecution_20250825`) allows Claude to run Bash commands and manipulate files in a secure, sandboxed environment. Claude can analyze data, create visualizations, perform calculations, and process files. + +When this tool is provided, Claude automatically gains access to: + +- **Bash commands** - Execute shell commands for system operations +- **File operations** - Create, view, and edit files directly + +```typescript +import { ChatAnthropic, tools } from "@langchain/anthropic"; + +const llm = new ChatAnthropic({ + model: "claude-sonnet-4-5-20250929", +}); + +// Basic usage - calculations and data analysis +const response = await llm.invoke( + "Calculate the mean and standard deviation of [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]", + { tools: [tools.codeExecution_20250825()] } +); + +// File operations and visualization +const response2 = await llm.invoke( + "Create a matplotlib visualization of sales data and save it as chart.png", + { tools: [tools.codeExecution_20250825()] } +); +``` + +Container reuse for multi-step workflows: + +```typescript +// First request - creates a container +const response1 = await llm.invoke("Write a random number to /tmp/number.txt", { + tools: [tools.codeExecution_20250825()], +}); + +// Extract container ID from response for reuse +const containerId = response1.response_metadata?.container?.id; + +// Second request - reuse container to access the file +const response2 = await llm.invoke( + "Read /tmp/number.txt and calculate its square", + { + tools: [tools.codeExecution_20250825()], + container: containerId, + } +); +``` + +For more information, see [Anthropic's Code Execution Tool documentation](https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/code-execution-tool). + +### Bash Tool + +The bash tool (`bash_20250124`) enables shell command execution in a persistent bash session. Unlike the sandboxed code execution tool, this tool requires you to provide your own execution environment. + +> **⚠️ Security Warning:** The bash tool provides direct system access. Implement safety measures such as running in isolated environments (Docker/VM), command filtering, and resource limits. 
+ +The bash tool provides: + +- **Persistent bash session** - Maintains state between commands +- **Shell command execution** - Run any shell command +- **Environment access** - Access to environment variables and working directory +- **Command chaining** - Support for pipes, redirects, and scripting + +Available commands: + +- Execute a command: `{ command: "ls -la" }` +- Restart the session: `{ restart: true }` + +```typescript +import { ChatAnthropic, tools } from "@langchain/anthropic"; +import { execSync } from "child_process"; + +const llm = new ChatAnthropic({ + model: "claude-sonnet-4-5-20250929", +}); + +const bash = tools.bash_20250124({ + execute: async (args) => { + if (args.restart) { + // Reset session state + return "Bash session restarted"; + } + try { + const output = execSync(args.command, { + encoding: "utf-8", + timeout: 30000, + }); + return output; + } catch (error) { + return `Error: ${(error as Error).message}`; + } + }, +}); + +const llmWithBash = llm.bindTools([bash]); + +const response = await llmWithBash.invoke( + "List all Python files in the current directory" +); + +// Process tool calls and execute commands +console.log(response.tool_calls?.[0].name); // "bash" +console.log(response.tool_calls?.[0].args.command); // "ls -la *.py" +``` + +For more information, see [Anthropic's Bash Tool documentation](https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/bash-tool). + +### MCP Toolset + +The MCP toolset (`mcpToolset_20251120`) enables Claude to connect to remote MCP (Model Context Protocol) servers directly from the Messages API without implementing a separate MCP client. This allows Claude to use tools provided by MCP servers. + +Key features: + +- **Direct API integration** - Connect to MCP servers without implementing an MCP client +- **Tool calling support** - Access MCP tools through the Messages API +- **Flexible tool configuration** - Enable all tools, allowlist specific tools, or denylist unwanted tools +- **Per-tool configuration** - Configure individual tools with custom settings +- **OAuth authentication** - Support for OAuth Bearer tokens for authenticated servers +- **Multiple servers** - Connect to multiple MCP servers in a single request + +```typescript +import { ChatAnthropic, tools } from "@langchain/anthropic"; + +const llm = new ChatAnthropic({ + model: "claude-sonnet-4-5-20250929", +}); + +// Basic usage - enable all tools from an MCP server +const response = await llm.invoke("What tools do you have available?", { + mcp_servers: [ + { + type: "url", + url: "https://example-server.modelcontextprotocol.io/sse", + name: "example-mcp", + authorization_token: "YOUR_TOKEN", + }, + ], + tools: [tools.mcpToolset_20251120({ serverName: "example-mcp" })], +}); +``` + +**Allowlist pattern** - Enable only specific tools: + +```typescript +const response = await llm.invoke("Search for events", { + mcp_servers: [ + { + type: "url", + url: "https://calendar.example.com/sse", + name: "google-calendar-mcp", + authorization_token: "YOUR_TOKEN", + }, + ], + tools: [ + tools.mcpToolset_20251120({ + serverName: "google-calendar-mcp", + // Disable all tools by default + defaultConfig: { enabled: false }, + // Explicitly enable only these tools + configs: { + search_events: { enabled: true }, + create_event: { enabled: true }, + }, + }), + ], +}); +``` + +**Denylist pattern** - Disable specific tools: + +```typescript +const response = await llm.invoke("List my events", { + mcp_servers: [ + { + type: "url", + url: "https://calendar.example.com/sse", + 
name: "google-calendar-mcp", + authorization_token: "YOUR_TOKEN", + }, + ], + tools: [ + tools.mcpToolset_20251120({ + serverName: "google-calendar-mcp", + // All tools enabled by default, just disable dangerous ones + configs: { + delete_all_events: { enabled: false }, + share_calendar_publicly: { enabled: false }, + }, + }), + ], +}); +``` + +**Multiple MCP servers**: + +```typescript +const response = await llm.invoke("Use tools from both servers", { + mcp_servers: [ + { + type: "url", + url: "https://mcp.example1.com/sse", + name: "mcp-server-1", + authorization_token: "TOKEN1", + }, + { + type: "url", + url: "https://mcp.example2.com/sse", + name: "mcp-server-2", + authorization_token: "TOKEN2", + }, + ], + tools: [ + tools.mcpToolset_20251120({ serverName: "mcp-server-1" }), + tools.mcpToolset_20251120({ + serverName: "mcp-server-2", + defaultConfig: { deferLoading: true }, + }), + ], +}); +``` + +**With Tool Search** - Use deferred loading for on-demand tool discovery: + +```typescript +const response = await llm.invoke("Find and use the right tool", { + mcp_servers: [ + { + type: "url", + url: "https://example.com/sse", + name: "example-mcp", + }, + ], + tools: [ + tools.toolSearchRegex_20251119(), + tools.mcpToolset_20251120({ + serverName: "example-mcp", + defaultConfig: { deferLoading: true }, + }), + ], +}); +``` + +For more information, see [Anthropic's MCP Connector documentation](https://docs.anthropic.com/en/docs/agents-and-tools/mcp-connector). diff --git a/src/oss/javascript/integrations/tools/openai.mdx b/src/oss/javascript/integrations/tools/openai.mdx new file mode 100644 index 0000000000..f4dff84378 --- /dev/null +++ b/src/oss/javascript/integrations/tools/openai.mdx @@ -0,0 +1,584 @@ +--- +title: Tools +--- + +The `@langchain/openai` package provides LangChain-compatible wrappers for OpenAI's built-in tools. These tools can be bound to `ChatOpenAI` using `bindTools()` or @[`createAgent`]. + +### Web Search Tool + +The web search tool allows OpenAI models to search the web for up-to-date information before generating a response. Web search supports three main types: + +1. **Non-reasoning web search**: Quick lookups where the model passes queries directly to the search tool +2. **Agentic search with reasoning models**: The model actively manages the search process, analyzing results and deciding whether to keep searching +3. 
**Deep research**: Extended investigations using models like `o3-deep-research` or `gpt-5` with high reasoning effort + +```typescript +import { ChatOpenAI, tools } from "@langchain/openai"; + +const model = new ChatOpenAI({ + model: "gpt-4o", +}); + +// Basic usage +const response = await model.invoke( + "What was a positive news story from today?", + { + tools: [tools.webSearch()], + } +); +``` + +**Domain filtering** - Limit search results to specific domains (up to 100): + +```typescript +const response = await model.invoke("Latest AI research news", { + tools: [ + tools.webSearch({ + filters: { + allowedDomains: ["arxiv.org", "nature.com", "science.org"], + }, + }), + ], +}); +``` + +**User location** - Refine search results based on geography: + +```typescript +const response = await model.invoke("What are the best restaurants near me?", { + tools: [ + tools.webSearch({ + userLocation: { + type: "approximate", + country: "US", + city: "San Francisco", + region: "California", + timezone: "America/Los_Angeles", + }, + }), + ], +}); +``` + +**Cache-only mode** - Disable live internet access: + +```typescript +const response = await model.invoke("Find information about OpenAI", { + tools: [ + tools.webSearch({ + externalWebAccess: false, + }), + ], +}); +``` + +For more information, see [OpenAI's Web Search Documentation](https://platform.openai.com/docs/guides/tools-web-search). + +### MCP Tool (Model Context Protocol) + +The MCP tool allows OpenAI models to connect to remote MCP servers and OpenAI-maintained service connectors, giving models access to external tools and services. + +There are two ways to use MCP tools: + +1. **Remote MCP servers**: Connect to any public MCP server via URL +2. **Connectors**: Use OpenAI-maintained wrappers for popular services like Google Workspace or Dropbox + +**Remote MCP server** - Connect to any MCP-compatible server: + +```typescript +import { ChatOpenAI, tools } from "@langchain/openai"; + +const model = new ChatOpenAI({ model: "gpt-4o" }); + +const response = await model.invoke("Roll 2d4+1", { + tools: [ + tools.mcp({ + serverLabel: "dmcp", + serverDescription: "A D&D MCP server for dice rolling", + serverUrl: "https://dmcp-server.deno.dev/sse", + requireApproval: "never", + }), + ], +}); +``` + +**Service connectors** - Use OpenAI-maintained connectors for popular services: + +```typescript +const response = await model.invoke("What's on my calendar today?", { + tools: [ + tools.mcp({ + serverLabel: "google_calendar", + connectorId: "connector_googlecalendar", + authorization: "", + requireApproval: "never", + }), + ], +}); +``` + +For more information, see [OpenAI's MCP Documentation](https://platform.openai.com/docs/guides/tools-remote-mcp). + +### Code Interpreter Tool + +The Code Interpreter tool allows models to write and run Python code in a sandboxed environment to solve complex problems. 
+ +Use Code Interpreter for: + +- **Data analysis**: Processing files with diverse data and formatting +- **File generation**: Creating files with data and images of graphs +- **Iterative coding**: Writing and running code iteratively to solve problems +- **Visual intelligence**: Cropping, zooming, rotating, and transforming images + +```typescript +import { ChatOpenAI, tools } from "@langchain/openai"; + +const model = new ChatOpenAI({ model: "gpt-4.1" }); + +// Basic usage with auto container (default 1GB memory) +const response = await model.invoke("Solve the equation 3x + 11 = 14", { + tools: [tools.codeInterpreter()], +}); +``` + +**Memory configuration** - Choose from 1GB (default), 4GB, 16GB, or 64GB: + +```typescript +const response = await model.invoke( + "Analyze this large dataset and create visualizations", + { + tools: [ + tools.codeInterpreter({ + container: { memoryLimit: "4g" }, + }), + ], + } +); +``` + +**With files** - Make uploaded files available to the code: + +```typescript +const response = await model.invoke("Process the uploaded CSV file", { + tools: [ + tools.codeInterpreter({ + container: { + memoryLimit: "4g", + fileIds: ["file-abc123", "file-def456"], + }, + }), + ], +}); +``` + +**Explicit container** - Use a pre-created container ID: + +```typescript +const response = await model.invoke("Continue working with the data", { + tools: [ + tools.codeInterpreter({ + container: "cntr_abc123", + }), + ], +}); +``` + +> **Note**: Containers expire after 20 minutes of inactivity. While called "Code Interpreter", the model knows it as the "python tool" - for explicit prompting, ask for "the python tool" in your prompts. + +For more information, see [OpenAI's Code Interpreter Documentation](https://platform.openai.com/docs/guides/tools-code-interpreter). + +### File Search Tool + +The File Search tool allows models to search your files for relevant information using semantic and keyword search. It enables retrieval from a knowledge base of previously uploaded files stored in vector stores. + +**Prerequisites**: Before using File Search, you must: + +1. Upload files to the File API with `purpose: "assistants"` +2. Create a vector store +3. Add files to the vector store + +```typescript +import { ChatOpenAI, tools } from "@langchain/openai"; + +const model = new ChatOpenAI({ model: "gpt-4.1" }); + +const response = await model.invoke("What is deep research by OpenAI?", { + tools: [ + tools.fileSearch({ + vectorStoreIds: ["vs_abc123"], + // maxNumResults: 5, // Limit results for lower latency + // filters: { type: "eq", key: "category", value: "blog" }, // Metadata filtering + // filters: { type: "and", filters: [ // Compound filters (AND/OR) + // { type: "eq", key: "category", value: "technical" }, + // { type: "gte", key: "year", value: 2024 }, + // ]}, + // rankingOptions: { scoreThreshold: 0.8, ranker: "auto" }, // Customize scoring + }), + ], +}); +``` + +Filter operators: `eq` (equals), `ne` (not equal), `gt` (greater than), `gte` (greater than or equal), `lt` (less than), `lte` (less than or equal). + +For more information, see [OpenAI's File Search Documentation](https://platform.openai.com/docs/guides/tools-file-search). + +### Image Generation Tool + +The Image Generation tool allows models to generate or edit images using text prompts and optional image inputs. It leverages the GPT Image model and automatically optimizes text inputs for improved performance. 
+ +Use Image Generation for: + +- **Creating images from text**: Generate images from detailed text descriptions +- **Editing existing images**: Modify images based on text instructions +- **Multi-turn image editing**: Iteratively refine images across conversation turns +- **Various output formats**: Support for PNG, JPEG, and WebP formats + +```typescript +import { ChatOpenAI, tools } from "@langchain/openai"; + +const model = new ChatOpenAI({ model: "gpt-4o" }); + +// Basic usage - generate an image +const response = await model.invoke( + "Generate an image of a gray tabby cat hugging an otter with an orange scarf", + { tools: [tools.imageGeneration()] } +); + +// Access the generated image (base64-encoded) +const imageOutput = response.additional_kwargs.tool_outputs?.find( + (output) => output.type === "image_generation_call" +); +if (imageOutput?.result) { + const fs = await import("fs"); + fs.writeFileSync("output.png", Buffer.from(imageOutput.result, "base64")); +} +``` + +**Custom size and quality** - Configure output dimensions and quality: + +```typescript +const response = await model.invoke("Draw a beautiful sunset over mountains", { + tools: [ + tools.imageGeneration({ + size: "1536x1024", // Landscape format (also: "1024x1024", "1024x1536", "auto") + quality: "high", // Quality level (also: "low", "medium", "auto") + }), + ], +}); +``` + +**Output format and compression** - Choose format and compression level: + +```typescript +const response = await model.invoke("Create a product photo", { + tools: [ + tools.imageGeneration({ + outputFormat: "jpeg", // Format (also: "png", "webp") + outputCompression: 90, // Compression 0-100 (for JPEG/WebP) + }), + ], +}); +``` + +**Transparent background** - Generate images with transparency: + +```typescript +const response = await model.invoke( + "Create a logo with transparent background", + { + tools: [ + tools.imageGeneration({ + background: "transparent", // Background type (also: "opaque", "auto") + outputFormat: "png", + }), + ], + } +); +``` + +**Streaming with partial images** - Get visual feedback during generation: + +```typescript +const response = await model.invoke("Draw a detailed fantasy castle", { + tools: [ + tools.imageGeneration({ + partialImages: 2, // Number of partial images (0-3) + }), + ], +}); +``` + +**Force image generation** - Ensure the model uses the image generation tool: + +```typescript +const response = await model.invoke("A serene lake at dawn", { + tools: [tools.imageGeneration()], + tool_choice: { type: "image_generation" }, +}); +``` + +**Multi-turn editing** - Refine images across conversation turns: + +```typescript +// First turn: generate initial image +const response1 = await model.invoke("Draw a red car", { + tools: [tools.imageGeneration()], +}); + +// Second turn: edit the image +const response2 = await model.invoke( + [response1, new HumanMessage("Now change the car color to blue")], + { tools: [tools.imageGeneration()] } +); +``` + +> **Prompting tips**: Use terms like "draw" or "edit" for best results. For combining images, say "edit the first image by adding this element" instead of "combine" or "merge". + +Supported models: `gpt-4o`, `gpt-4o-mini`, `gpt-4.1`, `gpt-4.1-mini`, `gpt-4.1-nano`, `o3` + +For more information, see [OpenAI's Image Generation Documentation](https://platform.openai.com/docs/guides/tools-image-generation). + +### Computer Use Tool + +The Computer Use tool allows models to control computer interfaces by simulating mouse clicks, keyboard input, scrolling, and more. 
It uses OpenAI's Computer-Using Agent (CUA) model to understand screenshots and suggest actions. + +> **Beta**: Computer use is in beta. Use in sandboxed environments only and do not use for high-stakes or authenticated tasks. Always implement human-in-the-loop for important decisions. + +**How it works**: The tool operates in a continuous loop: + +1. Model sends computer actions (click, type, scroll, etc.) +2. Your code executes these actions in a controlled environment +3. You capture a screenshot of the result +4. Send the screenshot back to the model +5. Repeat until the task is complete + +```typescript +import { ChatOpenAI, tools } from "@langchain/openai"; + +const model = new ChatOpenAI({ model: "computer-use-preview" }); + +// With execute callback for automatic action handling +const computer = tools.computerUse({ + displayWidth: 1024, + displayHeight: 768, + environment: "browser", + execute: async (action) => { + if (action.type === "screenshot") { + return captureScreenshot(); + } + if (action.type === "click") { + await page.mouse.click(action.x, action.y, { button: action.button }); + return captureScreenshot(); + } + if (action.type === "type") { + await page.keyboard.type(action.text); + return captureScreenshot(); + } + if (action.type === "scroll") { + await page.mouse.move(action.x, action.y); + await page.evaluate( + `window.scrollBy(${action.scroll_x}, ${action.scroll_y})` + ); + return captureScreenshot(); + } + // Handle other actions... + return captureScreenshot(); + }, +}); + +const llmWithComputer = model.bindTools([computer]); +const response = await llmWithComputer.invoke( + "Check the latest news on bing.com" +); +``` + +For more information, see [OpenAI's Computer Use Documentation](https://platform.openai.com/docs/guides/tools-computer-use). + +### Local Shell Tool + +The Local Shell tool allows models to run shell commands locally on a machine you provide. Commands are executed inside your own runtime—the API only returns the instructions. + +> **Security Warning**: Running arbitrary shell commands can be dangerous. Always sandbox execution or add strict allow/deny-lists before forwarding commands to the system shell. +> **Note**: This tool is designed to work with [Codex CLI](https://github.com/openai/codex) and the `codex-mini-latest` model. + +```typescript +import { ChatOpenAI, tools } from "@langchain/openai"; +import { exec } from "child_process"; +import { promisify } from "util"; + +const execAsync = promisify(exec); +const model = new ChatOpenAI({ model: "codex-mini-latest" }); + +// With execute callback for automatic command handling +const shell = tools.localShell({ + execute: async (action) => { + const { command, env, working_directory, timeout_ms } = action; + const result = await execAsync(command.join(" "), { + cwd: working_directory ?? process.cwd(), + env: { ...process.env, ...env }, + timeout: timeout_ms ?? 
undefined,
    });
    return result.stdout + result.stderr;
  },
});

const llmWithShell = model.bindTools([shell]);
const response = await llmWithShell.invoke(
  "List files in the current directory"
);
```

**Action properties**: The model returns actions with these properties:

- `command` - Array of argv tokens to execute
- `env` - Environment variables to set
- `working_directory` - Directory to run the command in
- `timeout_ms` - Suggested timeout (enforce your own limits)
- `user` - Optional user to run the command as

For more information, see [OpenAI's Local Shell Documentation](https://platform.openai.com/docs/guides/tools-local-shell).

### Shell Tool

The Shell tool allows models to run shell commands through your integration. Unlike Local Shell, this tool supports executing multiple commands concurrently and is designed for `gpt-5.1`.

> **Security Warning**: Running arbitrary shell commands can be dangerous. Always sandbox execution or add strict allow/deny-lists before forwarding commands to the system shell.

**Use cases**:

- **Automating filesystem or process diagnostics** – e.g., "find the largest PDF under ~/Documents"
- **Extending model capabilities** – Using built-in UNIX utilities, Python runtime, and other CLIs
- **Running multi-step build and test flows** – Chaining commands like `pip install` and `pytest`
- **Complex agentic coding workflows** – Using with `apply_patch` for file operations

```typescript
import { ChatOpenAI, tools } from "@langchain/openai";
import { exec, type ExecException } from "node:child_process";
import { promisify } from "node:util";

const execAsync = promisify(exec);

const model = new ChatOpenAI({ model: "gpt-5.1" });

// With execute callback for automatic command handling
const shellTool = tools.shell({
  execute: async (action) => {
    const outputs = await Promise.all(
      action.commands.map(async (cmd) => {
        try {
          const { stdout, stderr } = await execAsync(cmd, {
            timeout: action.timeout_ms ?? undefined,
          });
          return {
            stdout,
            stderr,
            outcome: { type: "exit" as const, exit_code: 0 },
          };
        } catch (error) {
          const err = error as ExecException & {
            stdout?: string;
            stderr?: string;
          };
          const timedOut = err.killed && err.signal === "SIGTERM";
          return {
            stdout: err.stdout ?? "",
            stderr: err.stderr ?? String(err),
            outcome: timedOut
              ? { type: "timeout" as const }
              : { type: "exit" as const, exit_code: err.code ?? 1 },
          };
        }
      })
    );
    return {
      output: outputs,
      maxOutputLength: action.max_output_length,
    };
  },
});

const llmWithShell = model.bindTools([shellTool]);
const response = await llmWithShell.invoke(
  "Find the largest PDF file in ~/Documents"
);
```

**Action properties**: The model returns actions with these properties:

- `commands` - Array of shell commands to execute (can run concurrently)
- `timeout_ms` - Optional timeout in milliseconds (enforce your own limits)
- `max_output_length` - Optional maximum characters to return per command

**Return format**: Your execute function should return a `ShellResult`:

```typescript
interface ShellResult {
  output: Array<{
    stdout: string;
    stderr: string;
    outcome: { type: "exit"; exit_code: number } | { type: "timeout" };
  }>;
  maxOutputLength?: number | null; // Pass back from action if provided
}
```

> **Note**: Only available through the Responses API with `gpt-5.1`. The `timeout_ms` from the model is only a hint—always enforce your own limits.

For more information, see [OpenAI's Shell Documentation](https://platform.openai.com/docs/guides/tools-shell).
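In line with the security warning above, one simple mitigation is to gate commands behind an allow-list before they reach the system shell. A minimal sketch (the helper and the allowed set are illustrative, not part of `@langchain/openai`):

```typescript
// Hypothetical guard: reject any command whose binary is not allow-listed.
const ALLOWED_BINARIES = new Set(["ls", "cat", "grep", "find", "wc", "head"]);

function assertAllowed(commands: string[]): void {
  for (const command of commands) {
    // Naive parse: treat the first whitespace-separated token as the binary.
    const binary = command.trim().split(/\s+/)[0];
    if (!ALLOWED_BINARIES.has(binary)) {
      throw new Error(`Blocked command: ${binary}`);
    }
  }
}
```

Calling `assertAllowed(action.commands)` at the top of the `execute` callback turns the tool into a deny-by-default pipeline; pair it with sandboxing for defense in depth.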
+ +### Apply Patch Tool + +The Apply Patch tool allows models to propose structured diffs that your integration applies. This enables iterative, multi-step code editing workflows where the model can create, update, and delete files in your codebase. + +**When to use**: + +- **Multi-file refactors** – Rename symbols, extract helpers, or reorganize modules +- **Bug fixes** – Have the model both diagnose issues and emit precise patches +- **Tests & docs generation** – Create new test files, fixtures, and documentation +- **Migrations & mechanical edits** – Apply repetitive, structured updates + +> **Security Warning**: Applying patches can modify files in your codebase. Always validate paths, implement backups, and consider sandboxing. +> **Note**: This tool is designed to work with `gpt-5.1` model. + +```typescript +import { ChatOpenAI, tools } from "@langchain/openai"; +import { applyDiff } from "@openai/agents"; +import * as fs from "fs/promises"; + +const model = new ChatOpenAI({ model: "gpt-5.1" }); + +// With execute callback for automatic patch handling +const patchTool = tools.applyPatch({ + execute: async (operation) => { + if (operation.type === "create_file") { + const content = applyDiff("", operation.diff, "create"); + await fs.writeFile(operation.path, content); + return `Created ${operation.path}`; + } + if (operation.type === "update_file") { + const current = await fs.readFile(operation.path, "utf-8"); + const newContent = applyDiff(current, operation.diff); + await fs.writeFile(operation.path, newContent); + return `Updated ${operation.path}`; + } + if (operation.type === "delete_file") { + await fs.unlink(operation.path); + return `Deleted ${operation.path}`; + } + return "Unknown operation type"; + }, +}); + +const llmWithPatch = model.bindTools([patchTool]); +const response = await llmWithPatch.invoke( + "Rename the fib() function to fibonacci() in lib/fib.py" +); +``` + +**Operation types**: The model returns operations with these properties: + +- `create_file` – Create a new file at `path` with content from `diff` +- `update_file` – Modify an existing file at `path` using V4A diff format in `diff` +- `delete_file` – Remove a file at `path` + +**Best practices**: + +- **Path validation**: Prevent directory traversal and restrict edits to allowed directories +- **Backups**: Consider backing up files before applying patches +- **Error handling**: Return descriptive error messages so the model can recover +- **Atomicity**: Decide whether you want "all-or-nothing" semantics (rollback if any patch fails) + +For more information, see [OpenAI's Apply Patch Documentation](https://platform.openai.com/docs/guides/tools-apply-patch). diff --git a/src/oss/javascript/releases/changelog.mdx b/src/oss/javascript/releases/changelog.mdx index 2ea6a547c4..49c33b3a70 100644 --- a/src/oss/javascript/releases/changelog.mdx +++ b/src/oss/javascript/releases/changelog.mdx @@ -9,6 +9,42 @@ rss: true **Subscribe**: Our changelog includes an [RSS feed](https://docs.langchain.com/oss/javascript/releases/changelog/rss.xml) that can integrate with [Slack](https://slack.com/help/articles/218688467-Add-RSS-feeds-to-Slack), [email](https://zapier.com/apps/email/integrations/rss/1441/send-new-rss-feed-entries-via-email), Discord bots like [Readybot](https://readybot.io/) or [RSS Feeds to Discord Bot](https://rss.app/en/bots/rssfeeds-discord-bot), and other subscription tools. 
+ + ## v1.2.0 + ### `langchain` + * [Structured output](/oss/langchain/structured-output): Added ability to manually set `strict` mode when using `providerStrategy` for structured output. + + ### `@langchain/openai` + * **New provider built-in tools:** Support for [file search](/oss/langchain/tools#file-search), [web search](/oss/langchain/tools#web-search), [code interpreter](/oss/langchain/tools#code-interpreter), [image generation](/oss/langchain/tools#image-generation), [computer use](/oss/langchain/tools#computer-use), [shell](/oss/langchain/tools#shell), and [MCP connector](/oss/langchain/tools#mcp) tools. + * **Content moderation:** New `moderateContent` option on `ChatOpenAI` for detecting and handling unsafe content. + * Prefer responses API for GPT-5.2 Pro model. + + ## v1.3.0 + ### `@langchain/anthropic` + * **New provider built-in tools:** Support for [text editor](/oss/langchain/tools#text-editor), [web fetch](/oss/langchain/tools#web-fetch), [computer use](/oss/langchain/tools#computer-use-1), [tool search](/oss/langchain/tools#tool-search), and [MCP toolset](/oss/langchain/tools#mcp-toolset) tools. + * Exposed `ChatAnthropicInput` type for improved type safety. + + ## v1.1.0 + ### `@langchain/ollama` + * **Native structured outputs:** Added support for native structured output via `withStructuredOutput`. + * Support for custom `baseUrl` configuration. + + ## v1.0.0 + ### `@langchain/community` + * Jira document loader updated to use v3 API. + * LanceDB: Added `similaritySearch()` and `similaritySearchWithScore()` support. + * Elasticsearch hybrid search support. + * New `GoogleCalendarDeleteTool`. + * Various bug fixes for LlamaCppEmbeddings, PrismaVectorStore, IBM WatsonX, and security improvements. + + ### Other packages + * **@langchain/xai:** Native Live Search support. + * **@langchain/tavily:** Added Tavily's research endpoint. + * **@langchain/mongodb:** New MongoDB LLM cache. + * **@langchain/mcp-adapters:** Added `onConnectionError` option. + * **@langchain/google-common:** `jsonSchema` method support in `withStructuredOutput`. + * **@langchain/core:** Security fixes, better subgraph nesting in Mermaid graphs, UUID7 for run IDs. + ## v1.1.0 diff --git a/src/oss/langchain/agents.mdx b/src/oss/langchain/agents.mdx index 6cfb6d218f..ea77c552b7 100644 --- a/src/oss/langchain/agents.mdx +++ b/src/oss/langchain/agents.mdx @@ -72,10 +72,7 @@ To initialize a static model from a str: + """Create a user profile, requesting details via elicitation.""" + result = await ctx.elicit( # [!code highlight] + message=f"Please provide details for {name}'s profile:", # [!code highlight] + schema=UserDetails, # [!code highlight] + ) # [!code highlight] + if result.action == "accept" and result.data: + return f"Created profile for {name}: email={result.data.email}, age={result.data.age}" + if result.action == "decline": + return f"User declined. Created minimal profile for {name}." + return "Profile creation cancelled." 
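+
+# result.action is always one of "accept", "decline", or "cancel"; result.data
+# is only populated when the client accepts (see the response actions table below)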
+ +if __name__ == "__main__": + server.run(transport="http") +``` + +#### Client setup + +Handle elicitation requests by providing a callback to `MultiServerMCPClient`: + +```python Handling elicitation requests +from langchain_mcp_adapters.client import MultiServerMCPClient +from langchain_mcp_adapters.callbacks import Callbacks, CallbackContext +from mcp.shared.context import RequestContext +from mcp.types import ElicitRequestParams, ElicitResult + +async def on_elicitation( + mcp_context: RequestContext, + params: ElicitRequestParams, + context: CallbackContext, +) -> ElicitResult: + """Handle elicitation requests from MCP servers.""" + # In a real application, you would prompt the user for input + # based on params.message and params.requestedSchema + return ElicitResult( # [!code highlight] + action="accept", # [!code highlight] + content={"email": "user@example.com", "age": 25}, # [!code highlight] + ) # [!code highlight] + +client = MultiServerMCPClient( + { + "profile": { + "url": "http://localhost:8000/mcp", + "transport": "http", + } + }, + callbacks=Callbacks(on_elicitation=on_elicitation), # [!code highlight] +) +``` + +#### Response actions + +The elicitation callback can return one of three actions: + +| Action | Description | +|--------|-------------| +| `accept` | User provided valid input. Include the data in the `content` field. | +| `decline` | User chose not to provide the requested information. | +| `cancel` | User cancelled the operation entirely. | + +```python Response action examples +# Accept with data +ElicitResult(action="accept", content={"email": "user@example.com", "age": 25}) + +# Decline (user doesn't want to provide info) +ElicitResult(action="decline") + +# Cancel (abort the operation) +ElicitResult(action="cancel") +``` + ::: ## Additional resources diff --git a/src/oss/langchain/rag.mdx b/src/oss/langchain/rag.mdx index d1d4732cc5..6fbc87e04a 100644 --- a/src/oss/langchain/rag.mdx +++ b/src/oss/langchain/rag.mdx @@ -908,4 +908,4 @@ Now that we've implemented a simple RAG application via @[`createAgent`], we can - Add [conversational memory](/oss/langchain/short-term-memory) to support multi-turn interactions - Add [long-term memory](/oss/langchain/long-term-memory) to support memory across conversational threads - Add [structured responses](/oss/langchain/structured-output) -- Deploy your application with [LangSmith Deployments](/langsmith/deployments) +- Deploy your application with [LangSmith Deployment](/langsmith/deployments) diff --git a/src/oss/langchain/short-term-memory.mdx b/src/oss/langchain/short-term-memory.mdx index 0d8d72a533..e12a6e6a0b 100644 --- a/src/oss/langchain/short-term-memory.mdx +++ b/src/oss/langchain/short-term-memory.mdx @@ -265,35 +265,42 @@ If you'd like me to call you a nickname or use a different name, just say the wo ::: :::js -To trim message history in an agent, use `stateModifier` with the [`trimMessages`](https://js.langchain.com/docs/how_to/trim_messages/) function: +To trim message history in an agent, use @[`createMiddleware`] with a `beforeModel` hook: ```typescript -import { - createAgent, - trimMessages, - type AgentState, -} from "langchain"; -import { MemorySaver } from "@langchain/langgraph"; +import { RemoveMessage } from "@langchain/core/messages"; +import { createAgent, createMiddleware } from "langchain"; +import { MemorySaver, REMOVE_ALL_MESSAGES } from "@langchain/langgraph"; + +const trimMessages = createMiddleware({ + name: "TrimMessages", + beforeModel: (state) => { + const messages = 
state.messages; + + if (messages.length <= 3) { + return; // No changes needed + } + + const firstMsg = messages[0]; + const recentMessages = + messages.length % 2 === 0 ? messages.slice(-3) : messages.slice(-4); + const newMessages = [firstMsg, ...recentMessages]; -// This function will be called every time before the node that calls LLM -const stateModifier = async (state: AgentState) => { return { - messages: await trimMessages(state.messages, { - strategy: "last", - maxTokens: 384, - startOn: "human", - endOn: ["human", "tool"], - tokenCounter: (msgs) => msgs.length, - }), + messages: [ + new RemoveMessage({ id: REMOVE_ALL_MESSAGES }), + ...newMessages, + ], }; -}; + }, +}); const checkpointer = new MemorySaver(); const agent = createAgent({ - model: "gpt-5", - tools: [], - preModelHook: stateModifier, - checkpointer, + model: "gpt-4o", + tools: [], + middleware: [trimMessages], + checkpointer, }); ``` ::: @@ -420,65 +427,69 @@ for event in agent.stream( :::js ```typescript import { RemoveMessage } from "@langchain/core/messages"; -import { AgentState, createAgent } from "langchain"; +import { createAgent, createMiddleware } from "langchain"; import { MemorySaver } from "@langchain/langgraph"; -const deleteMessages = (state: AgentState) => { +const deleteOldMessages = createMiddleware({ + name: "DeleteOldMessages", + afterModel: (state) => { const messages = state.messages; if (messages.length > 2) { - // remove the earliest two messages - return { + // remove the earliest two messages + return { messages: messages - .slice(0, 2) - .map((m) => new RemoveMessage({ id: m.id! })), - }; + .slice(0, 2) + .map((m) => new RemoveMessage({ id: m.id! })), + }; } - return {}; -}; + return; + }, +}); const agent = createAgent({ - model: "gpt-5-nano", - tools: [], - prompt: "Please be concise and to the point.", - postModelHook: deleteMessages, - checkpointer: new MemorySaver(), + model: "gpt-4o", + tools: [], + systemPrompt: "Please be concise and to the point.", + middleware: [deleteOldMessages], + checkpointer: new MemorySaver(), }); const config = { configurable: { thread_id: "1" } }; const streamA = await agent.stream( - { messages: [{ role: "user", content: "hi! I'm bob" }] }, - { ...config, streamMode: "values" } + { messages: [{ role: "user", content: "hi! I'm bob" }] }, + { ...config, streamMode: "values" } ); for await (const event of streamA) { - const messageDetails = event.messages.map((message) => [ - message.getType(), - message.content, - ]); - console.log(messageDetails); + const messageDetails = event.messages.map((message) => [ + message.getType(), + message.content, + ]); + console.log(messageDetails); } const streamB = await agent.stream( - { - messages: [{ role: "user", content: "what's my name?" }], - }, - { ...config, streamMode: "values" } + { + messages: [{ role: "user", content: "what's my name?" }], + }, + { ...config, streamMode: "values" } ); for await (const event of streamB) { - const messageDetails = event.messages.map((message) => [ - message.getType(), - message.content, - ]); - console.log(messageDetails); + const messageDetails = event.messages.map((message) => [ + message.getType(), + message.content, + ]); + console.log(messageDetails); } ``` ``` -[['human', "hi! I'm bob"]] -[['human', "hi! I'm bob"], ['ai', 'Hi Bob! How are you doing today? Is there anything I can help you with?']] -[['human', "hi! I'm bob"], ['ai', 'Hi Bob! How are you doing today? Is there anything I can help you with?'], ['human', "what's my name?"]] -[['human', "hi! 
I'm bob"], ['ai', 'Hi Bob! How are you doing today? Is there anything I can help you with?'], ['human', "what's my name?"], ['ai', 'Your name is Bob.']] -[['human', "what's my name?"], ['ai', 'Your name is Bob.']] +[[ "human", "hi! I'm bob" ]] +[[ "human", "hi! I'm bob" ], [ "ai", "Hello, Bob! How can I assist you today?" ]] +[[ "human", "hi! I'm bob" ], [ "ai", "Hello, Bob! How can I assist you today?" ]] +[[ "human", "hi! I'm bob" ], [ "ai", "Hello, Bob! How can I assist you today" ], ["human", "what's my name?" ]] +[[ "human", "hi! I'm bob" ], [ "ai", "Hello, Bob! How can I assist you today?" ], ["human", "what's my name?"], [ "ai", "Your name is Bob, as you mentioned. How can I help you further?" ]] +[[ "human", "what's my name?" ], [ "ai", "Your name is Bob, as you mentioned. How can I help you further?" ]] ``` ::: @@ -612,43 +623,42 @@ print(result["messages"][-1].content) :::js ```typescript import * as z from "zod"; -import { createAgent, tool } from "langchain"; +import { createAgent, tool, type ToolRuntime } from "langchain"; const stateSchema = z.object({ - userId: z.string(), + userId: z.string(), }); const getUserInfo = tool( - async (_, config) => { - const userId = config.context?.userId; - return { userId }; - }, - { - name: "get_user_info", - description: "Get user info", - schema: z.object({}), - } + async (_, config: ToolRuntime>) => { + const userId = config.state.userId; + return userId === "user_123" ? "John Doe" : "Unknown User"; + }, + { + name: "get_user_info", + description: "Get user info", + schema: z.object({}), + } ); const agent = createAgent({ - model: "gpt-5-nano", - tools: [getUserInfo], - stateSchema, + model: "gpt-5-nano", + tools: [getUserInfo], + stateSchema, }); const result = await agent.invoke( - { - messages: [{ role: "user", content: "what's my name?" }], - }, - { - context: { - userId: "user_123", - }, - } + { + messages: [{ role: "user", content: "what's my name?" }], + userId: "user_123", + }, + { + context: {}, + } ); console.log(result.messages.at(-1)?.content); -// Outputs: "User is John Smith." +// Outputs: "Your name is John Doe." ``` ::: @@ -726,61 +736,62 @@ agent.invoke( :::js ```typescript import * as z from "zod"; -import { tool, createAgent } from "langchain"; -import { MessagesZodState, Command } from "@langchain/langgraph"; +import { tool, createAgent, ToolMessage, type ToolRuntime } from "langchain"; +import { Command } from "@langchain/langgraph"; const CustomState = z.object({ - messages: MessagesZodState.shape.messages, - userName: z.string().optional(), + userId: z.string().optional(), }); const updateUserInfo = tool( - async (_, config) => { - const userId = config.context?.userId; - const name = userId === "user_123" ? "John Smith" : "Unknown user"; - return new Command({ - update: { - userName: name, - // update the message history - messages: [ - { - role: "tool", - content: "Successfully looked up user information", - tool_call_id: config.toolCall?.id, - }, - ], - }, - }); - }, - { - name: "update_user_info", - description: "Look up and update user info.", - schema: z.object({}), - } + async (_, config: ToolRuntime) => { + const userId = config.state.userId; + const name = userId === "user_123" ? "John Smith" : "Unknown user"; + return new Command({ + update: { + userName: name, + // update the message history + messages: [ + new ToolMessage({ + content: "Successfully looked up user information", + tool_call_id: config.toolCall?.id ?? 
"", + }), + ], + }, + }); + }, + { + name: "update_user_info", + description: "Look up and update user info.", + schema: z.object({}), + } ); const greet = tool( - async (_, config) => { - const userName = config.context?.userName; - return `Hello ${userName}!`; - }, - { - name: "greet", - description: "Use this to greet the user once you found their info.", - schema: z.object({}), - } + async (_, config) => { + const userName = config.context?.userName; + return `Hello ${userName}!`; + }, + { + name: "greet", + description: "Use this to greet the user once you found their info.", + schema: z.object({}), + } ); const agent = createAgent({ - model, - tools: [updateUserInfo, greet], - stateSchema: CustomState, + model: "openai:gpt-5-mini", + tools: [updateUserInfo, greet], + stateSchema: CustomState, }); -await agent.invoke( - { messages: [{ role: "user", content: "greet the user" }] }, - { context: { userId: "user_123" } } -); +const result = await agent.invoke({ + messages: [{ role: "user", content: "greet the user" }], + userId: "user_123", +}); + +console.log(result.messages.at(-1)?.content); +// Output: "Hello! I’m here to help — what would you like to do today?" ``` ::: @@ -849,51 +860,50 @@ Hi John Smith, the weather in San Francisco is always sunny! :::js ```typescript import * as z from "zod"; -import { createAgent, tool, SystemMessage } from "langchain"; +import { createAgent, tool, dynamicSystemPromptMiddleware } from "langchain"; const contextSchema = z.object({ - userName: z.string(), + userName: z.string(), }); +type ContextSchema = z.infer; const getWeather = tool( - async ({ city }, config) => { - return `The weather in ${city} is always sunny!`; - }, - { - name: "get_weather", - description: "Get user info", - schema: z.object({ - city: z.string(), - }), - } + async ({ city }) => { + return `The weather in ${city} is always sunny!`; + }, + { + name: "get_weather", + description: "Get user info", + schema: z.object({ + city: z.string(), + }), + } ); const agent = createAgent({ - model: "gpt-5-nano", - tools: [getWeather], - contextSchema, - prompt: (state, config) => { - return [ - new SystemMessage( - `You are a helpful assistant. Address the user as ${config.context?.userName}.` - ), - ...state.messages, - }, + model: "gpt-5-nano", + tools: [getWeather], + contextSchema, + middleware: [ + dynamicSystemPromptMiddleware((_, config) => { + return `You are a helpful assistant. Address the user as ${config.context?.userName}.`; + }), + ], }); const result = await agent.invoke( - { - messages: [{ role: "user", content: "What is the weather in SF?" }], + { + messages: [{ role: "user", content: "What is the weather in SF?" 
}], + }, + { + context: { + userName: "John Smith", }, - { - context: { - userName: "John Smith", - }, - } + } ); for (const message of result.messages) { - console.log(message); + console.log(message); } /** * HumanMessage { @@ -1018,7 +1028,8 @@ If you'd like me to call you a nickname or use a different name, just say the wo :::js ```typescript import { RemoveMessage } from "@langchain/core/messages"; -import { createAgent, createMiddleware, trimMessages, type AgentState } from "langchain"; +import { createAgent, createMiddleware, trimMessages } from "langchain"; +import { REMOVE_ALL_MESSAGES } from "@langchain/langgraph"; const trimMessageHistory = createMiddleware({ name: "TrimMessages", @@ -1030,14 +1041,16 @@ const trimMessageHistory = createMiddleware({ endOn: ["human", "tool"], tokenCounter: (msgs) => msgs.length, }); - return { messages: trimmed }; + return { + messages: [new RemoveMessage({ id: REMOVE_ALL_MESSAGES }), ...trimmed], + }; }, }); const agent = createAgent({ - model: "gpt-5-nano", - tools: [], - middleware: [trimMessageHistory], + model: "gpt-5-nano", + tools: [], + middleware: [trimMessageHistory], }); ``` ::: @@ -1104,15 +1117,22 @@ agent = create_agent( :::js ```typescript import { RemoveMessage } from "@langchain/core/messages"; -import { createAgent, createMiddleware, type AgentState } from "langchain"; +import { createAgent, createMiddleware } from "langchain"; +import { REMOVE_ALL_MESSAGES } from "@langchain/langgraph"; const validateResponse = createMiddleware({ name: "ValidateResponse", afterModel: (state) => { const lastMessage = state.messages.at(-1)?.content; - if (typeof lastMessage === "string" && lastMessage.toLowerCase().includes("confidential")) { + if ( + typeof lastMessage === "string" && + lastMessage.toLowerCase().includes("confidential") + ) { return { - messages: [new RemoveMessage({ id: "all" }), ...state.messages], + messages: [ + new RemoveMessage({ id: REMOVE_ALL_MESSAGES }), + ...state.messages, + ], }; } return; @@ -1120,9 +1140,9 @@ const validateResponse = createMiddleware({ }); const agent = createAgent({ - model: "gpt-5-nano", - tools: [], - middleware: [validateResponse], + model: "gpt-5-nano", + tools: [], + middleware: [validateResponse], }); ``` ::: diff --git a/src/oss/langchain/structured-output.mdx b/src/oss/langchain/structured-output.mdx index 93b6e5c855..5f1cadae36 100644 --- a/src/oss/langchain/structured-output.mdx +++ b/src/oss/langchain/structured-output.mdx @@ -103,7 +103,11 @@ To use this strategy, configure a `ProviderStrategy`: ```python class ProviderStrategy(Generic[SchemaT]): schema: type[SchemaT] + strict: bool | None = None ``` + + The `strict` param requires `langchain>=1.2`. + The schema defining the structured output format. Supports: @@ -113,6 +117,10 @@ class ProviderStrategy(Generic[SchemaT]): - **JSON Schema**: Dictionary with JSON schema specification + + Optional boolean parameter to enable strict schema adherence. Supported by some providers (e.g., [OpenAI](/oss/integrations/chat/openai) and [xAI](/oss/integrations/chat/xai)). Defaults to `None` (disabled). 
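+
+As a quick illustration, here is a minimal sketch of enabling strict mode (this assumes `langchain>=1.2` and the `ProviderStrategy` import path shown below; adjust for your setup):
+
+```python
+from pydantic import BaseModel
+
+from langchain.agents import create_agent
+from langchain.agents.structured_output import ProviderStrategy
+
+
+class Weather(BaseModel):
+    temperature: float
+    condition: str
+
+
+agent = create_agent(
+    model="openai:gpt-4o",
+    tools=[],
+    # strict=True requests strict schema adherence from supporting providers
+    response_format=ProviderStrategy(schema=Weather, strict=True),
+)
+```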
+ + LangChain automatically uses `ProviderStrategy` when you pass a schema type directly to @[`create_agent.response_format`][create_agent(response_format)] and the model supports native structured output: diff --git a/src/oss/langchain/tools.mdx b/src/oss/langchain/tools.mdx index c811ff73bc..a99d762475 100644 --- a/src/oss/langchain/tools.mdx +++ b/src/oss/langchain/tools.mdx @@ -2,17 +2,13 @@ title: Tools --- -Many AI applications interact with users via natural language. However, some use cases require models to interface directly with external systems—such as APIs, databases, or file systems—using structured input. +Tools extend what [agents](/oss/langchain/agents) can do—letting them fetch real-time data, execute code, query external databases, and take actions in the world. -Tools are components that [agents](/oss/langchain/agents) call to perform actions. They extend model capabilities by letting them interact with the world through well-defined inputs and outputs. +Under the hood, tools are callable functions with well-defined inputs and outputs that get passed to a [chat model](/oss/langchain/models). The model decides when to invoke a tool based on the conversation context, and what input arguments to provide. -Tools encapsulate a callable function and its input schema. These can be passed to compatible [chat models](/oss/langchain/models), allowing the model to decide whether to invoke a tool and with what arguments. In these scenarios, tool calling enables models to generate requests that conform to a specified input schema. - - -**Server-side tool use** - -Some chat models (e.g., [OpenAI](/oss/integrations/chat/openai), [Anthropic](/oss/integrations/chat/anthropic), and [Gemini](/oss/integrations/chat/google_generative_ai)) feature [built-in tools](/oss/langchain/models#server-side-tool-use) that are executed server-side, such as web search and code interpreters. Refer to the [provider overview](/oss/integrations/providers/overview) to learn how to access these tools with your specific chat model. - + + For details on how models handle tool calls, see [Tool calling](/oss/langchain/models#tool-calling). + ## Create tools @@ -59,6 +55,12 @@ const searchDatabase = tool( ``` ::: + + **Server-side tool use** + + Some chat models (e.g., [OpenAI](/oss/integrations/chat/openai), [Anthropic](/oss/integrations/chat/anthropic), and [Gemini](/oss/integrations/chat/google_generative_ai)) feature [built-in tools](/oss/langchain/models#server-side-tool-use) that are executed server-side, such as web search and code interpreters. Refer to the [provider overview](/oss/integrations/providers/overview) to learn how to access these tools with your specific chat model. + + :::python ### Customize tool properties @@ -550,15 +552,17 @@ Stream custom updates from tools as they execute using `config.streamWriter`. 
Th ```ts import * as z from "zod"; -import { tool } from "langchain"; +import { tool, ToolRuntime } from "langchain"; const getWeather = tool( - ({ city }, config) => { - const writer = config.streamWriter; + ({ city }, config: ToolRuntime) => { + const writer = config.writer; // Stream custom updates as the tool executes - writer(`Looking up data for city: ${city}`); - writer(`Acquired data for city: ${city}`); + if (writer) { + writer(`Looking up data for city: ${city}`); + writer(`Acquired data for city: ${city}`); + } return `It's always sunny in ${city}!`; }, diff --git a/src/oss/langgraph/graph-api.mdx b/src/oss/langgraph/graph-api.mdx index 821557d44e..bbfef0f0bf 100644 --- a/src/oss/langgraph/graph-api.mdx +++ b/src/oss/langgraph/graph-api.mdx @@ -963,31 +963,6 @@ When you send updates from a subgraph node to a parent graph node for a key that ::: -:::js -If you are using [subgraphs](/oss/langgraph/use-subgraphs), you might want to navigate from a node within a subgraph to a different subgraph (i.e. a different node in the parent graph). To do so, you can specify `graph: Command.PARENT` in `Command`: - -```typescript -import { Command } from "@langchain/langgraph"; - -graph.addNode("myNode", (state) => { - return new Command({ - update: { foo: "bar" }, - goto: "otherSubgraph", // where `otherSubgraph` is a node in the parent graph - graph: Command.PARENT, - }); -}); -``` - - - -Setting `graph` to `Command.PARENT` will navigate to the closest parent graph. - -When you send updates from a subgraph node to a parent graph node for a key that's shared by both parent and subgraph [state schemas](#schema), you **must** define a [reducer](#reducers) for the key you're updating in the parent graph state. - - - -::: - This is particularly useful when implementing [multi-agent handoffs](/oss/langchain/multi-agent#handoffs). Check out [this guide](/oss/langgraph/use-graph-api#navigate-to-a-node-in-a-parent-graph) for detail. diff --git a/src/oss/langgraph/integrations.mdx b/src/oss/langgraph/integrations.mdx new file mode 100644 index 0000000000..62c19fd7ce --- /dev/null +++ b/src/oss/langgraph/integrations.mdx @@ -0,0 +1,19 @@ +--- +title: LangGraph integrations +--- + +LangGraph integrates with a growing ecosystem of tools and protocols that enhance the capabilities of your graphs and agents. + +## Front-end frameworks + +Create responsive user interfaces that are powered by your graphs with these UI frameworks and tools. + + + + React framework for building AI chat interfaces with streaming support and LangGraph integration. + + + + React framework for building in-app AI copilots with pre-built components, supporting React and Next.js. + + diff --git a/src/oss/python/integrations/callbacks/argilla.mdx b/src/oss/python/integrations/callbacks/argilla.mdx index aacac7328f..7bee87e0ba 100644 --- a/src/oss/python/integrations/callbacks/argilla.mdx +++ b/src/oss/python/integrations/callbacks/argilla.mdx @@ -136,7 +136,7 @@ LLMResult(generations=[[Generation(text='\n\nQ: What did the fish say when he hi Then we can create a chain using a prompt template, and then track the initial prompt and the final response in Argilla. 
```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_core.callbacks.stdout import StdOutCallbackHandler from langchain_core.prompts import PromptTemplate from langchain_openai import OpenAI diff --git a/src/oss/python/integrations/callbacks/confident.mdx b/src/oss/python/integrations/callbacks/confident.mdx index c8deeba1d2..24e781e5ff 100644 --- a/src/oss/python/integrations/callbacks/confident.mdx +++ b/src/oss/python/integrations/callbacks/confident.mdx @@ -105,7 +105,7 @@ We can start by defining a simple chain as shown below. ```python import requests -from langchain.chains import RetrievalQA +from langchain_classic.chains import RetrievalQA from langchain_chroma import Chroma from langchain_community.document_loaders import TextLoader from langchain_openai import OpenAI, OpenAIEmbeddings diff --git a/src/oss/python/integrations/callbacks/context.mdx b/src/oss/python/integrations/callbacks/context.mdx index 1abd83a0c7..acd735fadd 100644 --- a/src/oss/python/integrations/callbacks/context.mdx +++ b/src/oss/python/integrations/callbacks/context.mdx @@ -92,7 +92,7 @@ Correct: ```python import os -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_core.prompts import PromptTemplate from langchain_core.prompts.chat import ( ChatPromptTemplate, diff --git a/src/oss/python/integrations/callbacks/infino.mdx b/src/oss/python/integrations/callbacks/infino.mdx index db1165befb..cb7888fb37 100644 --- a/src/oss/python/integrations/callbacks/infino.mdx +++ b/src/oss/python/integrations/callbacks/infino.mdx @@ -206,7 +206,7 @@ Results for king charles III : [{"time":1696947745,"fields":{"prompt_response":" # Set your key here. # os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY" -from langchain.chains.summarize import load_summarize_chain +from langchain_classic.chains.summarize import load_summarize_chain from langchain_community.document_loaders import WebBaseLoader from langchain_openai import ChatOpenAI diff --git a/src/oss/python/integrations/callbacks/sagemaker_tracking.mdx b/src/oss/python/integrations/callbacks/sagemaker_tracking.mdx index 534336da37..864f189db5 100644 --- a/src/oss/python/integrations/callbacks/sagemaker_tracking.mdx +++ b/src/oss/python/integrations/callbacks/sagemaker_tracking.mdx @@ -41,7 +41,7 @@ from langchain_community.callbacks.sagemaker_callback import SageMakerCallbackHa ```python from langchain.agents import initialize_agent, load_tools -from langchain.chains import LLMChain, SimpleSequentialChain +from langchain_classic.chains import LLMChain, SimpleSequentialChain from langchain_core.prompts import PromptTemplate from langchain_openai import OpenAI from sagemaker.analytics import ExperimentAnalytics diff --git a/src/oss/python/integrations/callbacks/uptrain.mdx b/src/oss/python/integrations/callbacks/uptrain.mdx index 883eb4c5dd..81ab34f75a 100644 --- a/src/oss/python/integrations/callbacks/uptrain.mdx +++ b/src/oss/python/integrations/callbacks/uptrain.mdx @@ -57,7 +57,7 @@ NOTE: that you can also install `faiss-gpu` instead of `faiss-cpu` if you want t ```python from getpass import getpass -from langchain.chains import RetrievalQA +from langchain_classic.chains import RetrievalQA from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever from langchain_classic.retrievers.document_compressors import FlashrankRerank from langchain_classic.retrievers.multi_query import MultiQueryRetriever diff --git 
a/src/oss/python/integrations/chat/anthropic.mdx b/src/oss/python/integrations/chat/anthropic.mdx
index 4945133b61..5b2e004ee5 100644
--- a/src/oss/python/integrations/chat/anthropic.mdx
+++ b/src/oss/python/integrations/chat/anthropic.mdx
@@ -74,92 +74,116 @@ from langchain_anthropic import ChatAnthropic
 
 model = ChatAnthropic(
     model="claude-haiku-4-5-20251001",
-    temperature=0,
-    max_tokens=1024,
-    timeout=None,
-    max_retries=2,
-    # other params...
+    # temperature=,
+    # max_tokens=,
+    # timeout=,
+    # max_retries=,
+    # ...
 )
 ```
 
-See the @[`ChatAnthropic`] API reference for details on all available parameters.
+See the @[`ChatAnthropic`] API reference for details on all available instantiation parameters.
 
-## Invocation
-
-```python
-messages = [
-    (
-        "system",
-        "You are a helpful assistant that translates English to French. Translate the user sentence.",
-    ),
-    ("human", "I love programming."),
-]
-ai_msg = model.invoke(messages)
-ai_msg
-```
-
-```output
-AIMessage(content="J'adore la programmation.", response_metadata={'id': 'msg_018Nnu76krRPq8HvgKLW4F8T', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 29, 'output_tokens': 11}}, id='run-57e9295f-db8a-48dc-9619-babd2bedd891-0', usage_metadata={'input_tokens': 29, 'output_tokens': 11, 'total_tokens': 40})
-```
+{/* TODO: show use with a proxy or different base_url */}
 
-```python
-print(ai_msg.text)
-```
+## Invocation
 
-```output
-J'adore la programmation.
-```
+
+
+    ```python
+    messages = [
+        (
+            "system",
+            "You are a helpful translator. Translate the user sentence to French.",
+        ),
+        (
+            "human",
+            "I love programming.",
+        ),
+    ]
+    ai_msg = model.invoke(messages)
+    ai_msg
+    ```
+
+
+    ```python
+    print(ai_msg.text)
+    ```
+
+    ```output
+    J'adore la programmation.
+    ```
+
+
+    ```python
+    for chunk in model.stream(messages):
+        print(chunk.text, end="")
+    ```
+
+    ```python
+    AIMessageChunk(content="J", id="run-272ff5f9-8485-402c-b90d-eac8babc5b25")
+    AIMessageChunk(content="'", id="run-272ff5f9-8485-402c-b90d-eac8babc5b25")
+    AIMessageChunk(content="a", id="run-272ff5f9-8485-402c-b90d-eac8babc5b25")
+    AIMessageChunk(content="ime", id="run-272ff5f9-8485-402c-b90d-eac8babc5b25")
+    AIMessageChunk(content=" la", id="run-272ff5f9-8485-402c-b90d-eac8babc5b25")
+    AIMessageChunk(content=" programm", id="run-272ff5f9-8485-402c-b90d-eac8babc5b25")
+    AIMessageChunk(content="ation", id="run-272ff5f9-8485-402c-b90d-eac8babc5b25")
+    AIMessageChunk(content=".", id="run-272ff5f9-8485-402c-b90d-eac8babc5b25")
+    ```
+
+    To aggregate the full message from the stream:
+
+    ```python
+    stream = model.stream(messages)
+    full = next(stream)
+    for chunk in stream:
+        full += chunk
+    full
+    ```
+
+    ```python
+    AIMessageChunk(content="J'aime la programmation.", id="run-b34faef0-882f-4869-a19c-ed2b856e6361")
+    ```
+
+
+    ```python
+    await model.ainvoke(messages)
+
+    # stream
+    async for chunk in model.astream(messages):
+        print(chunk.text, end="")
+
+    # batch
+    await model.abatch([messages])
+    ```
+
+    ```python
+    AIMessage(
+        content="J'aime la programmation.",
+        response_metadata={
+            "id": "msg_01Trik66aiQ9Z1higrD5XFx3",
+            "model": "claude-sonnet-4-5-20250929",
+            "stop_reason": "end_turn",
+            "stop_sequence": None,
+            "usage": {"input_tokens": 25, "output_tokens": 11},
+        },
+        id="run-5886ac5f-3c2e-49f5-8a44-b1e92808c929-0",
+        usage_metadata={
+            "input_tokens": 25,
+            "output_tokens": 11,
+            "total_tokens": 36,
+        },
+    )
+    ```
+
+Learn more about supported invocation methods in our [models](/oss/langchain/models#invocation) guide.
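+
+As a quick follow-up sketch, per-call token usage can be checked on the response via the `usage_metadata` attribute shown in the async output above:
+
+```python
+ai_msg = model.invoke(messages)
+# Reports input/output token counts for the completed call
+print(ai_msg.usage_metadata)
+```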
-## Token counting - -You can count tokens in messages before sending them to the model using @[`get_num_tokens_from_messages()`][ChatAnthropic.get_num_tokens_from_messages]. This uses Anthropic's official [token counting API](https://platform.claude.com/docs/en/build-with-claude/token-counting). - -```python -from langchain_anthropic import ChatAnthropic -from langchain.messages import HumanMessage, SystemMessage - -model = ChatAnthropic(model="claude-sonnet-4-5-20250929") - -messages = [ - SystemMessage(content="You are a scientist"), - HumanMessage(content="Hello, Claude"), -] - -token_count = model.get_num_tokens_from_messages(messages) -print(token_count) -``` - -```output -14 -``` - -You can also count tokens when using tools: - -```python -from langchain.tools import tool - -@tool(parse_docstring=True) -def get_weather(location: str) -> str: - """Get the current weather in a given location - - Args: - location: The city and state, e.g. San Francisco, CA - """ - return "Sunny" - -messages = [ - HumanMessage(content="What's the weather like in San Francisco?"), -] - -token_count = model.get_num_tokens_from_messages(messages, tools=[get_weather]) -print(token_count) -``` - -```output -586 -``` ## Content blocks @@ -186,7 +210,7 @@ response = model_with_tools.invoke("Which city is hotter today: LA or NY?") response.content ``` -```output +```python [{'text': "I'll help you compare the temperatures of Los Angeles and New York by checking their current weather. I'll retrieve the weather for both cities.", 'type': 'text'}, {'id': 'toolu_01CkMaXrgmsNjTso7so94RJq', @@ -212,7 +236,7 @@ You can also access tool calls specifically in a standard format using the response.tool_calls ``` -```output +```python [{'name': 'GetWeather', 'args': {'location': 'Los Angeles, CA'}, 'id': 'toolu_01Ddzj5PkuZkrjF4tafzu54A'}, @@ -231,6 +255,52 @@ See @[`ChatAnthropic.bind_tools`] for details on how to bind tools to your model For information about Claude's built-in tools (code execution, web browsing, files API, etc), see the [Built-in tools](#built-in-tools). +```python +from pydantic import BaseModel, Field + + +class GetWeather(BaseModel): + '''Get the current weather in a given location''' + + location: str = Field(..., description="The city and state, e.g. San Francisco, CA") + + +class GetPopulation(BaseModel): + '''Get the current population in a given location''' + + location: str = Field(..., description="The city and state, e.g. 
San Francisco, CA") + + +model_with_tools = model.bind_tools([GetWeather, GetPopulation]) # [!code highlight] +ai_msg = model_with_tools.invoke("Which city is hotter today and which is bigger: LA or NY?") +ai_msg.tool_calls +``` + +```python +[ + { + "name": "GetWeather", + "args": {"location": "Los Angeles, CA"}, + "id": "toolu_01KzpPEAgzura7hpBqwHbWdo", + }, + { + "name": "GetWeather", + "args": {"location": "New York, NY"}, + "id": "toolu_01JtgbVGVJbiSwtZk3Uycezx", + }, + { + "name": "GetPopulation", + "args": {"location": "Los Angeles, CA"}, + "id": "toolu_01429aygngesudV9nTbCKGuw", + }, + { + "name": "GetPopulation", + "args": {"location": "New York, NY"}, + "id": "toolu_01JPktyd44tVMeBcPPnFSEJG", + }, +] +``` + ### Strict tool use @@ -398,7 +468,7 @@ Anthropic supports a [token-efficient tool use](https://platform.claude.com/docs print(f"\nTotal tokens: {response.usage_metadata['total_tokens']}") ``` - ```output + ```python [{'name': 'get_weather', 'args': {'location': 'San Francisco'}, 'id': 'toolu_01EoeE1qYaePcmNbUvMsWtmA', 'type': 'tool_call'}] Total tokens: 408 @@ -462,10 +532,66 @@ for chunk in model_with_tools.stream("Write a document about AI"): pass ``` -```output +```python Complete args: {'title': 'Artificial Intelligence: An Overview', 'content': '# Artificial Intelligence: An Overview... ``` +### Programmatic tool calling + + + Programmatic tool calling requires: + + - Claude Sonnet 4.5 or Opus 4.5. + - `langchain-anthropic>=1.3.0` + + You must specify the `advanced-tool-use-2025-11-20` beta header to enable programmatic tool calling. + + +Tools can be configured to be callable from Claude's [code execution](#code-execution) environment, reducing latency and token consumption in contexts involving large data processing or multi-tool workflows. + +Refer to Claude's [programmatic tool calling guide](https://platform.claude.com/docs/en/agents-and-tools/tool-use/programmatic-tool-calling) for details. To use this feature: + +- Include the [code execution](#code-execution) built-in tool in your set of tools +- Specify `extras={"allowed_callers": ["code_execution_20250825"]}` on tools you wish to call programmatically + +See below for a full example with [`create_agent`](/oss/langchain/agents). + + + You can specify `reuse_last_container` on initialization to automatically reuse code execution containers from previous model responses. + + +```python +from langchain.agents import create_agent +from langchain.tools import tool +from langchain_anthropic import ChatAnthropic + + +@tool(extras={"allowed_callers": ["code_execution_20250825"]}) # [!code highlight] +def get_weather(location: str) -> str: + """Get the weather at a location.""" + return "It's sunny." + +tools = [ + {"type": "code_execution_20250825", "name": "code_execution"}, # [!code highlight] + get_weather, +] + +model = ChatAnthropic( + model="claude-sonnet-4-5", + betas=["advanced-tool-use-2025-11-20"], # [!code highlight] + reuse_last_container=True, # [!code highlight] +) + +agent = create_agent(model, tools=tools) + +input_query = { + "role": "user", + "content": "What's the weather in Boston?", +} + +result = agent.invoke({"messages": [input_query]}) +``` + ## Multimodal Claude supports image and PDF inputs as content blocks, both in Anthropic's native format (see docs for [vision](https://platform.claude.com/docs/en/build-with-claude/vision) and [PDF support](https://platform.claude.com/docs/en/build-with-claude/pdf-support)) as well as LangChain's [standard format](/oss/langchain/messages#multimodal). 
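+
+As a brief sketch, an image can be passed by URL using LangChain's standard content-block format (the URL below is a placeholder):
+
+```python
+from langchain_anthropic import ChatAnthropic
+
+model = ChatAnthropic(model="claude-haiku-4-5-20251001")
+
+message = {
+    "role": "user",
+    "content": [
+        {"type": "text", "text": "Describe this image."},
+        # Standard (provider-agnostic) image block; base64 data with a
+        # mime_type is also accepted
+        {"type": "image", "url": "https://example.com/photo.jpg"},
+    ],
+}
+response = model.invoke([message])
+```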
@@ -644,38 +770,36 @@ To use extended thinking, specify the `thinking` parameter when initializing @[` You will need to specify a token budget to use this feature. See usage example below: + ```python Initialization param + import json + from langchain_anthropic import ChatAnthropic -```python Init param -import json -from langchain_anthropic import ChatAnthropic - -model = ChatAnthropic( - model="claude-sonnet-4-5-20250929", - max_tokens=5000, - thinking={"type": "enabled", "budget_tokens": 2000}, # [!code highlight] -) - -response = model.invoke("What is the cube root of 50.653?") -print(json.dumps(response.content_blocks, indent=2)) -``` + model = ChatAnthropic( + model="claude-sonnet-4-5-20250929", + max_tokens=5000, + thinking={"type": "enabled", "budget_tokens": 2000}, # [!code highlight] + ) -```python Invocation param -import json -from langchain_anthropic import ChatAnthropic + response = model.invoke("What is the cube root of 50.653?") + print(json.dumps(response.content_blocks, indent=2)) + ``` -model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + ```python Invocation param + import json + from langchain_anthropic import ChatAnthropic -response = model.invoke( - "What is the cube root of 50.653?", - max_tokens=5000, - thinking={"type": "enabled", "budget_tokens": 2000} # [!code highlight] -) -print(json.dumps(response.content_blocks, indent=2)) -``` + model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + response = model.invoke( + "What is the cube root of 50.653?", + max_tokens=5000, + thinking={"type": "enabled", "budget_tokens": 2000} # [!code highlight] + ) + print(json.dumps(response.content_blocks, indent=2)) + ``` -```output +```json [ { "type": "reasoning", @@ -683,12 +807,18 @@ print(json.dumps(response.content_blocks, indent=2)) "extras": {"signature": "ErUBCkYIBxgCIkB0UjV..."} }, { - "type": "text" + "type": "text", "text": "The cube root of 50.653 is approximately 3.6998.\n\nTo verify: 3.6998\u00b3 = 50.6530, which is very close to our original number.", } ] ``` + + The Claude Messages API handles thinking differently across Claude Sonnet 3.7 and Claude 4 models. + + Refer to the [Claude docs](https://platform.claude.com/docs/en/build-with-claude/extended-thinking#differences-in-thinking-across-model-versions) for more info. + + ## Effort Certain Claude models support an [effort](https://platform.claude.com/docs/en/build-with-claude/effort) feature, which controls how many tokens Claude uses when responding. This is useful for balancing response quality against latency and cost. @@ -698,7 +828,7 @@ from langchain_anthropic import ChatAnthropic model = ChatAnthropic( model="claude-opus-4-5-20251101", - effort="medium", # [!code highlight] + effort="medium", # # Options: "high", "medium", "low" [!code highlight] ) response = model.invoke("Analyze the trade-offs between microservices and monolithic architectures") @@ -710,114 +840,382 @@ response = model.invoke("Analyze the trade-offs between microservices and monoli See the [Claude documentation](https://platform.claude.com/docs/en/build-with-claude/effort) for detail on when to use different effort levels and to see supported models. -## Prompt caching +## Citations -Anthropic supports [caching](https://platform.claude.com/docs/en/build-with-claude/prompt-caching) of elements of your prompts, including messages, tool definitions, tool results, images and documents. 
This allows you to re-use large documents, instructions, [few-shot documents](/langsmith/create-few-shot-evaluators), and other data to reduce latency and costs. +Anthropic supports a [citations](https://platform.claude.com/docs/en/build-with-claude/citations) feature that lets Claude attach context to its answers based on source documents supplied by the user. -To enable caching on an element of a prompt, mark its associated content block using the `cache_control` key. See examples below: +When [document](https://platform.claude.com/docs/en/build-with-claude/citations#document-types) or `search_result` content blocks with `"citations": {"enabled": True}` are included in a query, Claude may generate citations in its response. - - Only certain Claude models support prompt caching. See the [Claude documentation](https://platform.claude.com/docs/en/build-with-claude/prompt-caching#supported-models) for details. - +### Simple example -### Messages +In this example we pass a [plain text document](https://platform.claude.com/docs/en/build-with-claude/citations#plain-text-documents). In the background, Claude [automatically chunks](https://platform.claude.com/docs/en/build-with-claude/citations#plain-text-documents) the input text into sentences, which are used when generating citations. -```python expandable -import requests +```python from langchain_anthropic import ChatAnthropic - -model = ChatAnthropic(model="claude-sonnet-4-5-20250929") - -# Pull LangChain readme -get_response = requests.get( - "https://raw.githubusercontent.com/langchain-ai/langchain/master/README.md" -) -readme = get_response.text +model = ChatAnthropic(model="claude-haiku-4-5-20251001") messages = [ { - "role": "system", + "role": "user", "content": [ { - "type": "text", - "text": "You are a technology expert.", - }, - { - "type": "text", - "text": f"{readme}", - "cache_control": {"type": "ephemeral"}, # [!code highlight] + "type": "document", + "source": { + "type": "text", + "media_type": "text/plain", + "data": "The grass is green. The sky is blue.", + }, + "title": "My Document", + "context": "This is a trustworthy document.", + "citations": {"enabled": True}, }, + {"type": "text", "text": "What color is the grass and sky?"}, ], - }, - { - "role": "user", - "content": "What's LangChain, according to its README?", - }, + } ] +response = model.invoke(messages) +response.content +``` -response_1 = model.invoke(messages) -response_2 = model.invoke(messages) - -usage_1 = response_1.usage_metadata["input_token_details"] -usage_2 = response_2.usage_metadata["input_token_details"] - -print(f"First invocation:\n{usage_1}") -print(f"\nSecond:\n{usage_2}") +```python +[{'text': 'Based on the document, ', 'type': 'text'}, + {'text': 'the grass is green', + 'type': 'text', + 'citations': [{'type': 'char_location', + 'cited_text': 'The grass is green. 
', + 'document_index': 0, + 'document_title': 'My Document', + 'start_char_index': 0, + 'end_char_index': 20}]}, + {'text': ', and ', 'type': 'text'}, + {'text': 'the sky is blue', + 'type': 'text', + 'citations': [{'type': 'char_location', + 'cited_text': 'The sky is blue.', + 'document_index': 0, + 'document_title': 'My Document', + 'start_char_index': 20, + 'end_char_index': 36}]}, + {'text': '.', 'type': 'text'}] ``` -```output -First invocation: -{'cache_read': 0, 'cache_creation': 1458} +### In tool results (agentic RAG) -Second: -{'cache_read': 1458, 'cache_creation': 0} -``` +Claude supports a [search_result](https://platform.claude.com/docs/en/build-with-claude/search-results) content block representing citable results from queries against a knowledge base or other custom source. These content blocks can be passed to claude both top-line (as in the above example) and within a tool result. This allows Claude to cite elements of its response using the result of a tool call. - -**Extended caching** +To pass search results in response to tool calls, define a tool that returns a list of `search_result` content blocks in Anthropic's native format. For example: - The cache lifetime is 5 minutes by default. If this is too short, you can apply one hour caching by enabling the `"extended-cache-ttl-2025-04-11"` beta header and specifying `"cache_control": {"type": "ephemeral", "ttl": "1h"}` on the message: +```python +def retrieval_tool(query: str) -> list[dict]: + """Access my knowledge base.""" - ```python - model = ChatAnthropic( - model="claude-sonnet-4-5-20250929", - betas=["extended-cache-ttl-2025-04-11"], # [!code highlight] - ) + # Run a search (e.g., with a LangChain vector store) + results = vector_store.similarity_search(query=query, k=2) - messages = [ + # Package results into search_result blocks + return [ { - "role": "user", - "content": [ - { - "type": "text", - "text": f"{long_text}", - "cache_control": {"type": "ephemeral", "ttl": "1h"}, # [!code highlight] - }, - ], + "type": "search_result", + # Customize fields as desired, using document metadata or otherwise + "title": "My Document Title", + "source": "Source description or provenance", + "citations": {"enabled": True}, + "content": [{"type": "text", "text": doc.page_content}], } + for doc in results ] - ``` +``` - Details of cached token counts will be included on the @[`InputTokenDetails`] of response's @[`usage_metadata`][UsageMetadata]: + + Here we demonstrate an end-to-end example in which we populate a LangChain [vector store](/oss/integrations/vectorstores/) with sample documents and equip Claude with a tool that queries those documents. - ```python - response = model.invoke(messages) - response.usage_metadata - ``` - ```json - { - "input_tokens": 1500, - "output_tokens": 200, - "total_tokens": 1700, - "input_token_details": { - "cache_read": 0, - "cache_creation": 1000, - "ephemeral_1h_input_tokens": 750, - "ephemeral_5m_input_tokens": 250, - } + The tool here takes a search query and a `category` string literal, but any valid tool signature can be used. 
+ + This example requires `langchain-openai` and `numpy` to be installed: + + ```bash + pip install langchain-openai numpy + ``` + + ```python + from typing import Literal + + from langchain.chat_models import init_chat_model + from langchain.embeddings import init_embeddings + from langchain_core.documents import Document + from langchain_core.vectorstores import InMemoryVectorStore + from langgraph.checkpoint.memory import InMemorySaver + from langchain.agents import create_agent + + + # Set up vector store + # Ensure you set your OPENAI_API_KEY environment variable + embeddings = init_embeddings("openai:text-embedding-3-small") + vector_store = InMemoryVectorStore(embeddings) + + document_1 = Document( + id="1", + page_content=( + "To request vacation days, submit a leave request form through the " + "HR portal. Approval will be sent by email." + ), + metadata={ + "category": "HR Policy", + "doc_title": "Leave Policy", + "provenance": "Leave Policy - page 1", + }, + ) + document_2 = Document( + id="2", + page_content="Managers will review vacation requests within 3 business days.", + metadata={ + "category": "HR Policy", + "doc_title": "Leave Policy", + "provenance": "Leave Policy - page 2", + }, + ) + document_3 = Document( + id="3", + page_content=( + "Employees with over 6 months tenure are eligible for 20 paid vacation days " + "per year." + ), + metadata={ + "category": "Benefits Policy", + "doc_title": "Benefits Guide 2025", + "provenance": "Benefits Policy - page 1", + }, + ) + + documents = [document_1, document_2, document_3] + vector_store.add_documents(documents=documents) + + + # Define tool + async def retrieval_tool( + query: str, category: Literal["HR Policy", "Benefits Policy"] + ) -> list[dict]: + """Access my knowledge base.""" + + def _filter_function(doc: Document) -> bool: + return doc.metadata.get("category") == category + + results = vector_store.similarity_search( + query=query, k=2, filter=_filter_function + ) + + return [ + { + "type": "search_result", + "title": doc.metadata["doc_title"], + "source": doc.metadata["provenance"], + "citations": {"enabled": True}, + "content": [{"type": "text", "text": doc.page_content}], + } + for doc in results + ] + + + + # Create agent + model = init_chat_model("claude-haiku-4-5-20251001") + + checkpointer = InMemorySaver() + agent = create_agent(model, [retrieval_tool], checkpointer=checkpointer) + + + # Invoke on a query + config = {"configurable": {"thread_id": "session_1"}} + + input_message = { + "role": "user", + "content": "How do I request vacation days?", } + async for step in agent.astream( + {"messages": [input_message]}, + config, + stream_mode="values", + ): + step["messages"][-1].pretty_print() ``` + + +### Using with text splitters + +Anthropic also lets you specify your own splits using [custom document](https://platform.claude.com/docs/en/build-with-claude/citations#custom-content-documents) types. LangChain [text splitters](/oss/integrations/splitters/) can be used to generate meaningful splits for this purpose. 
See the below example, where we split the LangChain `README.md` (a markdown document) and pass it to Claude as context: + +This example requires @[`langchain-text-splitters`] to be installed: + +```bash +pip install langchain-text-splitters +``` + +```python expandable +import requests +from langchain_anthropic import ChatAnthropic +from langchain_text_splitters import MarkdownTextSplitter + + +def format_to_anthropic_documents(documents: list[str]): + return { + "type": "document", + "source": { + "type": "content", + "content": [{"type": "text", "text": document} for document in documents], + }, + "citations": {"enabled": True}, + } + + +# Pull readme +get_response = requests.get( + "https://raw.githubusercontent.com/langchain-ai/langchain/master/README.md" +) +readme = get_response.text + +# Split into chunks +splitter = MarkdownTextSplitter( + chunk_overlap=0, + chunk_size=50, +) +documents = splitter.split_text(readme) + +# Construct message +message = { + "role": "user", + "content": [ + format_to_anthropic_documents(documents), + {"type": "text", "text": "Give me a link to LangChain's tutorials."}, + ], +} + +# Query model +model = ChatAnthropic(model="claude-haiku-4-5-20251001") +response = model.invoke([message]) +``` + +## Prompt caching + +Anthropic supports [caching](https://platform.claude.com/docs/en/build-with-claude/prompt-caching) of elements of your prompts, including messages, tool definitions, tool results, images and documents. This allows you to re-use large documents, instructions, [few-shot documents](/langsmith/create-few-shot-evaluators), and other data to reduce latency and costs. + +To enable caching on an element of a prompt, mark its associated content block using the `cache_control` key. See examples below: + + + Only certain Claude models support prompt caching. See the [Claude documentation](https://platform.claude.com/docs/en/build-with-claude/prompt-caching#supported-models) for details. + + +### Messages + +```python expandable +import requests +from langchain_anthropic import ChatAnthropic + + +model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + +# Pull LangChain readme +get_response = requests.get( + "https://raw.githubusercontent.com/langchain-ai/langchain/master/README.md" +) +readme = get_response.text + +messages = [ + { + "role": "system", + "content": [ + { + "type": "text", + "text": "You are a technology expert.", + }, + { + "type": "text", + "text": f"{readme}", + "cache_control": {"type": "ephemeral"}, # [!code highlight] + }, + ], + }, + { + "role": "user", + "content": "What's LangChain, according to its README?", + }, +] + +response_1 = model.invoke(messages) +response_2 = model.invoke(messages) + +usage_1 = response_1.usage_metadata["input_token_details"] +usage_2 = response_2.usage_metadata["input_token_details"] + +print(f"First invocation:\n{usage_1}") +print(f"\nSecond:\n{usage_2}") +``` + +```python +First invocation: +{'cache_read': 0, 'cache_creation': 1458} + +Second: +{'cache_read': 1458, 'cache_creation': 0} +``` + +Alternatively, you may enable prompt caching at invocation time. You may want to conditionally cache based on runtime conditions, such as the length of the context. This is useful for app-level decisions about what to cache. + +```python +response = model.invoke( + messages, + cache_control={"type": "ephemeral"}, # [!code highlight] +) +``` + + + **Extended caching** + + The cache lifetime is 5 minutes by default. 
If this is too short, you can apply one hour caching by enabling the `"extended-cache-ttl-2025-04-11"` beta header and specifying `"cache_control": {"type": "ephemeral", "ttl": "1h"}` on the message. + + + ```python + model = ChatAnthropic( + model="claude-sonnet-4-5-20250929", + betas=["extended-cache-ttl-2025-04-11"], # [!code highlight] + ) + + messages = [ + { + "role": "user", + "content": [ + { + "type": "text", + "text": f"{long_text}", + "cache_control": {"type": "ephemeral", "ttl": "1h"}, # [!code highlight] + }, + ], + } + ] + ``` + + Details of cached token counts will be included on the @[`InputTokenDetails`] of response's @[`usage_metadata`][UsageMetadata]: + + ```python + response = model.invoke(messages) + response.usage_metadata + ``` + ```json + { + "input_tokens": 1500, + "output_tokens": 200, + "total_tokens": 1700, + "input_token_details": { + "cache_read": 0, + "cache_creation": 1000, + "ephemeral_1h_input_tokens": 750, + "ephemeral_5m_input_tokens": 250, + } + } + ``` + @@ -855,7 +1253,7 @@ print(f"First invocation:\n{usage_1}") print(f"\nSecond:\n{usage_2}") ``` -```output +```python First invocation: {'cache_read': 0, 'cache_creation': 1809} @@ -934,7 +1332,7 @@ Below, we implement a simple chatbot that incorporates this feature. We follow t print(f"\n{output['messages'][-1].usage_metadata['input_token_details']}") ``` - ```output + ```python ================================== Ai Message ================================== Hello, Bob! It's nice to meet you. How are you doing today? Is there something I can help you with? @@ -998,260 +1396,63 @@ Below, we implement a simple chatbot that incorporates this feature. We follow t In the [LangSmith trace](https://smith.langchain.com/public/4d0584d8-5f9e-4b91-8704-93ba2ccf416a/r), toggling "raw output" will show exactly what messages are sent to the chat model, including `cache_control` keys. -## Citations - -Anthropic supports a [citations](https://platform.claude.com/docs/en/build-with-claude/citations) feature that lets Claude attach context to its answers based on source documents supplied by the user. - -When [document](https://platform.claude.com/docs/en/build-with-claude/citations#document-types) or `search_result` content blocks with `"citations": {"enabled": True}` are included in a query, Claude may generate citations in its response. - -### Simple example - -In this example we pass a [plain text document](https://platform.claude.com/docs/en/build-with-claude/citations#plain-text-documents). In the background, Claude [automatically chunks](https://platform.claude.com/docs/en/build-with-claude/citations#plain-text-documents) the input text into sentences, which are used when generating citations. - -```python -from langchain_anthropic import ChatAnthropic - -model = ChatAnthropic(model="claude-haiku-4-5-20251001") - -messages = [ - { - "role": "user", - "content": [ - { - "type": "document", - "source": { - "type": "text", - "media_type": "text/plain", - "data": "The grass is green. The sky is blue.", - }, - "title": "My Document", - "context": "This is a trustworthy document.", - "citations": {"enabled": True}, - }, - {"type": "text", "text": "What color is the grass and sky?"}, - ], - } -] -response = model.invoke(messages) -response.content -``` - -```output -[{'text': 'Based on the document, ', 'type': 'text'}, - {'text': 'the grass is green', - 'type': 'text', - 'citations': [{'type': 'char_location', - 'cited_text': 'The grass is green. 
', - 'document_index': 0, - 'document_title': 'My Document', - 'start_char_index': 0, - 'end_char_index': 20}]}, - {'text': ', and ', 'type': 'text'}, - {'text': 'the sky is blue', - 'type': 'text', - 'citations': [{'type': 'char_location', - 'cited_text': 'The sky is blue.', - 'document_index': 0, - 'document_title': 'My Document', - 'start_char_index': 20, - 'end_char_index': 36}]}, - {'text': '.', 'type': 'text'}] -``` - -### In tool results (agentic RAG) - -Claude supports a [search_result](https://platform.claude.com/docs/en/build-with-claude/search-results) content block representing citable results from queries against a knowledge base or other custom source. These content blocks can be passed to claude both top-line (as in the above example) and within a tool result. This allows Claude to cite elements of its response using the result of a tool call. - -To pass search results in response to tool calls, define a tool that returns a list of `search_result` content blocks in Anthropic's native format. For example: - -```python -def retrieval_tool(query: str) -> list[dict]: - """Access my knowledge base.""" - - # Run a search (e.g., with a LangChain vector store) - results = vector_store.similarity_search(query=query, k=2) - - # Package results into search_result blocks - return [ - { - "type": "search_result", - # Customize fields as desired, using document metadata or otherwise - "title": "My Document Title", - "source": "Source description or provenance", - "citations": {"enabled": True}, - "content": [{"type": "text", "text": doc.page_content}], - } - for doc in results - ] -``` - - - Here we demonstrate an end-to-end example in which we populate a LangChain [vector store](/oss/integrations/vectorstores/) with sample documents and equip Claude with a tool that queries those documents. - - The tool here takes a search query and a `category` string literal, but any valid tool signature can be used. - - This example requires `langchain-openai` and `numpy` to be installed: - - ```bash - pip install langchain-openai numpy - ``` - - ```python - from typing import Literal - - from langchain.chat_models import init_chat_model - from langchain.embeddings import init_embeddings - from langchain_core.documents import Document - from langchain_core.vectorstores import InMemoryVectorStore - from langgraph.checkpoint.memory import InMemorySaver - from langchain.agents import create_agent - - - # Set up vector store - # Ensure you set your OPENAI_API_KEY environment variable - embeddings = init_embeddings("openai:text-embedding-3-small") - vector_store = InMemoryVectorStore(embeddings) - - document_1 = Document( - id="1", - page_content=( - "To request vacation days, submit a leave request form through the " - "HR portal. Approval will be sent by email." - ), - metadata={ - "category": "HR Policy", - "doc_title": "Leave Policy", - "provenance": "Leave Policy - page 1", - }, - ) - document_2 = Document( - id="2", - page_content="Managers will review vacation requests within 3 business days.", - metadata={ - "category": "HR Policy", - "doc_title": "Leave Policy", - "provenance": "Leave Policy - page 2", - }, - ) - document_3 = Document( - id="3", - page_content=( - "Employees with over 6 months tenure are eligible for 20 paid vacation days " - "per year." 
- ), - metadata={ - "category": "Benefits Policy", - "doc_title": "Benefits Guide 2025", - "provenance": "Benefits Policy - page 1", - }, - ) - - documents = [document_1, document_2, document_3] - vector_store.add_documents(documents=documents) - +## Token counting - # Define tool - async def retrieval_tool( - query: str, category: Literal["HR Policy", "Benefits Policy"] - ) -> list[dict]: - """Access my knowledge base.""" +You can count tokens in messages before sending them to the model using @[`get_num_tokens_from_messages()`][ChatAnthropic.get_num_tokens_from_messages]. This uses Anthropic's official [token counting API](https://platform.claude.com/docs/en/build-with-claude/token-counting). - def _filter_function(doc: Document) -> bool: - return doc.metadata.get("category") == category + + + ```python + from langchain_anthropic import ChatAnthropic + from langchain.messages import HumanMessage, SystemMessage - results = vector_store.similarity_search( - query=query, k=2, filter=_filter_function - ) + model = ChatAnthropic(model="claude-sonnet-4-5-20250929") - return [ - { - "type": "search_result", - "title": doc.metadata["doc_title"], - "source": doc.metadata["provenance"], - "citations": {"enabled": True}, - "content": [{"type": "text", "text": doc.page_content}], - } - for doc in results + messages = [ + SystemMessage(content="You are a scientist"), + HumanMessage(content="Hello, Claude"), ] + token_count = model.get_num_tokens_from_messages(messages) + print(token_count) + ``` + + ```output + 14 + ``` + + + You can also count tokens when using tools: + + ```python + from langchain.tools import tool + + @tool(parse_docstring=True) + def get_weather(location: str) -> str: + """Get the current weather in a given location + + Args: + location: The city and state, e.g. San Francisco, CA + """ + return "Sunny" + + messages = [ + HumanMessage(content="What's the weather like in San Francisco?"), + ] + token_count = model.get_num_tokens_from_messages(messages, tools=[get_weather]) + print(token_count) + ``` - # Create agent - model = init_chat_model("claude-haiku-4-5-20251001") - - checkpointer = InMemorySaver() - agent = create_agent(model, [retrieval_tool], checkpointer=checkpointer) - - - # Invoke on a query - config = {"configurable": {"thread_id": "session_1"}} - - input_message = { - "role": "user", - "content": "How do I request vacation days?", - } - async for step in agent.astream( - {"messages": [input_message]}, - config, - stream_mode="values", - ): - step["messages"][-1].pretty_print() - ``` - - -### Using with text splitters - -Anthropic also lets you specify your own splits using [custom document](https://platform.claude.com/docs/en/build-with-claude/citations#custom-content-documents) types. LangChain [text splitters](/oss/integrations/splitters/) can be used to generate meaningful splits for this purpose. 
See the below example, where we split the LangChain `README.md` (a markdown document) and pass it to Claude as context:
-
-This example requires @[`langchain-text-splitters`] to be installed:
-
-```bash
-pip install langchain-text-splitters
-```
-
-```python expandable
-import requests
-from langchain_anthropic import ChatAnthropic
-from langchain_text_splitters import MarkdownTextSplitter
-
-
-def format_to_anthropic_documents(documents: list[str]):
-    return {
-        "type": "document",
-        "source": {
-            "type": "content",
-            "content": [{"type": "text", "text": document} for document in documents],
-        },
-        "citations": {"enabled": True},
-    }
-
-
-# Pull readme
-get_response = requests.get(
-    "https://raw.githubusercontent.com/langchain-ai/langchain/master/README.md"
-)
-readme = get_response.text
-
-# Split into chunks
-splitter = MarkdownTextSplitter(
-    chunk_overlap=0,
-    chunk_size=50,
-)
-documents = splitter.split_text(readme)
-
-# Construct message
-message = {
-    "role": "user",
-    "content": [
-        format_to_anthropic_documents(documents),
-        {"type": "text", "text": "Give me a link to LangChain's tutorials."},
-    ],
-}
-
-# Query model
-model = ChatAnthropic(model="claude-haiku-4-5-20251001")
-response = model.invoke([message])
-```
+    ```output
+    586
+    ```
+  
+

## Context management

@@ -1262,7 +1463,7 @@ See the [Claude documentation](https://platform.claude.com/docs/en/build-with-cl

  **Context management is supported since `langchain-anthropic>=0.3.21`**

-  You must speficy the `context-management-2025-06-27` beta header to apply context management to your model calls.
+  You must specify the `context-management-2025-06-27` beta header to apply context management to your model calls.


```python
@@ -1349,7 +1550,7 @@ response = model_with_structure.invoke("Provide details about the movie Inceptio
response
```

-```output
+```python
Movie(title='Inception', year=2010, director='Christopher Nolan', rating=8.8)
```

@@ -1385,19 +1586,39 @@ result = agent.invoke({
result["structured_response"]
```

-```output
+```python
Weather(temperature=75.0, condition='Sunny')
```

- 
+ 
+
+## Built-in tools
+
+Anthropic supports a variety of built-in client and server-side [tools](/oss/langchain/tools/).
+
+Server-side tools (e.g., [web search](#web-search)) are passed to the model and executed by Anthropic. Client-side tools (e.g., [bash tool](#bash-tool)) require you to implement the callback execution logic in your application and return results to the model.
+
+In either case, you make tools accessible to your chat model by using @[`bind_tools`][ChatAnthropic.bind_tools] on the model instance.
+
+See the relevant sections below for examples of implementing client-side execution logic.
+
+
+  **Middleware vs tools**
+
+  For client-side tools (e.g. [bash](#bash-tool), [text editor](#text-editor), [memory](#memory-tool)), you may opt to use [middleware](/oss/integrations/middleware/anthropic), which provides production-ready implementations with built-in execution, state management, and security policies.
+
+  Use middleware when you want a turnkey solution; use tools (documented below) when you need custom execution logic or want to use @[`bind_tools`][ChatAnthropic.bind_tools] directly.
+

+
+  **Beta tools**

-Anthropic supports a variety of [built-in tools](https://platform.claude.com/docs/en/agents-and-tools/tool-use/text-editor-tool), which can be bound to the model in the [usual way](/oss/langchain/tools/).
Claude will generate tool calls adhering to its internal schema for the tool. + If binding a beta tool to your chat model, LangChain will automatically add the required beta header for you. + ### Bash tool -Claude supports a [bash tool](https://platform.claude.com/docs/en/agents-and-tools/tool-use/bash-tool) that allows it to execute shell commands in a persistent bash session. This enables system operations, script execution, and command-line automation. +Claude supports a client-side [bash tool](https://platform.claude.com/docs/en/agents-and-tools/tool-use/bash-tool) that allows it to execute shell commands in a persistent bash session. This enables system operations, script execution, and command-line automation. **Important: You must provide the execution environment** @@ -1417,40 +1638,149 @@ Claude supports a [bash tool](https://platform.claude.com/docs/en/agents-and-too - Claude 4 models or Claude Sonnet 3.7 -```python -from langchain_anthropic import ChatAnthropic + + + ```python expandable + import subprocess -model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + from anthropic.types.beta import BetaToolBash20250124Param # [!code highlight] + from langchain_anthropic import ChatAnthropic + from langchain.messages import HumanMessage, ToolMessage + from langchain.tools import tool -bash_tool = { - "type": "bash_20250124", - "name": "bash", -} + tool_spec = BetaToolBash20250124Param( # [!code highlight] + name="bash", # [!code highlight] + type="bash_20250124", # [!code highlight] + ) # [!code highlight] -model_with_bash = model.bind_tools([bash_tool]) -response = model_with_bash.invoke( - "List all Python files in the current directory" -) -``` -`response.tool_calls` will contain the bash command Claude wants to execute. You must run this command in your environment and pass the result back. 
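+    # Note: `tool_spec` above is Anthropic's native bash tool definition sent
+    # to the API; the decorated function below supplies the client-side
+    # execution logic that actually runs each command Claude requests.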
+ @tool(extras={"provider_tool_definition": tool_spec}) # [!code highlight] + def bash(*, command: str, restart: bool = False, **kw): + """Execute a bash command.""" + if restart: + return "Bash session restarted" + try: + result = subprocess.run( + command, + shell=True, + capture_output=True, + text=True, + timeout=30, + ) + return result.stdout + result.stderr + except Exception as e: + return f"Error: {e}" + + + model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + model_with_bash = model.bind_tools([bash]) # [!code highlight] + + # Initial request + messages = [HumanMessage("List all files in the current directory")] + response = model_with_bash.invoke(messages) + print(response.content_blocks) + + # Tool execution loop + while response.tool_calls: + # Execute each tool call + tool_messages = [] + for tool_call in response.tool_calls: + result = bash.invoke(tool_call) + tool_messages.append(result) + + # Continue conversation with tool results + messages = [*messages, response, *tool_messages] + response = model_with_bash.invoke(messages) + print(response.content_blocks) + ``` + + + + ```python expandable + import subprocess + + from anthropic.types.beta import BetaToolBash20250124Param # [!code highlight] + from langchain.agents import create_agent + from langchain_anthropic import ChatAnthropic + from langchain.tools import tool + + tool_spec = BetaToolBash20250124Param( # [!code highlight] + name="bash", # [!code highlight] + type="bash_20250124", # [!code highlight] + ) # [!code highlight] + + + @tool(extras={"provider_tool_definition": tool_spec}) # [!code highlight] + def bash(*, command: str, restart: bool = False, **kw): + """Execute a bash command.""" + if restart: + return "Bash session restarted" + result = subprocess.run( + command, + shell=True, + capture_output=True, + text=True, + ) + return result.stdout + result.stderr + + + agent = create_agent( + model=ChatAnthropic(model="claude-sonnet-4-5-20250929"), + tools=[bash], # [!code highlight] + ) + + result = agent.invoke({"messages": [{"role": "user", "content": "List files"}]}) + + for message in result["messages"]: + message.pretty_print() + ``` + + + + ```python + from langchain_anthropic import ChatAnthropic + + model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + + bash_tool = { # [!code highlight] + "type": "bash_20250124", # [!code highlight] + "name": "bash", # [!code highlight] + } # [!code highlight] + + model_with_bash = model.bind_tools([bash_tool]) # [!code highlight] + response = model_with_bash.invoke( + "List all Python files in the current directory" + ) + # You must handle execution of the bash command in response.tool_calls via a tool execution loop + ``` + + Using @[`create_agent`] handles the tool execution loop automatically. + + `response.tool_calls` will contain the bash command Claude wants to execute. You must run this command in your environment and pass the result back. 
+ + ```python + [{'type': 'text', + 'text': "I'll list the Python files in the current directory for you."}, + {'type': 'tool_call', + 'name': 'bash', + 'args': {'command': 'ls -la *.py'}, + 'id': 'toolu_01ABC123...'}] + ``` + + -```output -[{'type': 'text', - 'text': "I'll list the Python files in the current directory for you."}, - {'type': 'tool_call', - 'name': 'bash', - 'args': {'command': 'ls -la *.py'}, - 'id': 'toolu_01ABC123...'}] -``` The bash tool supports two parameters: - `command` (required): The bash command to execute - `restart` (optional): Set to `true` to restart the bash session + + For a "batteries-included" implementation, consider using [`ClaudeBashToolMiddleware`](/oss/integrations/middleware/anthropic#bash-tool) which provides persistent sessions, Docker isolation, output redaction, and startup/shutdown commands out of the box. + + ### Code execution -Claude can use a [code execution tool](https://platform.claude.com/docs/en/agents-and-tools/tool-use/code-execution-tool) to execute code in a sandboxed environment. +Claude can use a server-side [code execution tool](https://platform.claude.com/docs/en/agents-and-tools/tool-use/code-execution-tool) to execute code in a sandboxed environment. Anthropic's `2025-08-25` code execution tools are supported since `langchain-anthropic>=1.0.3`. @@ -1462,91 +1792,146 @@ Claude can use a [code execution tool](https://platform.claude.com/docs/en/agent The code sandbox does not have internet access, thus you may only use packages that are pre-installed in the environment. See the [Claude docs](https://platform.claude.com/docs/en/agents-and-tools/tool-use/code-execution-tool#networking-and-security) for more info. -```python -from langchain_anthropic import ChatAnthropic + + + ```python + from anthropic.types.beta import BetaCodeExecutionTool20250825Param # [!code highlight] + from langchain_anthropic import ChatAnthropic + + model = ChatAnthropic( + model="claude-sonnet-4-5-20250929", + # (Optional) Enable the param below to automatically + # pass back in container IDs from previous response + reuse_last_container=True, + ) -model = ChatAnthropic( - model="claude-sonnet-4-5-20250929", -) + code_tool = BetaCodeExecutionTool20250825Param( # [!code highlight] + name="code_execution", # [!code highlight] + type="code_execution_20250825", # [!code highlight] + ) # [!code highlight] + model_with_tools = model.bind_tools([code_tool]) # [!code highlight] -tool = {"type": "code_execution_20250825", "name": "code_execution"} # [!code highlight] -model_with_tools = model.bind_tools([tool]) + response = model_with_tools.invoke( + "Calculate the mean and standard deviation of [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]" + ) + ``` + + + + ```python + from anthropic.types.beta import BetaCodeExecutionTool20250825Param # [!code highlight] + from langchain.agents import create_agent + from langchain_anthropic import ChatAnthropic + + code_tool = BetaCodeExecutionTool20250825Param( # [!code highlight] + name="code_execution", # [!code highlight] + type="code_execution_20250825", # [!code highlight] + ) # [!code highlight] + + agent = create_agent( + model=ChatAnthropic(model="claude-sonnet-4-5-20250929"), + tools=[code_tool], # [!code highlight] + ) -response = model_with_tools.invoke( - "Calculate the mean and standard deviation of [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]" -) -``` + result = agent.invoke({ + "messages": [{"role": "user", "content": "Calculate mean and std of [1,2,3,4,5]"}] + }) - + for message in result["messages"]: + message.pretty_print() + ``` + 
-Using the Files API, Claude can write code to access files for data analysis and other purposes. See example below: + + ```python + from langchain_anthropic import ChatAnthropic -```python -import anthropic -from langchain_anthropic import ChatAnthropic + model = ChatAnthropic( + model="claude-sonnet-4-5-20250929", + ) + code_tool = {"type": "code_execution_20250825", "name": "code_execution"} # [!code highlight] + model_with_tools = model.bind_tools([code_tool]) -client = anthropic.Anthropic() -file = client.beta.files.upload( - file=open("/path/to/sample_data.csv", "rb") -) -file_id = file.id + response = model_with_tools.invoke( + "Calculate the mean and standard deviation of [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]" + ) + ``` + + + + Using the Files API, Claude can write code to access files for data analysis and other purposes. See example below: -# Run inference -model = ChatAnthropic( - model="claude-sonnet-4-5-20250929", -) + ```python + import anthropic + from anthropic.types.beta import BetaCodeExecutionTool20250825Param # [!code highlight] + from langchain_anthropic import ChatAnthropic -tool = {"type": "code_execution_20250825", "name": "code_execution"} # [!code highlight] -model_with_tools = model.bind_tools([tool]) -input_message = { - "role": "user", - "content": [ - { - "type": "text", - "text": "Please plot these data and tell me what you see.", - }, - { - "type": "container_upload", - "file_id": file_id, - }, - ] -} -response = model_with_tools.invoke([input_message]) -``` + client = anthropic.Anthropic() + file = client.beta.files.upload( + file=open("/path/to/sample_data.csv", "rb") + ) + file_id = file.id -Note that Claude may generate files as part of its code execution. You can access these files using the Files API: -```python -# Take all file outputs for demonstration purposes -file_ids = [] -for block in response.content: - if block["type"] == "bash_code_execution_tool_result": - file_ids.extend( - content["file_id"] - for content in block.get("content", {}).get("content", []) - if "file_id" in content - ) + # Run inference + model = ChatAnthropic( + model="claude-sonnet-4-5-20250929", + ) -for i, file_id in enumerate(file_ids): - file_content = client.beta.files.download(file_id) - file_content.write_to_file(f"/path/to/file_{i}.png") -``` + code_tool = BetaCodeExecutionTool20250825Param( # [!code highlight] + name="code_execution", # [!code highlight] + type="code_execution_20250825", # [!code highlight] + ) # [!code highlight] + model_with_tools = model.bind_tools([code_tool]) - - **Available tool versions:** + input_message = { + "role": "user", + "content": [ + { + "type": "text", + "text": "Please plot these data and tell me what you see.", + }, + { + "type": "container_upload", + "file_id": file_id, + }, + ] + } + response = model_with_tools.invoke([input_message]) + ``` - - `code_execution_20250522` (legacy) - - `code_execution_20250825` (recommended) - + Note that Claude may generate files as part of its code execution. 
You can access these files using the Files API: + + ```python + # Take all file outputs for demonstration purposes + file_ids = [] + for block in response.content: + if block["type"] == "bash_code_execution_tool_result": + file_ids.extend( + content["file_id"] + for content in block.get("content", {}).get("content", []) + if "file_id" in content + ) + + for i, file_id in enumerate(file_ids): + file_content = client.beta.files.download(file_id) + file_content.write_to_file(f"/path/to/file_{i}.png") + ``` + + **Available tool versions:** + + - `code_execution_20250522` (legacy) + - `code_execution_20250825` (recommended) + ### Computer use -Claude supports [computer use](https://platform.claude.com/docs/en/agents-and-tools/tool-use/computer-use-tool) capabilities, allowing it to interact with desktop environments through screenshots, mouse control, and keyboard input. +Claude supports client-side [computer use](https://platform.claude.com/docs/en/agents-and-tools/tool-use/computer-use-tool) capabilities, allowing it to interact with desktop environments through screenshots, mouse control, and keyboard input. **Important: You must provide the execution environment** @@ -1566,36 +1951,165 @@ Claude supports [computer use](https://platform.claude.com/docs/en/agents-and-to - Claude Opus 4.5, Claude 4, or Claude Sonnet 3.7 -```python -from langchain_anthropic import ChatAnthropic + + + ```python expandable + import base64 + from typing import Literal + + from anthropic.types.beta import BetaToolComputerUse20250124Param # [!code highlight] + from langchain_anthropic import ChatAnthropic + from langchain.messages import HumanMessage, ToolMessage + from langchain.tools import tool + + DISPLAY_WIDTH = 1024 + DISPLAY_HEIGHT = 768 + + tool_spec = BetaToolComputerUse20250124Param( # [!code highlight] + name="computer", # [!code highlight] + type="computer_20250124", # [!code highlight] + display_width_px=DISPLAY_WIDTH, # [!code highlight] + display_height_px=DISPLAY_HEIGHT, # [!code highlight] + display_number=1, # [!code highlight] + ) # [!code highlight] + + @tool(extras={"provider_tool_definition": tool_spec}) # [!code highlight] + def computer( + *, + action: Literal[ + "key", "type", "mouse_move", "left_click", "left_click_drag", + "right_click", "middle_click", "double_click", "screenshot", + "cursor_position", "scroll" + ], + coordinate: list[int] | None = None, + text: str | None = None, + **kw + ): + """Control the computer display.""" + if action == "screenshot": + # Take screenshot and return base64-encoded image + # Implementation depends on your display setup (e.g., Xvfb, pyautogui) + return {"type": "image", "data": "base64_screenshot_data..."} + elif action == "left_click" and coordinate: + # Execute click at coordinate + return f"Clicked at {coordinate}" + elif action == "type" and text: + # Type text + return f"Typed: {text}" + # ... 
implement other actions + return f"Executed {action}" + + model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + model_with_computer = model.bind_tools([computer]) # [!code highlight] + + # Initial request + messages = [HumanMessage("Take a screenshot to see what's on the screen")] + response = model_with_computer.invoke(messages) + print(response.content_blocks) + + # Tool execution loop + while response.tool_calls: + tool_messages = [] + for tool_call in response.tool_calls: + result = computer.invoke(tool_call["args"]) + tool_messages.append( + ToolMessage(content=str(result), tool_call_id=tool_call["id"]) + ) + + messages = [*messages, response, *tool_messages] + response = model_with_computer.invoke(messages) + print(response.content_blocks) + ``` + + + + ```python expandable + from typing import Literal + + from anthropic.types.beta import BetaToolComputerUse20250124Param # [!code highlight] + from langchain.agents import create_agent + from langchain_anthropic import ChatAnthropic + from langchain.tools import tool + + tool_spec = BetaToolComputerUse20250124Param( # [!code highlight] + name="computer", # [!code highlight] + type="computer_20250124", # [!code highlight] + display_width_px=1024, # [!code highlight] + display_height_px=768, # [!code highlight] + ) # [!code highlight] + + + @tool(extras={"provider_tool_definition": tool_spec}) # [!code highlight] + def computer( + *, + action: Literal[ + "key", "type", "mouse_move", "left_click", "left_click_drag", + "right_click", "middle_click", "double_click", "screenshot", + "cursor_position", "scroll" + ], + coordinate: list[int] | None = None, + text: str | None = None, + **kw + ): + """Control the computer display.""" + if action == "screenshot": + return {"type": "image", "data": "base64_screenshot_data..."} + elif action == "left_click" and coordinate: + return f"Clicked at {coordinate}" + elif action == "type" and text: + return f"Typed: {text}" + return f"Executed {action}" + + + agent = create_agent( + model=ChatAnthropic(model="claude-sonnet-4-5-20250929"), + tools=[computer], # [!code highlight] + ) -model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + result = agent.invoke({ + "messages": [{"role": "user", "content": "Take a screenshot"}] + }) -# LangChain handles the API call and tool binding -computer_tool = { - "type": "computer_20250124", - "name": "computer", - "display_width_px": 1024, - "display_height_px": 768, - "display_number": 1, -} + for message in result["messages"]: + message.pretty_print() + ``` + -model_with_computer = model.bind_tools([computer_tool]) -response = model_with_computer.invoke( - "Take a screenshot to see what's on the screen" -) -``` + + ```python + from langchain_anthropic import ChatAnthropic -`response.tool_calls` will contain the computer action Claude wants to perform. You must execute this action in your environment and pass the result back. 
+ model = ChatAnthropic(model="claude-sonnet-4-5-20250929") -```output -[{'type': 'text', - 'text': "I'll take a screenshot to see what's currently on the screen."}, - {'type': 'tool_call', - 'name': 'computer', - 'args': {'action': 'screenshot'}, - 'id': 'toolu_01RNsqAE7dDZujELtacNeYv9'}] -``` + computer_tool = { + "type": "computer_20250124", + "name": "computer", + "display_width_px": 1024, + "display_height_px": 768, + "display_number": 1, + } + + model_with_computer = model.bind_tools([computer_tool]) # [!code highlight] + response = model_with_computer.invoke( + "Take a screenshot to see what's on the screen" + ) + # You must handle execution of the computer actions in response.tool_calls via a tool execution loop + ``` + + Using @[`create_agent`] handles the tool execution loop automatically. + + `response.tool_calls` will contain the computer action Claude wants to perform. You must execute this action in your environment and pass the result back. + + ```python + [{'type': 'text', + 'text': "I'll take a screenshot to see what's currently on the screen."}, + {'type': 'tool_call', + 'name': 'computer', + 'args': {'action': 'screenshot'}, + 'id': 'toolu_01RNsqAE7dDZujELtacNeYv9'}] + ``` + + **Available tool versions:** @@ -1606,69 +2120,274 @@ response = model_with_computer.invoke( ### Remote MCP -Claude can use a [MCP connector tool](https://platform.claude.com/docs/en/agents-and-tools/mcp-connector) for model-generated calls to remote MCP servers. +Claude can use a server-side [MCP connector tool](https://platform.claude.com/docs/en/agents-and-tools/mcp-connector) for model-generated calls to remote MCP servers. **Remote MCP is supported since `langchain-anthropic>=0.3.14`** -```python -from langchain_anthropic import ChatAnthropic + + + ```python + from anthropic.types.beta import BetaMCPToolsetParam # [!code highlight] + from langchain_anthropic import ChatAnthropic -mcp_servers = [ - { - "type": "url", - "url": "https://docs.langchain.com/mcp", - "name": "LangChain Docs", - # "tool_configuration": { # optional configuration - # "enabled": True, - # "allowed_tools": ["ask_question"], - # }, - # "authorization_token": "PLACEHOLDER", # optional authorization if needed - } -] + mcp_servers = [ + { + "type": "url", + "url": "https://docs.langchain.com/mcp", + "name": "LangChain Docs", + } + ] -model = ChatAnthropic( - model="claude-sonnet-4-5-20250929", - mcp_servers=mcp_servers, # [!code highlight] -) + model = ChatAnthropic( + model="claude-sonnet-4-5-20250929", + mcp_servers=mcp_servers, # [!code highlight] + ) -response = model.invoke( - "What are LangChain content blocks?", - tools=[{"type": "mcp_toolset", "mcp_server_name": "LangChain Docs"}], # [!code highlight] -) -response.content_blocks -``` + mcp_tool = BetaMCPToolsetParam( # [!code highlight] + type="mcp_toolset", # [!code highlight] + mcp_server_name="LangChain Docs", # [!code highlight] + ) # [!code highlight] + + response = model.invoke( + "What are LangChain content blocks?", + tools=[mcp_tool], # [!code highlight] + ) + ``` + + + + ```python + from anthropic.types.beta import BetaMCPToolsetParam # [!code highlight] + from langchain.agents import create_agent + from langchain_anthropic import ChatAnthropic + + mcp_servers = [ + { + "type": "url", + "url": "https://docs.langchain.com/mcp", + "name": "LangChain Docs", + } + ] + + mcp_tool = BetaMCPToolsetParam( # [!code highlight] + type="mcp_toolset", # [!code highlight] + mcp_server_name="LangChain Docs", # [!code highlight] + ) # [!code highlight] + + agent = 
create_agent(
+        model=ChatAnthropic(
+            model="claude-sonnet-4-5-20250929",
+            mcp_servers=mcp_servers, # [!code highlight]
+        ),
+        tools=[mcp_tool], # [!code highlight]
+    )
+
+    result = agent.invoke({
+        "messages": [{"role": "user", "content": "What are LangChain content blocks?"}]
+    })
+
+    for message in result["messages"]:
+        message.pretty_print()
+    ```
+  
+
+  
+    ```python
+    from langchain_anthropic import ChatAnthropic
+
+    mcp_servers = [
+        {
+            "type": "url",
+            "url": "https://docs.langchain.com/mcp",
+            "name": "LangChain Docs",
+            # "tool_configuration": { # optional configuration
+            #     "enabled": True,
+            #     "allowed_tools": ["ask_question"],
+            # },
+            # "authorization_token": "PLACEHOLDER", # optional authorization if needed
+        }
+    ]
+
+    model = ChatAnthropic(
+        model="claude-sonnet-4-5-20250929",
+        mcp_servers=mcp_servers, # [!code highlight]
+    )
+
+    response = model.invoke(
+        "What are LangChain content blocks?",
+        tools=[{"type": "mcp_toolset", "mcp_server_name": "LangChain Docs"}], # [!code highlight]
+    )
+    response.content_blocks
+    ```
+  
+

### Text editor

-The text editor tool can be used to view and modify text files. See docs [here](https://platform.claude.com/docs/en/agents-and-tools/tool-use/text-editor-tool) for details.
+Claude supports a client-side text editor tool that can be used to view and modify local text files. See docs [here](https://platform.claude.com/docs/en/agents-and-tools/tool-use/text-editor-tool) for details.

-```python
-from langchain_anthropic import ChatAnthropic
+  
+  
+    ```python expandable
+    from typing import Literal

-model = ChatAnthropic(model="claude-sonnet-4-5-20250929")
+    from anthropic.types.beta import BetaToolTextEditor20250728Param # [!code highlight]
+    from langchain_anthropic import ChatAnthropic
+    from langchain.messages import HumanMessage, ToolMessage
+    from langchain.tools import tool

-tool = {"type": "text_editor_20250728", "name": "str_replace_based_edit_tool"}
-model_with_tools = model.bind_tools([tool])
+    tool_spec = BetaToolTextEditor20250728Param( # [!code highlight]
+        name="str_replace_based_edit_tool", # [!code highlight]
+        type="text_editor_20250728", # [!code highlight]
+    ) # [!code highlight]

-response = model_with_tools.invoke(
-    "There's a syntax error in my primes.py file. Can you help me fix it?"
-)
-print(response.text)
-response.tool_calls
-```
+    # Simple in-memory file storage for demonstration
+    files: dict[str, str] = {
+        "/workspace/primes.py": "def is_prime(n):\n    if n < 2\n        return False\n    return True"
+    }

-```output
-I'll help you fix the syntax error in your primes.py file. Let me first take a look at the file to identify the issue.
-``` + @tool(extras={"provider_tool_definition": tool_spec}) # [!code highlight] + def str_replace_based_edit_tool( + *, + command: Literal["view", "create", "str_replace", "insert", "undo_edit"], + path: str, + file_text: str | None = None, + old_str: str | None = None, + new_str: str | None = None, + insert_line: int | None = None, + view_range: list[int] | None = None, + **kw + ): + """View and edit text files.""" + if command == "view": + if path not in files: + return f"Error: File {path} not found" + content = files[path] + if view_range: + lines = content.splitlines() + start, end = view_range[0] - 1, view_range[1] + return "\n".join(lines[start:end]) + return content + elif command == "create": + files[path] = file_text or "" + return f"Created {path}" + elif command == "str_replace" and old_str is not None: + if path not in files: + return f"Error: File {path} not found" + files[path] = files[path].replace(old_str, new_str or "", 1) + return f"Replaced in {path}" + # ... implement other commands + return f"Executed {command} on {path}" + + model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + model_with_tools = model.bind_tools([str_replace_based_edit_tool]) # [!code highlight] + + # Initial request + messages = [HumanMessage("There's a syntax error in my primes.py file. Can you fix it?")] + response = model_with_tools.invoke(messages) + print(response.content_blocks) + + # Tool execution loop + while response.tool_calls: + tool_messages = [] + for tool_call in response.tool_calls: + result = str_replace_based_edit_tool.invoke(tool_call["args"]) + tool_messages.append( + ToolMessage(content=result, tool_call_id=tool_call["id"]) + ) + + messages = [*messages, response, *tool_messages] + response = model_with_tools.invoke(messages) + print(response.content_blocks) + ``` + + + + ```python expandable + from typing import Literal + + from anthropic.types.beta import BetaToolTextEditor20250728Param # [!code highlight] + from langchain.agents import create_agent + from langchain_anthropic import ChatAnthropic + from langchain.tools import tool + + # Simple in-memory file storage + files: dict[str, str] = { + "/workspace/primes.py": "def is_prime(n):\n if n < 2\n return False\n return True" + } -```output -[{'name': 'str_replace_based_edit_tool', - 'args': {'command': 'view', 'path': '/root'}, - 'id': 'toolu_011BG5RbqnfBYkD8qQonS9k9', - 'type': 'tool_call'}] -``` + tool_spec = BetaToolTextEditor20250728Param( # [!code highlight] + name="str_replace_based_edit_tool", # [!code highlight] + type="text_editor_20250728", # [!code highlight] + ) # [!code highlight] + + + @tool(extras={"provider_tool_definition": tool_spec}) # [!code highlight] + def str_replace_based_edit_tool( + *, + command: Literal["view", "create", "str_replace", "insert", "undo_edit"], + path: str, + file_text: str | None = None, + old_str: str | None = None, + new_str: str | None = None, + **kw + ): + """View and edit text files.""" + if command == "view": + return files.get(path, f"Error: File {path} not found") + elif command == "create": + files[path] = file_text or "" + return f"Created {path}" + elif command == "str_replace" and old_str is not None: + if path not in files: + return f"Error: File {path} not found" + files[path] = files[path].replace(old_str, new_str or "", 1) + return f"Replaced in {path}" + return f"Executed {command} on {path}" + + + agent = create_agent( + model=ChatAnthropic(model="claude-sonnet-4-5-20250929"), + tools=[str_replace_based_edit_tool], # [!code highlight] + ) + + result = 
agent.invoke({ + "messages": [{"role": "user", "content": "Fix the syntax error in /workspace/primes.py"}] + }) + + for message in result["messages"]: + message.pretty_print() + ``` + + + + ```python + from langchain_anthropic import ChatAnthropic + + model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + + editor_tool = {"type": "text_editor_20250728", "name": "str_replace_based_edit_tool"} # [!code highlight] + + model_with_tools = model.bind_tools([editor_tool]) # [!code highlight] + + response = model_with_tools.invoke( + "There's a syntax error in my primes.py file. Can you help me fix it?" + ) + # You must handle execution of the text editor commands in response.tool_calls via a tool execution loop + ``` + + Using @[`create_agent`] handles the tool execution loop automatically. + + ```python + [{'name': 'str_replace_based_edit_tool', + 'args': {'command': 'view', 'path': '/root'}, + 'id': 'toolu_011BG5RbqnfBYkD8qQonS9k9', + 'type': 'tool_call'}] + ``` + + **Available tool versions:** @@ -1677,41 +2396,147 @@ I'll help you fix the syntax error in your primes.py file. Let me first take a l - `text_editor_20250728` (recommended) + + For a "batteries-included" implementation, consider using [`StateClaudeTextEditorMiddleware`](/oss/integrations/middleware/anthropic#text-editor) or [`FilesystemClaudeTextEditorMiddleware`](/oss/integrations/middleware/anthropic#text-editor) which provide LangGraph state integration or filesystem persistence, path validation, and other features. + + ### Web fetching -Claude can use a [web fetching tool](https://platform.claude.com/docs/en/agents-and-tools/tool-use/web-fetch-tool) to retrieve full content from specified web pages and PDF documents and ground its responses with citations. +Claude can use a server-side [web fetching tool](https://platform.claude.com/docs/en/agents-and-tools/tool-use/web-fetch-tool) to retrieve full content from specified web pages and PDF documents and ground its responses with citations. 
-```python -from langchain_anthropic import ChatAnthropic + + + ```python + from anthropic.types.beta import BetaWebFetchTool20250910Param # [!code highlight] + from langchain_anthropic import ChatAnthropic -model = ChatAnthropic(model="claude-haiku-4-5-20251001") + model = ChatAnthropic(model="claude-haiku-4-5-20251001") -tool = {"type": "web_fetch_20250910", "name": "web_fetch", "max_uses": 3} # [!code highlight] -model_with_tools = model.bind_tools([tool]) + fetch_tool = BetaWebFetchTool20250910Param( # [!code highlight] + name="web_fetch", # [!code highlight] + type="web_fetch_20250910", # [!code highlight] + max_uses=3, # [!code highlight] + ) # [!code highlight] -response = model_with_tools.invoke( - "Please analyze the content at https://docs.langchain.com/" -) -``` + model_with_tools = model.bind_tools([fetch_tool]) # [!code highlight] + + response = model_with_tools.invoke( + "Please analyze the content at https://docs.langchain.com/" + ) + ``` + + + + ```python + from anthropic.types.beta import BetaWebFetchTool20250910Param # [!code highlight] + from langchain.agents import create_agent + from langchain_anthropic import ChatAnthropic + + fetch_tool = BetaWebFetchTool20250910Param( # [!code highlight] + name="web_fetch", # [!code highlight] + type="web_fetch_20250910", # [!code highlight] + max_uses=3, # [!code highlight] + ) # [!code highlight] + + agent = create_agent( + model=ChatAnthropic(model="claude-haiku-4-5-20251001"), + tools=[fetch_tool], # [!code highlight] + ) + + result = agent.invoke({ + "messages": [{"role": "user", "content": "Analyze https://docs.langchain.com/"}] + }) + + for message in result["messages"]: + message.pretty_print() + ``` + + + + ```python + from langchain_anthropic import ChatAnthropic + + model = ChatAnthropic(model="claude-haiku-4-5-20251001") + + fetch_tool = {"type": "web_fetch_20250910", "name": "web_fetch", "max_uses": 3} # [!code highlight] + + model_with_tools = model.bind_tools([fetch_tool]) # [!code highlight] + + response = model_with_tools.invoke( + "Please analyze the content at https://docs.langchain.com/" + ) + ``` + + ### Web search -Claude can use a [web search tool](https://platform.claude.com/docs/en/agents-and-tools/tool-use/web-search-tool) to run searches and ground its responses with citations. +Claude can use a server-side [web search tool](https://platform.claude.com/docs/en/agents-and-tools/tool-use/web-search-tool) to run searches and ground its responses with citations. 
**Web search tool is supported since `langchain-anthropic>=0.3.13`** -```python -from langchain_anthropic import ChatAnthropic + + + ```python + from anthropic.types.beta import BetaWebSearchTool20250305Param # [!code highlight] + from langchain_anthropic import ChatAnthropic + + model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + + search_tool = BetaWebSearchTool20250305Param( # [!code highlight] + name="web_search", # [!code highlight] + type="web_search_20250305", # [!code highlight] + max_uses=3, # [!code highlight] + ) # [!code highlight] + + model_with_tools = model.bind_tools([search_tool]) # [!code highlight] + + response = model_with_tools.invoke("How do I update a web app to TypeScript 5.5?") + ``` + + + + ```python + from anthropic.types.beta import BetaWebSearchTool20250305Param # [!code highlight] + from langchain.agents import create_agent + from langchain_anthropic import ChatAnthropic + + search_tool = BetaWebSearchTool20250305Param( # [!code highlight] + name="web_search", # [!code highlight] + type="web_search_20250305", # [!code highlight] + max_uses=3, # [!code highlight] + ) # [!code highlight] + + agent = create_agent( + model=ChatAnthropic(model="claude-sonnet-4-5-20250929"), + tools=[search_tool], # [!code highlight] + ) -model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + result = agent.invoke({ + "messages": [{"role": "user", "content": "How do I update a web app to TypeScript 5.5?"}] + }) -tool = {"type": "web_search_20250305", "name": "web_search", "max_uses": 3} # [!code highlight] -model_with_tools = model.bind_tools([tool]) + for message in result["messages"]: + message.pretty_print() + ``` + -response = model_with_tools.invoke("How do I update a web app to TypeScript 5.5?") -``` + + ```python + from langchain_anthropic import ChatAnthropic + + model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + + search_tool = {"type": "web_search_20250305", "name": "web_search", "max_uses": 3} # [!code highlight] + + model_with_tools = model.bind_tools([search_tool]) # [!code highlight] + + response = model_with_tools.invoke("How do I update a web app to TypeScript 5.5?") + ``` + + ### Memory tool @@ -1721,30 +2546,185 @@ Claude supports a memory tool for client-side storage and retrieval of context a **Anthropic's built-in memory tool is supported since `langchain-anthropic>=0.3.21`** -```python -from langchain_anthropic import ChatAnthropic + + + ```python expandable + from typing import Literal -model = ChatAnthropic( - model="claude-sonnet-4-5-20250929", -) -model_with_tools = model.bind_tools([{"type": "memory_20250818", "name": "memory"}]) # [!code highlight] + from anthropic.types.beta import BetaMemoryTool20250818Param # [!code highlight] + from langchain_anthropic import ChatAnthropic + from langchain.messages import HumanMessage, ToolMessage + from langchain.tools import tool -response = model_with_tools.invoke("What are my interests?") -response.content_blocks -``` + tool_spec = BetaMemoryTool20250818Param( # [!code highlight] + name="memory", # [!code highlight] + type="memory_20250818", # [!code highlight] + ) # [!code highlight] -```output -[{'type': 'text', - 'text': "I'll check my memory to see what information I have about your interests."}, - {'type': 'tool_call', - 'name': 'memory', - 'args': {'command': 'view', 'path': '/memories'}, - 'id': 'toolu_01XeP9sxx44rcZHFNqXSaKqh'}] -``` + # Simple in-memory storage for demonstration purposes + memory_store: dict[str, str] = { + "/memories/interests": "User enjoys Python programming and 
hiking" + } + + + @tool(extras={"provider_tool_definition": tool_spec}) # [!code highlight] + def memory( + *, + command: Literal["view", "create", "str_replace", "insert", "delete", "rename"], + path: str, + content: str | None = None, + old_str: str | None = None, + new_str: str | None = None, + insert_line: int | None = None, + new_path: str | None = None, + **kw, + ): + """Manage persistent memory across conversations.""" + if command == "view": + if path == "/memories": + # List all memories + return "\n".join(memory_store.keys()) or "No memories stored" + return memory_store.get(path, f"No memory at {path}") + elif command == "create": + memory_store[path] = content or "" + return f"Created memory at {path}" + elif command == "str_replace" and old_str is not None: + if path in memory_store: + memory_store[path] = memory_store[path].replace(old_str, new_str or "", 1) + return f"Updated {path}" + elif command == "delete": + memory_store.pop(path, None) + return f"Deleted {path}" + # ... implement other commands + return f"Executed {command} on {path}" + + + model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + model_with_tools = model.bind_tools([memory]) # [!code highlight] + + # Initial request + messages = [HumanMessage("What are my interests?")] + response = model_with_tools.invoke(messages) + print(response.content_blocks) + + # Tool execution loop + while response.tool_calls: + tool_messages = [] + for tool_call in response.tool_calls: + result = memory.invoke(tool_call["args"]) + tool_messages.append(ToolMessage(content=result, tool_call_id=tool_call["id"])) + + messages = [*messages, response, *tool_messages] + response = model_with_tools.invoke(messages) + print(response.content_blocks) + ``` + + ```python + [{'type': 'text', + 'text': "I'll check my memory to see what information I have about your interests."}, + {'type': 'tool_call', + 'name': 'memory', + 'args': {'command': 'view', 'path': '/memories'}, + 'id': 'toolu_01XeP9sxx44rcZHFNqXSaKqh'}] + ``` + + + + ```python expandable + from typing import Literal + + from anthropic.types.beta import BetaMemoryTool20250818Param # [!code highlight] + from langchain.agents import create_agent + from langchain_anthropic import ChatAnthropic + from langchain.tools import tool + + # Simple in-memory storage + memory_store: dict[str, str] = { + "/memories/interests": "User enjoys Python programming and hiking" + } + + tool_spec = BetaMemoryTool20250818Param( # [!code highlight] + name="memory", # [!code highlight] + type="memory_20250818", # [!code highlight] + ) # [!code highlight] + + + @tool(extras={"provider_tool_definition": tool_spec}) # [!code highlight] + def memory( + *, + command: Literal["view", "create", "str_replace", "insert", "delete", "rename"], + path: str, + content: str | None = None, + old_str: str | None = None, + new_str: str | None = None, + **kw + ): + """Manage persistent memory across conversations.""" + if command == "view": + if path == "/memories": + return "\n".join(memory_store.keys()) or "No memories stored" + return memory_store.get(path, f"No memory at {path}") + elif command == "create": + memory_store[path] = content or "" + return f"Created memory at {path}" + elif command == "str_replace" and old_str is not None: + if path in memory_store: + memory_store[path] = memory_store[path].replace(old_str, new_str or "", 1) + return f"Updated {path}" + elif command == "delete": + memory_store.pop(path, None) + return f"Deleted {path}" + return f"Executed {command} on {path}" + + + agent = 
create_agent( + model=ChatAnthropic(model="claude-sonnet-4-5-20250929"), + tools=[memory], # [!code highlight] + ) + + result = agent.invoke({ + "messages": [{"role": "user", "content": "What are my interests?"}] + }) + + for message in result["messages"]: + message.pretty_print() + ``` + + Using @[`create_agent`] handles the tool execution loop automatically. + + + + ```python + from langchain_anthropic import ChatAnthropic + + model = ChatAnthropic( + model="claude-sonnet-4-5-20250929", + ) + model_with_tools = model.bind_tools([{"type": "memory_20250818", "name": "memory"}]) # [!code highlight] + + response = model_with_tools.invoke("What are my interests?") + response.content_blocks + # You must handle execution of the memory commands in response.tool_calls via a tool execution loop + ``` + + ```python + [{'type': 'text', + 'text': "I'll check my memory to see what information I have about your interests."}, + {'type': 'tool_call', + 'name': 'memory', + 'args': {'command': 'view', 'path': '/memories'}, + 'id': 'toolu_01XeP9sxx44rcZHFNqXSaKqh'}] + ``` + + + + + For a "batteries-included" implementation, consider using [`StateClaudeMemoryMiddleware`](/oss/integrations/middleware/anthropic#memory) or [`FilesystemClaudeMemoryMiddleware`](/oss/integrations/middleware/anthropic#memory) which provide LangGraph state integration or filesystem persistence, automatic system prompt injection, and other features. + ### Tool search -Claude supports a [tool search](https://platform.claude.com/docs/en/agents-and-tools/tool-use/tool-search-tool) feature that enables dynamic tool discovery and loading. Instead of loading all tool definitions into the context window upfront, Claude can search your tool catalog and load only the tools it needs. +Claude supports a server-side [tool search](https://platform.claude.com/docs/en/agents-and-tools/tool-use/tool-search-tool) feature that enables dynamic tool discovery and loading. Instead of loading all tool definitions into the context window upfront, Claude can search your tool catalog and load only the tools it needs. This is useful when: @@ -1759,38 +2739,135 @@ There are two tool search variants: Use the `extras` parameter to specify `defer_loading` on LangChain tools: -```python -from langchain_anthropic import ChatAnthropic -from langchain.tools import tool - -@tool(extras={"defer_loading": True}) # [!code highlight] -def get_weather(location: str, unit: str = "fahrenheit") -> str: - """Get the current weather for a location. - - Args: - location: City name - unit: Temperature unit (celsius or fahrenheit) - """ - return f"Weather in {location}: Sunny" - -@tool(extras={"defer_loading": True}) # [!code highlight] -def search_files(query: str) -> str: - """Search through files in the workspace. - - Args: - query: Search query - """ - return f"Found files matching '{query}'" - -model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + + + ```python expandable + from anthropic.types.beta import BetaToolSearchToolRegex20251119Param # [!code highlight] + from langchain_anthropic import ChatAnthropic + from langchain.tools import tool + + @tool(extras={"defer_loading": True}) # [!code highlight] + def get_weather(location: str, unit: str = "fahrenheit") -> str: + """Get the current weather for a location. 
+ + Args: + location: City name + unit: Temperature unit (celsius or fahrenheit) + """ + return f"Weather in {location}: Sunny" + + @tool(extras={"defer_loading": True}) # [!code highlight] + def search_files(query: str) -> str: + """Search through files in the workspace. + + Args: + query: Search query + """ + return f"Found files matching '{query}'" + + model = ChatAnthropic(model="claude-sonnet-4-5-20250929") + + tool_search = BetaToolSearchToolRegex20251119Param( # [!code highlight] + name="tool_search_tool_regex", # [!code highlight] + type="tool_search_tool_regex_20251119", # [!code highlight] + ) # [!code highlight] + + model_with_tools = model.bind_tools([ + tool_search, # [!code highlight] + get_weather, + search_files, + ]) + response = model_with_tools.invoke("What's the weather in San Francisco?") + ``` + + + + ```python expandable + from anthropic.types.beta import BetaToolSearchToolRegex20251119Param # [!code highlight] + from langchain.agents import create_agent + from langchain_anthropic import ChatAnthropic + from langchain.tools import tool + + tool_search = BetaToolSearchToolRegex20251119Param( # [!code highlight] + name="tool_search_tool_regex", # [!code highlight] + type="tool_search_tool_regex_20251119", # [!code highlight] + ) # [!code highlight] + + + @tool(extras={"defer_loading": True}) # [!code highlight] + def get_weather(location: str, unit: str = "fahrenheit") -> str: + """Get the current weather for a location. + + Args: + location: City name + unit: Temperature unit (celsius or fahrenheit) + """ + return f"Weather in {location}: Sunny" + + + @tool(extras={"defer_loading": True}) # [!code highlight] + def search_files(query: str) -> str: + """Search through files in the workspace. + + Args: + query: Search query + """ + return f"Found files matching '{query}'" + + + agent = create_agent( + model=ChatAnthropic(model="claude-sonnet-4-5-20250929"), + tools=[ + tool_search, # [!code highlight] + get_weather, + search_files, + ], + ) -model_with_tools = model.bind_tools([ - {"type": "tool_search_tool_regex_20251119", "name": "tool_search_tool_regex"}, - get_weather, - search_files, -]) -response = model_with_tools.invoke("What's the weather in San Francisco?") -``` + result = agent.invoke({ + "messages": [{"role": "user", "content": "What's the weather in San Francisco?"}] + }) + + for message in result["messages"]: + message.pretty_print() + ``` + + + + ```python + from langchain_anthropic import ChatAnthropic + from langchain.tools import tool + + @tool(extras={"defer_loading": True}) # [!code highlight] + def get_weather(location: str, unit: str = "fahrenheit") -> str: + """Get the current weather for a location. + + Args: + location: City name + unit: Temperature unit (celsius or fahrenheit) + """ + return f"Weather in {location}: Sunny" + + @tool(extras={"defer_loading": True}) # [!code highlight] + def search_files(query: str) -> str: + """Search through files in the workspace. 
+
+    Args:
+        query: Search query
+    """
+    return f"Found files matching '{query}'"
+
+    model = ChatAnthropic(model="claude-sonnet-4-5-20250929")
+
+    model_with_tools = model.bind_tools([
+        {"type": "tool_search_tool_regex_20251119", "name": "tool_search_tool_regex"}, # [!code highlight]
+        get_weather,
+        search_files,
+    ])
+    response = model_with_tools.invoke("What's the weather in San Francisco?")
+    ```
+
+

```mermaid
sequenceDiagram

@@ -1815,6 +2892,52 @@

See the [Claude documentation](https://platform.claude.com/docs/en/agents-and-tools/tool-use/tool-search-tool) for more details on tool search, including usage with MCP servers and client-side implementations.

+
+## Response metadata
+
+```python
+ai_msg = model.invoke(messages)
+ai_msg.response_metadata
+```
+
+```python
+{
+    "id": "msg_013xU6FHEGEq76aP4RgFerVT",
+    "model": "claude-sonnet-4-5-20250929",
+    "stop_reason": "end_turn",
+    "stop_sequence": None,
+    "usage": {"input_tokens": 25, "output_tokens": 11},
+}
+```
+
+## Token usage metadata
+
+```python
+ai_msg = model.invoke(messages)
+ai_msg.usage_metadata
+```
+
+```python
+{"input_tokens": 25, "output_tokens": 11, "total_tokens": 36}
+```
+
+Message chunks containing token usage will be included during streaming by
+default:
+
+```python
+stream = model.stream(messages)
+full = next(stream)
+for chunk in stream:
+    full += chunk
+full.usage_metadata
+```
+
+```python
+{"input_tokens": 25, "output_tokens": 11, "total_tokens": 36}
+```
+
+This can be disabled by setting `stream_usage=False` on the `stream` call or when initializing `ChatAnthropic`.
+
---

## API reference

diff --git a/src/oss/python/integrations/chat/google_anthropic_vertex.mdx b/src/oss/python/integrations/chat/google_anthropic_vertex.mdx
new file mode 100644
index 0000000000..5651e7fd52
--- /dev/null
+++ b/src/oss/python/integrations/chat/google_anthropic_vertex.mdx
@@ -0,0 +1,104 @@
+---
+title: ChatAnthropicVertex
+description: Get started using Anthropic [chat models](/oss/langchain/models) via Vertex AI in LangChain.
+---
+
+> [Anthropic Claude 3](https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/use-claude) models on Vertex AI offer fully managed and serverless models as APIs. To use a Claude model on Vertex AI, send a request directly to the Vertex AI API endpoint. Because Anthropic Claude 3 models use a managed API, there's no need to provision or manage infrastructure.
+
+NOTE: Anthropic models on Vertex AI are implemented as chat models through the `ChatAnthropicVertex` class.
+
+```python
+!pip install -U langchain-google-vertexai "anthropic[vertex]"
+```
+
+```python
+from langchain.messages import (
+    AIMessage,
+    AIMessageChunk,
+    HumanMessage,
+    SystemMessage,
+)
+from langchain_core.outputs import LLMResult
+from langchain_google_vertexai.model_garden import ChatAnthropicVertex
+```
+
+NOTE: Specify the correct [Claude 3 model version](https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/use-claude#claude-opus).
+
+Avoid the model IDs that lack an @-version suffix (claude-3-opus, claude-3-sonnet, or claude-3-haiku); always pin a version, such as claude-3-haiku@20240307.
+
+```python
+# TODO: Replace below with your project ID and region
+project = ""
+location = ""
+
+# Initialise the model
+model = ChatAnthropicVertex(
+    model_name="claude-3-haiku@20240307",
+    project=project,
+    location=location,
+)
+```
+
+```python
+# Prepare input data for the model
+raw_context = (
+    "My name is Peter. You are my personal assistant. My favorite movies "
+    "are Lord of the Rings and Hobbit."
+)
+question = (
+    "Hello, could you recommend a good movie for me to watch this evening, please?"
+)
+context = SystemMessage(content=raw_context)
+message = HumanMessage(content=question)
+```
+
+```python
+# Invoke the model
+response = model.invoke([context, message])
+print(response.content)
+```
+
+```output
+Since your favorite movies are the Lord of the Rings and Hobbit trilogies, I would recommend checking out some other epic fantasy films that have a similar feel:
+
+1. The Chronicles of Narnia series - These films are based on the beloved fantasy novels by C.S. Lewis and have a great blend of adventure, magic, and memorable characters.
+
+2. Stardust - This 2007 fantasy film, based on the Neil Gaiman novel, has an excellent cast and a charming, whimsical tone.
+
+3. The Golden Compass - The first film adaptation of Philip Pullman's His Dark Materials series, with stunning visuals and a compelling story.
+
+4. Pan's Labyrinth - Guillermo del Toro's dark, fairy tale-inspired masterpiece set against the backdrop of the Spanish Civil War.
+
+5. The Princess Bride - A classic fantasy adventure film with humor, romance, and unforgettable characters.
+
+Let me know if any of those appeal to you or if you'd like me to suggest something else! I'm happy to provide more personalized recommendations.
+```
+
+```python
+# You can also override the model name when invoking the model
+response = model.invoke([context, message], model_name="claude-3-sonnet@20240229")
+print(response.content)
+```
+
+```output
+Sure, I'd be happy to recommend a movie for you! Since you mentioned that The Lord of the Rings and The Hobbit are among your favorite movies, I'll suggest some other epic fantasy/adventure films you might enjoy:
+
+1. The Princess Bride (1987) - A classic fairy tale with adventure, romance, and a lot of wit and humor. It has an all-star cast and very quotable lines.
+
+2. Willow (1988) - A fun fantasy film produced by George Lucas with fairies, dwarves, and brownies going on an epic quest. Has a similar tone to the Lord of the Rings movies.
+
+3. Stardust (2007) - An underrated fantasy adventure based on the Neil Gaiman novel about a young man entering a magical kingdom to retrieve a fallen star. Great cast and visuals.
+
+4. The Chronicles of Narnia series - The Lion, The Witch and The Wardrobe is the best known, but the other Narnia films are also very well done fantasy epics.
+
+5. The Golden Compass (2007) - First installment of the His Dark Materials trilogy, set in a parallel universe with armored polar bears and truth-seeking devices.
+
+Let me know if you'd like any other suggestions or have a particular style of movie in mind! I aimed for entertaining fantasy/adventure flicks similar to Lord of the Rings.
+```
+
+```python
+# Use streaming responses
+sync_response = model.stream([context, message], model_name="claude-3-haiku@20240307")
+for chunk in sync_response:
+    print(chunk.content)
+```
diff --git a/src/oss/python/integrations/chat/llama2_chat.mdx b/src/oss/python/integrations/chat/llama2_chat.mdx
index d325c66e47..af629cd1c3 100644
--- a/src/oss/python/integrations/chat/llama2_chat.mdx
+++ b/src/oss/python/integrations/chat/llama2_chat.mdx
@@ -7,7 +7,7 @@ This notebook shows how to augment Llama-2 `LLM`s with the `Llama2Chat` wrapper

`Llama2Chat` is a generic wrapper that implements `BaseChatModel` and can therefore be used in applications as [chat model](/oss/langchain/models). 
`Llama2Chat` converts a list of Messages into the [required chat prompt format](https://huggingface.co/blog/llama2#how-to-prompt-llama-2) and forwards the formatted prompt as `str` to the wrapped `LLM`. ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain.memory import ConversationBufferMemory from langchain_experimental.chat_models import Llama2Chat ``` diff --git a/src/oss/python/integrations/chat/llama_api.mdx b/src/oss/python/integrations/chat/llama_api.mdx index 38a39dc69c..f369e712e4 100644 --- a/src/oss/python/integrations/chat/llama_api.mdx +++ b/src/oss/python/integrations/chat/llama_api.mdx @@ -27,7 +27,7 @@ model = ChatLlamaAPI(client=llama) ``` ```python -from langchain.chains import create_tagging_chain +from langchain_classic.chains import create_tagging_chain schema = { "properties": { diff --git a/src/oss/python/integrations/chat/maritalk.mdx b/src/oss/python/integrations/chat/maritalk.mdx index c8f3a6405c..0d7eb5fe04 100644 --- a/src/oss/python/integrations/chat/maritalk.mdx +++ b/src/oss/python/integrations/chat/maritalk.mdx @@ -132,7 +132,7 @@ retriever = BM25Retriever.from_documents(texts) Now that we have our searcher, we just need to implement a prompt specifying the task and invoke the chain. ```python -from langchain.chains.question_answering import load_qa_chain +from langchain_classic.chains.question_answering import load_qa_chain prompt = """Baseado nos seguintes documentos, responda a pergunta abaixo. diff --git a/src/oss/python/integrations/chat/openai.mdx b/src/oss/python/integrations/chat/openai.mdx index 3145b1dc45..b2b843d69a 100644 --- a/src/oss/python/integrations/chat/openai.mdx +++ b/src/oss/python/integrations/chat/openai.mdx @@ -1297,8 +1297,9 @@ public class User {'token_usage': {'completion_tokens': 226, 'prompt_tokens': 166, 'total_tokens': 392, 'completion_tokens_details': {'accepted_prediction_tokens': 49, 'audio_tokens': None, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 107}, 'prompt_tokens_details': {'audio_tokens': None, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-2024-08-06', 'system_fingerprint': 'fp_45cf54deae', 'finish_reason': 'stop', 'logprobs': None} ``` -Note that currently predictions are billed as additional tokens and may increase your usage and costs in exchange for this reduced latency. ---- + +Predictions are billed as additional tokens and may increase your usage and costs in exchange for this reduced latency. 
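+
+As a quick check of how much of a supplied prediction was reused, you can read the counters from the metadata dump above (a minimal sketch; `response` is assumed to be the message returned by the predicted-output call):
+
+```python
+# Accepted tokens were reused from the prediction; rejected tokens were
+# generated speculatively and still count toward billing.
+details = response.response_metadata["token_usage"]["completion_tokens_details"]
+print(details["accepted_prediction_tokens"], details["rejected_prediction_tokens"])
+```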
+ ## Audio Generation (Preview) diff --git a/src/oss/python/integrations/document_loaders/amazon_textract.mdx b/src/oss/python/integrations/document_loaders/amazon_textract.mdx index 9cb80458a0..80365e320f 100644 --- a/src/oss/python/integrations/document_loaders/amazon_textract.mdx +++ b/src/oss/python/integrations/document_loaders/amazon_textract.mdx @@ -134,7 +134,7 @@ os.environ["OPENAI_API_KEY"] = "your-OpenAI-API-key" ``` ```python -from langchain.chains.question_answering import load_qa_chain +from langchain_classic.chains.question_answering import load_qa_chain from langchain_openai import OpenAI chain = load_qa_chain(llm=OpenAI(), chain_type="map_reduce") diff --git a/src/oss/python/integrations/document_loaders/docugami.mdx b/src/oss/python/integrations/document_loaders/docugami.mdx index 2fa1481bd9..f71d396104 100644 --- a/src/oss/python/integrations/document_loaders/docugami.mdx +++ b/src/oss/python/integrations/document_loaders/docugami.mdx @@ -125,7 +125,7 @@ The documents returned by the loader are already split, so we don't need to use We will just use the output of the `DocugamiLoader` as-is to set up a retrieval QA chain the usual way. ```python -from langchain.chains import RetrievalQA +from langchain_classic.chains import RetrievalQA from langchain_chroma import Chroma from langchain_openai import OpenAI, OpenAIEmbeddings @@ -203,7 +203,7 @@ We can use a self-querying retriever to improve our query accuracy, using this a ``` ```python -from langchain.chains.query_constructor.schema import AttributeInfo +from langchain_classic.chains.query_constructor.schema import AttributeInfo from langchain_classic.retrievers.self_query.base import SelfQueryRetriever from langchain_chroma import Chroma diff --git a/src/oss/python/integrations/document_loaders/image_captions.mdx b/src/oss/python/integrations/document_loaders/image_captions.mdx index 6c05f14685..45260295ac 100644 --- a/src/oss/python/integrations/document_loaders/image_captions.mdx +++ b/src/oss/python/integrations/document_loaders/image_captions.mdx @@ -67,8 +67,8 @@ retriever = vectorstore.as_retriever(k=2) ### Query ```python -from langchain.chains import create_retrieval_chain -from langchain.chains.combine_documents import create_stuff_documents_chain +from langchain_classic.chains import create_retrieval_chain +from langchain_classic.chains.combine_documents import create_stuff_documents_chain from langchain_core.prompts import ChatPromptTemplate from langchain_openai import ChatOpenAI diff --git a/src/oss/python/integrations/document_loaders/larksuite.mdx b/src/oss/python/integrations/document_loaders/larksuite.mdx index 92e520d1c0..6083a237ff 100644 --- a/src/oss/python/integrations/document_loaders/larksuite.mdx +++ b/src/oss/python/integrations/document_loaders/larksuite.mdx @@ -53,8 +53,7 @@ pprint(docs) ``` ```python -# see https://python.langchain.com/docs/use_cases/summarization for more details -from langchain.chains.summarize import load_summarize_chain +from langchain_classic.chains.summarize import load_summarize_chain from langchain_community.llms.fake import FakeListLLM llm = FakeListLLM() diff --git a/src/oss/python/integrations/document_loaders/needle.mdx b/src/oss/python/integrations/document_loaders/needle.mdx index 827079fe45..2057cf35ad 100644 --- a/src/oss/python/integrations/document_loaders/needle.mdx +++ b/src/oss/python/integrations/document_loaders/needle.mdx @@ -91,8 +91,8 @@ Below is a complete example of setting up a RAG pipeline with Needle within a ch ```python import os -from 
langchain.chains import create_retrieval_chain -from langchain.chains.combine_documents import create_stuff_documents_chain +from langchain_classic.chains import create_retrieval_chain +from langchain_classic.chains.combine_documents import create_stuff_documents_chain from langchain_community.retrievers.needle import NeedleRetriever from langchain_core.prompts import ChatPromptTemplate from langchain_openai import ChatOpenAI diff --git a/src/oss/python/integrations/document_loaders/psychic.mdx b/src/oss/python/integrations/document_loaders/psychic.mdx index 4a144fc021..925f3fb75f 100644 --- a/src/oss/python/integrations/document_loaders/psychic.mdx +++ b/src/oss/python/integrations/document_loaders/psychic.mdx @@ -39,7 +39,7 @@ documents = google_drive_loader.load() We can now convert these documents into embeddings and store them in a vector database like Chroma ```python -from langchain.chains import RetrievalQAWithSourcesChain +from langchain_classic.chains import RetrievalQAWithSourcesChain from langchain_chroma import Chroma from langchain_openai import OpenAI, OpenAIEmbeddings from langchain_text_splitters import CharacterTextSplitter diff --git a/src/oss/python/integrations/document_loaders/youtube_audio.mdx b/src/oss/python/integrations/document_loaders/youtube_audio.mdx index 111e3689b9..c2ecec58c0 100644 --- a/src/oss/python/integrations/document_loaders/youtube_audio.mdx +++ b/src/oss/python/integrations/document_loaders/youtube_audio.mdx @@ -96,7 +96,7 @@ docs[0].page_content[0:500] Given `Documents`, we can easily enable chat / question+answering. ```python -from langchain.chains import RetrievalQA +from langchain_classic.chains import RetrievalQA from langchain_community.vectorstores import FAISS from langchain_openai import ChatOpenAI, OpenAIEmbeddings from langchain_text_splitters import RecursiveCharacterTextSplitter diff --git a/src/oss/python/integrations/document_transformers/google_cloud_vertexai_rerank.mdx b/src/oss/python/integrations/document_transformers/google_cloud_vertexai_rerank.mdx index 1bf4035c14..cd24546059 100644 --- a/src/oss/python/integrations/document_transformers/google_cloud_vertexai_rerank.mdx +++ b/src/oss/python/integrations/document_transformers/google_cloud_vertexai_rerank.mdx @@ -470,7 +470,7 @@ page_content='Eventually, they changed the name to Google; the name of the searc This shows an example of a complete RAG chain with a simple prompt template on how you can perform reranking using the Vertex Ranking API. 
```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_core.documents import Document from langchain_core.prompts import PromptTemplate from langchain_core.runnables import RunnableParallel, RunnablePassthrough diff --git a/src/oss/python/integrations/document_transformers/jina_rerank.mdx b/src/oss/python/integrations/document_transformers/jina_rerank.mdx index ac13681b7d..bb562b0868 100644 --- a/src/oss/python/integrations/document_transformers/jina_rerank.mdx +++ b/src/oss/python/integrations/document_transformers/jina_rerank.mdx @@ -86,8 +86,8 @@ pretty_print_docs(compressed_docs) ```python from langchain_classic import hub -from langchain.chains import create_retrieval_chain -from langchain.chains.combine_documents import create_stuff_documents_chain +from langchain_classic.chains import create_retrieval_chain +from langchain_classic.chains.combine_documents import create_stuff_documents_chain retrieval_qa_chat_prompt = hub.pull("langchain-ai/retrieval-qa-chat") retrieval_qa_chat_prompt.pretty_print() diff --git a/src/oss/python/integrations/document_transformers/rankllm-reranker.mdx b/src/oss/python/integrations/document_transformers/rankllm-reranker.mdx index 580b5bca8d..1d6f34b52c 100644 --- a/src/oss/python/integrations/document_transformers/rankllm-reranker.mdx +++ b/src/oss/python/integrations/document_transformers/rankllm-reranker.mdx @@ -324,7 +324,7 @@ Together with our allies –we are right now enforcing powerful economic sanctio Can be used within a QA pipeline ```python -from langchain.chains import RetrievalQA +from langchain_classic.chains import RetrievalQA from langchain_openai import ChatOpenAI llm = ChatOpenAI(temperature=0) @@ -611,7 +611,7 @@ Tonight, I’d like to honor someone who has dedicated his life to serve this co You can use this retriever within a QA pipeline ```python -from langchain.chains import RetrievalQA +from langchain_classic.chains import RetrievalQA from langchain_openai import ChatOpenAI llm = ChatOpenAI(temperature=0) diff --git a/src/oss/python/integrations/document_transformers/voyageai-reranker.mdx b/src/oss/python/integrations/document_transformers/voyageai-reranker.mdx index 636ebf18b1..32b1f168b9 100644 --- a/src/oss/python/integrations/document_transformers/voyageai-reranker.mdx +++ b/src/oss/python/integrations/document_transformers/voyageai-reranker.mdx @@ -344,7 +344,7 @@ So let’s not abandon our streets. Or choose between safety and equal justice. 
You can of course use this retriever within a QA pipeline ```python -from langchain.chains import RetrievalQA +from langchain_classic.chains import RetrievalQA ``` ```python diff --git a/src/oss/python/integrations/graphs/arangodb.mdx b/src/oss/python/integrations/graphs/arangodb.mdx index 71ab7814f5..ab07f1ef5f 100644 --- a/src/oss/python/integrations/graphs/arangodb.mdx +++ b/src/oss/python/integrations/graphs/arangodb.mdx @@ -285,7 +285,7 @@ os.environ["OPENAI_API_KEY"] = "your-key-here" ``` ```python -from langchain.chains import ArangoGraphQAChain +from langchain_classic.chains import ArangoGraphQAChain from langchain_openai import ChatOpenAI chain = ArangoGraphQAChain.from_llm( diff --git a/src/oss/python/integrations/graphs/falkordb.mdx b/src/oss/python/integrations/graphs/falkordb.mdx index e6448f8c10..0811182962 100644 --- a/src/oss/python/integrations/graphs/falkordb.mdx +++ b/src/oss/python/integrations/graphs/falkordb.mdx @@ -17,7 +17,7 @@ docker run -p 6379:6379 -it --rm falkordb/falkordb Once launched, you create a database on the local machine and connect to it. ```python -from langchain.chains import FalkorDBQAChain +from langchain_classic.chains import FalkorDBQAChain from langchain_community.graphs import FalkorDBGraph from langchain_openai import ChatOpenAI ``` diff --git a/src/oss/python/integrations/graphs/hugegraph.mdx b/src/oss/python/integrations/graphs/hugegraph.mdx index d50c3ddc0b..a0b7c8130f 100644 --- a/src/oss/python/integrations/graphs/hugegraph.mdx +++ b/src/oss/python/integrations/graphs/hugegraph.mdx @@ -86,7 +86,7 @@ g.addEdge("ActedIn", "1:Robert De Niro", "2:The Godfather Part II", {}) We can now create the `HugeGraph` and `HugeGraphQAChain`. To create the `HugeGraph` we simply need to pass the database object to the `HugeGraph` constructor. ```python -from langchain.chains import HugeGraphQAChain +from langchain_classic.chains import HugeGraphQAChain from langchain_community.graphs import HugeGraph from langchain_openai import ChatOpenAI ``` diff --git a/src/oss/python/integrations/graphs/networkx.mdx b/src/oss/python/integrations/graphs/networkx.mdx index 2aa2cddb04..546a2db678 100644 --- a/src/oss/python/integrations/graphs/networkx.mdx +++ b/src/oss/python/integrations/graphs/networkx.mdx @@ -71,7 +71,7 @@ graph.get_triples() We can now use the graph QA chain to ask question of the graph ```python -from langchain.chains import GraphQAChain +from langchain_classic.chains import GraphQAChain ``` ```python diff --git a/src/oss/python/integrations/graphs/ontotext.mdx b/src/oss/python/integrations/graphs/ontotext.mdx index 2f50117c5e..73f64fd15a 100644 --- a/src/oss/python/integrations/graphs/ontotext.mdx +++ b/src/oss/python/integrations/graphs/ontotext.mdx @@ -156,7 +156,7 @@ We can now use the `OntotextGraphDBQAChain` to ask some questions. ```python import os -from langchain.chains import OntotextGraphDBQAChain +from langchain_classic.chains import OntotextGraphDBQAChain from langchain_openai import ChatOpenAI # We'll be using an OpenAI model which requires an OpenAI API Key. 
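+# (Illustrative continuation, not part of the original hunk: the key would
+# typically be set before constructing the chain, e.g.
+# os.environ["OPENAI_API_KEY"] = "...")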
diff --git a/src/oss/python/integrations/graphs/rdflib_sparql.mdx b/src/oss/python/integrations/graphs/rdflib_sparql.mdx index 3d810734df..c883fb8830 100644 --- a/src/oss/python/integrations/graphs/rdflib_sparql.mdx +++ b/src/oss/python/integrations/graphs/rdflib_sparql.mdx @@ -27,7 +27,7 @@ We have to install a python library: There are several sources you can run queries against, including files on the web, files you have available locally, SPARQL endpoints, e.g., [Wikidata](https://www.wikidata.org/wiki/Wikidata:Main_Page), and [triple stores](https://www.w3.org/wiki/LargeTripleStores). ```python -from langchain.chains import GraphSparqlQAChain +from langchain_classic.chains import GraphSparqlQAChain from langchain_community.graphs import RdfGraph from langchain_openai import ChatOpenAI ``` diff --git a/src/oss/python/integrations/graphs/timbr.mdx b/src/oss/python/integrations/graphs/timbr.mdx index 53869052fe..4d3bfaeb62 100644 --- a/src/oss/python/integrations/graphs/timbr.mdx +++ b/src/oss/python/integrations/graphs/timbr.mdx @@ -144,7 +144,7 @@ generate_sql_usage = usage_metadata.get('generate_sql', {}) You can combine multiple Timbr chains to create more complex workflows. ```python -from langchain.chains import SequentialChain +from langchain_classic.chains import SequentialChain from langchain_timbr import ExecuteTimbrQueryChain, GenerateAnswerChain execute_timbr_query_chain = ExecuteTimbrQueryChain( diff --git a/src/oss/python/integrations/llms/alibabacloud_pai_eas_endpoint.mdx b/src/oss/python/integrations/llms/alibabacloud_pai_eas_endpoint.mdx index 3cc3c9175b..d4af3e4ec7 100644 --- a/src/oss/python/integrations/llms/alibabacloud_pai_eas_endpoint.mdx +++ b/src/oss/python/integrations/llms/alibabacloud_pai_eas_endpoint.mdx @@ -10,7 +10,7 @@ pip install -qU langchain-community ``` ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms.pai_eas_endpoint import PaiEasEndpoint from langchain_core.prompts import PromptTemplate diff --git a/src/oss/python/integrations/llms/anyscale.mdx b/src/oss/python/integrations/llms/anyscale.mdx index a142c0cff4..6cf7d114e9 100644 --- a/src/oss/python/integrations/llms/anyscale.mdx +++ b/src/oss/python/integrations/llms/anyscale.mdx @@ -25,7 +25,7 @@ os.environ["ANYSCALE_API_KEY"] = ANYSCALE_API_KEY ``` ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms import Anyscale from langchain_core.prompts import PromptTemplate ``` diff --git a/src/oss/python/integrations/llms/aphrodite.mdx b/src/oss/python/integrations/llms/aphrodite.mdx index 96768c4014..89ee840f69 100644 --- a/src/oss/python/integrations/llms/aphrodite.mdx +++ b/src/oss/python/integrations/llms/aphrodite.mdx @@ -69,7 +69,7 @@ I'm Ayumu "Osaka" Kasuga, and I'm an avid anime and manga fan! 
I'm pretty introv ## Integrate the model in an LLMChain ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_core.prompts import PromptTemplate template = """Question: {question} diff --git a/src/oss/python/integrations/llms/azure_ml.mdx b/src/oss/python/integrations/llms/azure_ml.mdx index 9c5553d65a..641dd88f3d 100644 --- a/src/oss/python/integrations/llms/azure_ml.mdx +++ b/src/oss/python/integrations/llms/azure_ml.mdx @@ -157,7 +157,7 @@ print(summarized_text) ### Example: Dolly with LLMChain ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms.azureml_endpoint import DollyContentFormatter from langchain_core.prompts import PromptTemplate diff --git a/src/oss/python/integrations/llms/banana.mdx b/src/oss/python/integrations/llms/banana.mdx index bfbe9ecf6b..b63d7c1932 100644 --- a/src/oss/python/integrations/llms/banana.mdx +++ b/src/oss/python/integrations/llms/banana.mdx @@ -33,7 +33,7 @@ os.environ["BANANA_API_KEY"] = "YOUR_API_KEY" ``` ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms import Banana from langchain_core.prompts import PromptTemplate ``` diff --git a/src/oss/python/integrations/llms/baseten.mdx b/src/oss/python/integrations/llms/baseten.mdx index 6b634a5752..b61b5c7d8f 100644 --- a/src/oss/python/integrations/llms/baseten.mdx +++ b/src/oss/python/integrations/llms/baseten.mdx @@ -53,7 +53,7 @@ We can chain together multiple calls to one or multiple models, which is the who For example, we can replace GPT with Mistral in this demo of terminal emulation. ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain.memory import ConversationBufferWindowMemory from langchain_core.prompts import PromptTemplate diff --git a/src/oss/python/integrations/llms/bittensor.mdx b/src/oss/python/integrations/llms/bittensor.mdx index eabf08ce86..51d6894dfa 100644 --- a/src/oss/python/integrations/llms/bittensor.mdx +++ b/src/oss/python/integrations/llms/bittensor.mdx @@ -55,7 +55,7 @@ pprint(json_multi_resp) ## Using NIBittensorLLM with LLMChain and PromptTemplate ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain.globals import set_debug from langchain_community.llms import NIBittensorLLM from langchain_core.prompts import PromptTemplate diff --git a/src/oss/python/integrations/llms/cerebriumai.mdx b/src/oss/python/integrations/llms/cerebriumai.mdx index b5a7aa8bd3..e5266827bf 100644 --- a/src/oss/python/integrations/llms/cerebriumai.mdx +++ b/src/oss/python/integrations/llms/cerebriumai.mdx @@ -20,7 +20,7 @@ The `cerebrium` package is required to use the `CerebriumAI` API. Install `cereb ```python import os -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms import CerebriumAI from langchain_core.prompts import PromptTemplate ``` diff --git a/src/oss/python/integrations/llms/chatglm.mdx b/src/oss/python/integrations/llms/chatglm.mdx index fe91d38b22..891bd3ae0d 100644 --- a/src/oss/python/integrations/llms/chatglm.mdx +++ b/src/oss/python/integrations/llms/chatglm.mdx @@ -19,7 +19,7 @@ pip install -qU langchain langchain-community This example goes over how to use LangChain to interact with ChatGLM3-6B Inference for text completion. 
```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms.chatglm3 import ChatGLM3 from langchain.messages import AIMessage from langchain_core.prompts import PromptTemplate @@ -63,7 +63,7 @@ The following example shows how to use LangChain to interact with the ChatGLM2-6 ChatGLM-6B and ChatGLM2-6B has the same api specs, so this example should work with both. ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms import ChatGLM from langchain_core.prompts import PromptTemplate diff --git a/src/oss/python/integrations/llms/clarifai.mdx b/src/oss/python/integrations/llms/clarifai.mdx index 2b07c021ee..ec2972e406 100644 --- a/src/oss/python/integrations/llms/clarifai.mdx +++ b/src/oss/python/integrations/llms/clarifai.mdx @@ -36,7 +36,7 @@ CLARIFAI_PAT = getpass() ```python # Import the required modules -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms import Clarifai from langchain_core.prompts import PromptTemplate ``` diff --git a/src/oss/python/integrations/llms/cloudflare_workersai.mdx b/src/oss/python/integrations/llms/cloudflare_workersai.mdx index c0b6bfb4e2..7617e59f8e 100644 --- a/src/oss/python/integrations/llms/cloudflare_workersai.mdx +++ b/src/oss/python/integrations/llms/cloudflare_workersai.mdx @@ -7,7 +7,7 @@ title: Cloudflare Workers AI Both Cloudflare account ID and API token are required. Find how to obtain them from [this document](https://developers.cloudflare.com/workers-ai/get-started/rest-api/). ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms.cloudflare_workersai import CloudflareWorkersAI from langchain_core.prompts import PromptTemplate diff --git a/src/oss/python/integrations/llms/ctransformers.mdx b/src/oss/python/integrations/llms/ctransformers.mdx index 0d46ef0a1f..e1efb82241 100644 --- a/src/oss/python/integrations/llms/ctransformers.mdx +++ b/src/oss/python/integrations/llms/ctransformers.mdx @@ -41,7 +41,7 @@ response = llm.invoke("AI is going to") **LLMChain** ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_core.prompts import PromptTemplate template = """Question: {question} diff --git a/src/oss/python/integrations/llms/ctranslate2.mdx b/src/oss/python/integrations/llms/ctranslate2.mdx index c8f7040355..576e13e952 100644 --- a/src/oss/python/integrations/llms/ctranslate2.mdx +++ b/src/oss/python/integrations/llms/ctranslate2.mdx @@ -78,7 +78,7 @@ generations=[[Generation(text='The list of top romantic songs:\n1. 
“I Will Alw ## Integrate the model in an LLMChain ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_core.prompts import PromptTemplate template = """{question} diff --git a/src/oss/python/integrations/llms/deepinfra.mdx b/src/oss/python/integrations/llms/deepinfra.mdx index 2af6eb82dd..6f38365146 100644 --- a/src/oss/python/integrations/llms/deepinfra.mdx +++ b/src/oss/python/integrations/llms/deepinfra.mdx @@ -83,7 +83,7 @@ prompt = PromptTemplate.from_template(template) ## Initiate the LLMChain ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain llm_chain = LLMChain(prompt=prompt, llm=llm) ``` diff --git a/src/oss/python/integrations/llms/edenai.mdx b/src/oss/python/integrations/llms/edenai.mdx index ab411b5d66..6a269a8ad6 100644 --- a/src/oss/python/integrations/llms/edenai.mdx +++ b/src/oss/python/integrations/llms/edenai.mdx @@ -41,7 +41,7 @@ For instance, let's explore the models provided by OpenAI, such as GPT3.5 ### text generation ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_core.prompts import PromptTemplate llm = EdenAI( @@ -118,7 +118,7 @@ print(llm.invoke(prompt)) ## Chaining Calls ```python -from langchain.chains import LLMChain, SimpleSequentialChain +from langchain_classic.chains import LLMChain, SimpleSequentialChain from langchain_core.prompts import PromptTemplate ``` diff --git a/src/oss/python/integrations/llms/forefrontai.mdx b/src/oss/python/integrations/llms/forefrontai.mdx index fc7774931e..2a4ffc10de 100644 --- a/src/oss/python/integrations/llms/forefrontai.mdx +++ b/src/oss/python/integrations/llms/forefrontai.mdx @@ -11,7 +11,7 @@ This notebook goes over how to use LangChain with [ForefrontAI](https://www.fore ```python import os -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms import ForefrontAI from langchain_core.prompts import PromptTemplate ``` diff --git a/src/oss/python/integrations/llms/google_vertex_ai.mdx b/src/oss/python/integrations/llms/google_vertex_ai.mdx index ce59fb2be0..8d56f2b51c 100644 --- a/src/oss/python/integrations/llms/google_vertex_ai.mdx +++ b/src/oss/python/integrations/llms/google_vertex_ai.mdx @@ -599,105 +599,3 @@ chat_llm.invoke([message1]) ```output AIMessage(content='Prompt:\nuser\nHow much is 2+2?\nmodel\nOutput:\nThe answer is 4.\n2 + 2 = 4.', id='run-cea563df-e91a-4374-83a1-3d8b186a01b2-0') ``` - -## Anthropic on Vertex AI - -> [Anthropic Claude 3](https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/use-claude) models on Vertex AI offer fully managed and serverless models as APIs. To use a Claude model on Vertex AI, send a request directly to the Vertex AI API endpoint. Because Anthropic Claude 3 models use a managed API, there's no need to provision or manage infrastructure. 
- -NOTE : Anthropic Models on Vertex are implemented as Chat Model through class `ChatAnthropicVertex` - -```python -!pip install -U langchain-google-vertexai anthropic[vertex] -``` - -```python -from langchain.messages import ( - AIMessage, - AIMessageChunk, - HumanMessage, - SystemMessage, -) -from langchain_core.outputs import LLMResult -from langchain_google_vertexai.model_garden import ChatAnthropicVertex -``` - -NOTE : Specify the correct [Claude 3 Model Versions](https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/use-claude#claude-opus) - -We don't recommend using the Anthropic Claude 3 model versions that don't include a suffix that starts with an @ symbol (claude-3-opus, claude-3-sonnet, or claude-3-haiku). - -```python -# TODO : Replace below with your project id and region -project = "" -location = "" - -# Initialise the Model -model = ChatAnthropicVertex( - model_name="claude-3-haiku@20240307", - project=project, - location=location, -) -``` - -```python -# prepare input data for the model -raw_context = ( - "My name is Peter. You are my personal assistant. My favorite movies " - "are Lord of the Rings and Hobbit." -) -question = ( - "Hello, could you recommend a good movie for me to watch this evening, please?" -) -context = SystemMessage(content=raw_context) -message = HumanMessage(content=question) -``` - -```python -# Invoke the model -response = model.invoke([context, message]) -print(response.content) -``` - -```output -Since your favorite movies are the Lord of the Rings and Hobbit trilogies, I would recommend checking out some other epic fantasy films that have a similar feel: - -1. The Chronicles of Narnia series - These films are based on the beloved fantasy novels by C.S. Lewis and have a great blend of adventure, magic, and memorable characters. - -2. Stardust - This 2007 fantasy film, based on the Neil Gaiman novel, has an excellent cast and a charming, whimsical tone. - -3. The Golden Compass - The first film adaptation of Philip Pullman's His Dark Materials series, with stunning visuals and a compelling story. - -4. Pan's Labyrinth - Guillermo del Toro's dark, fairy tale-inspired masterpiece set against the backdrop of the Spanish Civil War. - -5. The Princess Bride - A classic fantasy adventure film with humor, romance, and unforgettable characters. - -Let me know if any of those appeal to you or if you'd like me to suggest something else! I'm happy to provide more personalized recommendations. -``` - -```python -# You can choose to initialize/ override the model name on Invoke method as well -response = model.invoke([context, message], model_name="claude-3-sonnet@20240229") -print(response.content) -``` - -```output -Sure, I'd be happy to recommend a movie for you! Since you mentioned that The Lord of the Rings and The Hobbit are among your favorite movies, I'll suggest some other epic fantasy/adventure films you might enjoy: - -1. The Princess Bride (1987) - A classic fairy tale with adventure, romance, and a lot of wit and humor. It has an all-star cast and very quotable lines. - -2. Willow (1988) - A fun fantasy film produced by George Lucas with fairies, dwarves, and brownies going on an epic quest. Has a similar tone to the Lord of the Rings movies. - -3. Stardust (2007) - An underrated fantasy adventure based on the Neil Gaiman novel about a young man entering a magical kingdom to retrieve a fallen star. Great cast and visuals. - -4. 
The Chronicles of Narnia series - The Lion, The Witch and The Wardrobe is the best known, but the other Narnia films are also very well done fantasy epics. - -5. The Golden Compass (2007) - First installment of the His Dark Materials trilogy, set in a parallel universe with armored polar bears and truth-seeking devices. - -Let me know if you'd like any other suggestions or have a particular style of movie in mind! I aimed for entertaining fantasy/adventure flicks similar to Lord of the Rings. -``` - -```python -# Use streaming responses -sync_response = model.stream([context, message], model_name="claude-3-haiku@20240307") -for chunk in sync_response: - print(chunk.content) -``` diff --git a/src/oss/python/integrations/llms/gooseai.mdx b/src/oss/python/integrations/llms/gooseai.mdx index 30bd6ec671..b739781d1d 100644 --- a/src/oss/python/integrations/llms/gooseai.mdx +++ b/src/oss/python/integrations/llms/gooseai.mdx @@ -19,7 +19,7 @@ pip install -qU langchain-openai ```python import os -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms import GooseAI from langchain_core.prompts import PromptTemplate ``` diff --git a/src/oss/python/integrations/llms/gradient.mdx b/src/oss/python/integrations/llms/gradient.mdx index 209a8c8367..03abb329f0 100644 --- a/src/oss/python/integrations/llms/gradient.mdx +++ b/src/oss/python/integrations/llms/gradient.mdx @@ -9,7 +9,7 @@ This notebook goes over how to use LangChain with [Gradient](https://gradient.ai ## Imports ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms import GradientLLM from langchain_core.prompts import PromptTemplate ``` diff --git a/src/oss/python/integrations/llms/huggingface_endpoint.mdx b/src/oss/python/integrations/llms/huggingface_endpoint.mdx index 848e607a03..b63be100a9 100644 --- a/src/oss/python/integrations/llms/huggingface_endpoint.mdx +++ b/src/oss/python/integrations/llms/huggingface_endpoint.mdx @@ -42,7 +42,7 @@ from langchain_huggingface import HuggingFaceEndpoint ``` ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_core.prompts import PromptTemplate ``` diff --git a/src/oss/python/integrations/llms/ipex_llm.mdx b/src/oss/python/integrations/llms/ipex_llm.mdx index d9dcdeb8a5..80a7d5e14f 100644 --- a/src/oss/python/integrations/llms/ipex_llm.mdx +++ b/src/oss/python/integrations/llms/ipex_llm.mdx @@ -73,7 +73,7 @@ os.environ["SYCL_CACHE_PERSISTENT"] = "1" ```python import warnings -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms import IpexLLM from langchain_core.prompts import PromptTemplate @@ -180,7 +180,7 @@ pip install --pre --upgrade ipex-llm[all] --extra-index-url https://download.pyt ```python import warnings -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms import IpexLLM from langchain_core.prompts import PromptTemplate diff --git a/src/oss/python/integrations/llms/javelin.mdx b/src/oss/python/integrations/llms/javelin.mdx index 1ee232b778..7ca4ea357e 100644 --- a/src/oss/python/integrations/llms/javelin.mdx +++ b/src/oss/python/integrations/llms/javelin.mdx @@ -27,7 +27,7 @@ This section will demonstrate how to interact with the Javelin AI Gateway to get (note) assumes that you have setup a route in the gateway called 'eng_dept03' ```python -from langchain.chains import LLMChain 
+from langchain_classic.chains import LLMChain from langchain_community.llms import JavelinAIGateway from langchain_core.prompts import PromptTemplate @@ -55,7 +55,7 @@ ImportError Traceback (most recent call last) ``` ```output Cell In[6], line 2 - 1 from langchain.chains import LLMChain + 1 from langchain_classic.chains import LLMChain ----> 2 from langchain.llms import JavelinAIGateway 3 from langchain.prompts import PromptTemplate 5 route_completions = "eng_dept03" diff --git a/src/oss/python/integrations/llms/manifest.mdx b/src/oss/python/integrations/llms/manifest.mdx index cda6253df6..6f358bc625 100644 --- a/src/oss/python/integrations/llms/manifest.mdx +++ b/src/oss/python/integrations/llms/manifest.mdx @@ -32,7 +32,7 @@ llm = ManifestWrapper( ```python # Map reduce example -from langchain.chains.mapreduce import MapReduceChain +from langchain_classic.chains import MapReduceChain from langchain_core.prompts import PromptTemplate from langchain_text_splitters import CharacterTextSplitter diff --git a/src/oss/python/integrations/llms/minimax.mdx b/src/oss/python/integrations/llms/minimax.mdx index 3e006d37ba..23b114bb62 100644 --- a/src/oss/python/integrations/llms/minimax.mdx +++ b/src/oss/python/integrations/llms/minimax.mdx @@ -39,7 +39,7 @@ os.environ["MINIMAX_GROUP_ID"] = "YOUR_GROUP_ID" ``` ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms import Minimax from langchain_core.prompts import PromptTemplate ``` diff --git a/src/oss/python/integrations/llms/modal.mdx b/src/oss/python/integrations/llms/modal.mdx index 1a17ecd7e6..3712493777 100644 --- a/src/oss/python/integrations/llms/modal.mdx +++ b/src/oss/python/integrations/llms/modal.mdx @@ -69,7 +69,7 @@ def web(request: Request): Once you have a deployed Modal web endpoint, you can pass its URL into the `langchain.llms.modal.Modal` LLM class. This class can then function as a building block in your chain. 
```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms import Modal from langchain_core.prompts import PromptTemplate ``` diff --git a/src/oss/python/integrations/llms/mosaicml.mdx b/src/oss/python/integrations/llms/mosaicml.mdx index c7d79e67eb..d68a68549b 100644 --- a/src/oss/python/integrations/llms/mosaicml.mdx +++ b/src/oss/python/integrations/llms/mosaicml.mdx @@ -21,7 +21,7 @@ os.environ["MOSAICML_API_TOKEN"] = MOSAICML_API_TOKEN ``` ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms import MosaicML from langchain_core.prompts import PromptTemplate ``` diff --git a/src/oss/python/integrations/llms/nlpcloud.mdx b/src/oss/python/integrations/llms/nlpcloud.mdx index 174ef9f8e0..40150a703e 100644 --- a/src/oss/python/integrations/llms/nlpcloud.mdx +++ b/src/oss/python/integrations/llms/nlpcloud.mdx @@ -29,7 +29,7 @@ os.environ["NLPCLOUD_API_KEY"] = NLPCLOUD_API_KEY ``` ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms import NLPCloud from langchain_core.prompts import PromptTemplate ``` diff --git a/src/oss/python/integrations/llms/octoai.mdx b/src/oss/python/integrations/llms/octoai.mdx index 5a79872017..a21316849b 100644 --- a/src/oss/python/integrations/llms/octoai.mdx +++ b/src/oss/python/integrations/llms/octoai.mdx @@ -23,7 +23,7 @@ os.environ["OCTOAI_API_TOKEN"] = "OCTOAI_API_TOKEN" ``` ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms.octoai_endpoint import OctoAIEndpoint from langchain_core.prompts import PromptTemplate ``` diff --git a/src/oss/python/integrations/llms/opaqueprompts.mdx b/src/oss/python/integrations/llms/opaqueprompts.mdx index 688c05b850..fc31d9be37 100644 --- a/src/oss/python/integrations/llms/opaqueprompts.mdx +++ b/src/oss/python/integrations/llms/opaqueprompts.mdx @@ -27,7 +27,7 @@ os.environ["OPENAI_API_KEY"] = "" Applying OpaquePrompts to your application could be as simple as wrapping your LLM using the OpaquePrompts class by replace `llm=OpenAI()` with `llm=OpaquePrompts(base_llm=OpenAI())`. ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain.globals import set_debug, set_verbose from langchain.memory import ConversationBufferWindowMemory from langchain_community.llms import OpaquePrompts diff --git a/src/oss/python/integrations/llms/openlm.mdx b/src/oss/python/integrations/llms/openlm.mdx index 752dc24486..6c2487608b 100644 --- a/src/oss/python/integrations/llms/openlm.mdx +++ b/src/oss/python/integrations/llms/openlm.mdx @@ -39,7 +39,7 @@ if "HF_API_TOKEN" not in os.environ: Here we're going to call two models in an LLMChain, `text-davinci-003` from OpenAI and `gpt2` on HuggingFace. 
```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms import OpenLM from langchain_core.prompts import PromptTemplate ``` diff --git a/src/oss/python/integrations/llms/petals.mdx b/src/oss/python/integrations/llms/petals.mdx index 4bb0241ee2..9c8f385d65 100644 --- a/src/oss/python/integrations/llms/petals.mdx +++ b/src/oss/python/integrations/llms/petals.mdx @@ -21,7 +21,7 @@ For Apple Silicon(M1/M2) users please follow this guide [https://github.com/bigs ```python import os -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms import Petals from langchain_core.prompts import PromptTemplate ``` diff --git a/src/oss/python/integrations/llms/predibase.mdx b/src/oss/python/integrations/llms/predibase.mdx index 075d4a6c4b..de33fb8abf 100644 --- a/src/oss/python/integrations/llms/predibase.mdx +++ b/src/oss/python/integrations/llms/predibase.mdx @@ -120,7 +120,7 @@ llm = Predibase( ## SequentialChain ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_core.prompts import PromptTemplate ``` @@ -147,7 +147,7 @@ review_chain = LLMChain(llm=llm, prompt=prompt_template) ```python # This is the overall chain where we run these two chains in sequence. -from langchain.chains import SimpleSequentialChain +from langchain_classic.chains import SimpleSequentialChain overall_chain = SimpleSequentialChain( chains=[synopsis_chain, review_chain], verbose=True diff --git a/src/oss/python/integrations/llms/replicate.mdx b/src/oss/python/integrations/llms/replicate.mdx index 4505f63e73..b2f6c71eda 100644 --- a/src/oss/python/integrations/llms/replicate.mdx +++ b/src/oss/python/integrations/llms/replicate.mdx @@ -54,7 +54,7 @@ os.environ["REPLICATE_API_TOKEN"] = REPLICATE_API_TOKEN ``` ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms import Replicate from langchain_core.prompts import PromptTemplate ``` @@ -225,7 +225,7 @@ Stopped output runtime: 25.77039254200008 seconds The whole point of langchain is to... chain! Here's an example of how do that. ```python -from langchain.chains import SimpleSequentialChain +from langchain_classic.chains import SimpleSequentialChain ``` First, let's define the LLM for this model as a flan-5, and text2image as a stable diffusion model. 
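+
+A minimal sketch of what that chaining can look like (the model slugs, prompts, and variable names here are placeholders, not the notebook's exact values):
+
+```python
+from langchain_classic.chains import LLMChain, SimpleSequentialChain
+from langchain_community.llms import Replicate
+from langchain_core.prompts import PromptTemplate
+
+# Chain 1: draft a detailed image prompt from a short topic.
+llm = Replicate(model="<owner>/<flan-t5-model>:<version>")  # placeholder slug
+prompt_chain = LLMChain(
+    llm=llm,
+    prompt=PromptTemplate.from_template("Write a vivid image prompt about: {topic}"),
+)
+
+# Chain 2: feed that prompt to a text-to-image model.
+text2image = Replicate(model="stability-ai/stable-diffusion:<version>")  # placeholder slug
+image_chain = LLMChain(llm=text2image, prompt=PromptTemplate.from_template("{image_prompt}"))
+
+# SimpleSequentialChain pipes each chain's single output into the next chain.
+overall_chain = SimpleSequentialChain(chains=[prompt_chain, image_chain], verbose=True)
+image_url = overall_chain.run("a halloween party")
+```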
diff --git a/src/oss/python/integrations/llms/runhouse.mdx b/src/oss/python/integrations/llms/runhouse.mdx index 3deedd21f8..42efa62b5b 100644 --- a/src/oss/python/integrations/llms/runhouse.mdx +++ b/src/oss/python/integrations/llms/runhouse.mdx @@ -14,7 +14,7 @@ pip install -qU runhouse ```python import runhouse as rh -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms import SelfHostedHuggingFaceLLM, SelfHostedPipeline from langchain_core.prompts import PromptTemplate ``` diff --git a/src/oss/python/integrations/llms/sagemaker.mdx b/src/oss/python/integrations/llms/sagemaker.mdx index aa627f992c..6c5ab917b6 100644 --- a/src/oss/python/integrations/llms/sagemaker.mdx +++ b/src/oss/python/integrations/llms/sagemaker.mdx @@ -51,7 +51,7 @@ import json from typing import Dict import boto3 -from langchain.chains.question_answering import load_qa_chain +from langchain_classic.chains.question_answering import load_qa_chain from langchain_aws.llms import SagemakerEndpoint from langchain_aws.llms.sagemaker_endpoint import LLMContentHandler from langchain_core.prompts import PromptTemplate @@ -116,7 +116,7 @@ chain({"input_documents": docs, "question": query}, return_only_outputs=True) import json from typing import Dict -from langchain.chains.question_answering import load_qa_chain +from langchain_classic.chains.question_answering import load_qa_chain from langchain_aws.llms import SagemakerEndpoint from langchain_aws.llms.sagemaker_endpoint import LLMContentHandler from langchain_core.prompts import PromptTemplate diff --git a/src/oss/python/integrations/llms/solar.mdx b/src/oss/python/integrations/llms/solar.mdx index 1104b2de9a..5f30c5b7d6 100644 --- a/src/oss/python/integrations/llms/solar.mdx +++ b/src/oss/python/integrations/llms/solar.mdx @@ -15,7 +15,7 @@ llm.invoke("tell me a story?") ``` ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms.solar import Solar from langchain_core.prompts import PromptTemplate diff --git a/src/oss/python/integrations/llms/stochasticai.mdx b/src/oss/python/integrations/llms/stochasticai.mdx index 0d551d2e33..3b80d6df15 100644 --- a/src/oss/python/integrations/llms/stochasticai.mdx +++ b/src/oss/python/integrations/llms/stochasticai.mdx @@ -33,7 +33,7 @@ YOUR_API_URL = getpass() ``` ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms import StochasticAI from langchain_core.prompts import PromptTemplate ``` diff --git a/src/oss/python/integrations/llms/symblai_nebula.mdx b/src/oss/python/integrations/llms/symblai_nebula.mdx index 10ac795bfa..8f51522f29 100644 --- a/src/oss/python/integrations/llms/symblai_nebula.mdx +++ b/src/oss/python/integrations/llms/symblai_nebula.mdx @@ -19,7 +19,7 @@ llm = Nebula(nebula_api_key="") Use a conversation transcript and instruction to construct a prompt. ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_core.prompts import PromptTemplate conversation = """Sam: Good morning, team! Let's keep this standup concise. We'll go in the usual order: what you did yesterday, what you plan to do today, and any blockers. Alex, kick us off. 
diff --git a/src/oss/python/integrations/llms/textgen.mdx b/src/oss/python/integrations/llms/textgen.mdx index d9888f6a2f..d15c796bad 100644 --- a/src/oss/python/integrations/llms/textgen.mdx +++ b/src/oss/python/integrations/llms/textgen.mdx @@ -17,7 +17,7 @@ model_url = "http://localhost:5000" ``` ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain.globals import set_debug from langchain_community.llms import TextGen from langchain_core.prompts import PromptTemplate @@ -47,7 +47,7 @@ model_url = "ws://localhost:5005" ``` ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain.globals import set_debug from langchain_community.llms import TextGen from langchain_core.callbacks import StreamingStdOutCallbackHandler diff --git a/src/oss/python/integrations/llms/vllm.mdx b/src/oss/python/integrations/llms/vllm.mdx index 96bdd36955..ab3ef6ff75 100644 --- a/src/oss/python/integrations/llms/vllm.mdx +++ b/src/oss/python/integrations/llms/vllm.mdx @@ -46,7 +46,7 @@ What is the capital of France ? The capital of France is Paris. ## Integrate the model in an LLMChain ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_core.prompts import PromptTemplate template = """Question: {question} diff --git a/src/oss/python/integrations/llms/xinference.mdx b/src/oss/python/integrations/llms/xinference.mdx index 161f09885a..85f8f12dfd 100644 --- a/src/oss/python/integrations/llms/xinference.mdx +++ b/src/oss/python/integrations/llms/xinference.mdx @@ -57,7 +57,7 @@ llm( ### Integrate with a LLMChain ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_core.prompts import PromptTemplate template = "Where can we visit in the capital of {country}?" diff --git a/src/oss/python/integrations/llms/yandex.mdx b/src/oss/python/integrations/llms/yandex.mdx index 6fe3f52124..fc7d7a2cc0 100644 --- a/src/oss/python/integrations/llms/yandex.mdx +++ b/src/oss/python/integrations/llms/yandex.mdx @@ -25,7 +25,7 @@ To specify the model you can use `model_uri` parameter, see [the documentation]( By default, the latest version of `yandexgpt-lite` is used from the folder specified in the parameter `folder_id` or `YC_FOLDER_ID` environment variable. ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms import YandexGPT from langchain_core.prompts import PromptTemplate ``` diff --git a/src/oss/python/integrations/llms/yuan2.mdx b/src/oss/python/integrations/llms/yuan2.mdx index 38aeee3ff5..5ac3ace351 100644 --- a/src/oss/python/integrations/llms/yuan2.mdx +++ b/src/oss/python/integrations/llms/yuan2.mdx @@ -9,7 +9,7 @@ This example goes over how to use LangChain to interact with `Yuan2.0`(2B/51B/10 Yuan2.0 set up an inference service so user just need request the inference api to get result, which is introduced in [Yuan2.0 Inference-Server](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/docs/inference_server.md). 
```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms.yuan2 import Yuan2 ``` diff --git a/src/oss/python/integrations/middleware/anthropic.mdx b/src/oss/python/integrations/middleware/anthropic.mdx index 65bfdcacdb..fd8b2e4ae7 100644 --- a/src/oss/python/integrations/middleware/anthropic.mdx +++ b/src/oss/python/integrations/middleware/anthropic.mdx @@ -12,6 +12,105 @@ Middleware specifically designed for Anthropic's Claude models. Learn more about | [Memory](#memory) | Provide Claude's memory tool for persistent agent memory | | [File search](#file-search) | Search tools for state-based file systems | +## Middleware vs tools + +`langchain-anthropic` provides two ways to use Claude's native tools: + +- **Middleware** (this page): Production-ready implementations with built-in execution, state management, and security policies +- **Tools** (via [`bind_tools`](/oss/integrations/chat/anthropic#built-in-tools)): Low-level building blocks where you provide your own execution logic + +### When to use which + +| Use case | Recommended | Why | +|----------|-------------|-----| +| Production agents with bash | Middleware | Persistent sessions, Docker isolation, output redaction | +| State-based file editing | Middleware | Built-in LangGraph state persistence | +| Filesystem file editing | Middleware | Writes to disk with path validation | +| Custom execution logic | Tools | Full control over execution | +| Quick prototype | Tools | Simpler, bring your own callback | +| Non-agent use with @[`bind_tools`][ChatAnthropic.bind_tools] | Tools | Middleware requires @[`create_agent`] | + +### Feature comparison + +| Feature | Middleware | Tools | +|---------|:----------:|:-----:| +| Works with @[`create_agent`] | ✅ | ✅ | +| Works with @[`bind_tools`][ChatAnthropic.bind_tools] | ❌ | ✅ | +| Built-in state management | ✅ | ❌ | +| Custom execute callback | ❌ | ✅ | + + + +**Using middleware** (turnkey solution): + +```python +from langchain_anthropic import ChatAnthropic +from langchain_anthropic.middleware import ClaudeBashToolMiddleware +from langchain.agents import create_agent +from langchain.agents.middleware import DockerExecutionPolicy + +# Production-ready with Docker isolation, session management, etc. 
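+# A rough reading of the options below (assumed semantics, not verified API guarantees):
+# - workspace_root scopes the bash session's file access to that directory,
+# - DockerExecutionPolicy runs each command inside the given container image,
+# - startup_commands run once when the session is first created.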
+agent = create_agent( + model=ChatAnthropic(model="claude-sonnet-4-5-20250929"), + middleware=[ + ClaudeBashToolMiddleware( + workspace_root="/workspace", + execution_policy=DockerExecutionPolicy(image="python:3.11"), + startup_commands=["pip install pandas"], + ), + ], +) +``` + +**Using tools** (bring your own execution): + +```python +import subprocess + +from anthropic.types.beta import BetaToolBash20250124Param +from langchain_anthropic import ChatAnthropic +from langchain.agents import create_agent +from langchain.tools import tool + +tool_spec = BetaToolBash20250124Param( + name="bash", + type="bash_20250124", + strict=True, +) + +@tool(extras={"provider_tool_definition": tool_spec}) +def bash(*, command: str, restart: bool = False, **kw): + """Execute a bash command.""" + if restart: + return "Bash session restarted" + try: + result = subprocess.run( + command, + shell=True, + capture_output=True, + text=True, + timeout=30, + ) + return result.stdout + result.stderr + except Exception as e: + return f"Error: {e}" + + +agent = create_agent( + model=ChatAnthropic(model="claude-sonnet-4-5-20250929"), + tools=[bash], +) + +result = agent.invoke( + {"messages": [{"role": "user", "content": "List files in this directory"}]} +) +print(result["messages"][-1].content) +``` + + + +--- + ## Prompt caching Reduce costs and latency by caching static or repetitive prompt content (like system prompts, tool definitions, and conversation history) on Anthropic's servers. This middleware implements a **conversational caching strategy** that places cache breakpoints after the most recent message, allowing the entire conversation history (including the latest user message) to be cached and reused in subsequent API calls. diff --git a/src/oss/python/integrations/providers/aim_tracking.mdx b/src/oss/python/integrations/providers/aim_tracking.mdx index caa0741084..b9f9ee81f9 100644 --- a/src/oss/python/integrations/providers/aim_tracking.mdx +++ b/src/oss/python/integrations/providers/aim_tracking.mdx @@ -74,7 +74,7 @@ aim_callback.flush_tracker(

Scenario 2

Scenario two involves chaining with multiple SubChains across multiple generations. ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_core.prompts import PromptTemplate ``` diff --git a/src/oss/python/integrations/providers/all_providers.mdx b/src/oss/python/integrations/providers/all_providers.mdx index 8453cd6766..e5a06c6116 100644 --- a/src/oss/python/integrations/providers/all_providers.mdx +++ b/src/oss/python/integrations/providers/all_providers.mdx @@ -41,6 +41,14 @@ Browse the complete collection of integrations available for Python. LangChain P Advertising platform for GPT applications and AI services. + + Open event-based protocol for connecting LangGraph agents to any frontend. + + + + React framework with pre-built UI components for AI copilots. + + + + Framework-agnostic language for portable agent definitions. + + - - Anthropic on Vertex AI Model Garden - **Deprecated** – Use [`ChatGoogleGenerativeAI`](/oss/integrations/chat/google_generative_ai) for Gemini models instead. + + Anthropic on Vertex AI Model Garden + diff --git a/src/oss/python/integrations/providers/javelin_ai_gateway.mdx b/src/oss/python/integrations/providers/javelin_ai_gateway.mdx index 7750d5ecc6..7b77480e58 100644 --- a/src/oss/python/integrations/providers/javelin_ai_gateway.mdx +++ b/src/oss/python/integrations/providers/javelin_ai_gateway.mdx @@ -36,7 +36,7 @@ export JAVELIN_API_KEY=... ## Completions Example ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.llms import JavelinAIGateway from langchain_core.prompts import PromptTemplate diff --git a/src/oss/python/integrations/providers/localai.mdx b/src/oss/python/integrations/providers/localai.mdx index a2d4ec5528..f0c383c32f 100644 --- a/src/oss/python/integrations/providers/localai.mdx +++ b/src/oss/python/integrations/providers/localai.mdx @@ -8,10 +8,6 @@ title: LocalAI > audio (and not only) locally or on-prem with consumer grade hardware, > supporting multiple model families and architectures. - -**For proper compatibility, please ensure you are using the `openai` SDK at version **0.x**.** - - `langchain-localai` is a 3rd party integration package for LocalAI. It provides a simple way to use LocalAI services in LangChain. The source code is available on [GitHub](https://github.com/mkhludnev/langchain-localai) @@ -19,15 +15,13 @@ The source code is available on [GitHub](https://github.com/mkhludnev/langchain- ## Installation and Setup -We have to install several python packages: - ```bash pip -pip install tenacity openai +pip install langchain-localai ``` ```bash uv -uv add tenacity openai +uv add langchain-localai ``` @@ -36,6 +30,6 @@ uv add tenacity openai See a [usage example](/oss/integrations/text_embedding/localai). -```python -from langchain_localai import LocalAIEmbeddings -``` +## Reranker + +See a [usage example](/oss/integrations/document_transformers/localai). 
diff --git a/src/oss/python/integrations/providers/mlflow.mdx b/src/oss/python/integrations/providers/mlflow.mdx
index 5f2d4d1f09..1a02ae78bd 100644
--- a/src/oss/python/integrations/providers/mlflow.mdx
+++ b/src/oss/python/integrations/providers/mlflow.mdx
@@ -65,7 +65,8 @@ See the [API documentation and examples](https://www.mlflow.org/docs/latest/llms
```python
import mlflow
-from langchain.chains import LLMChain, PromptTemplate
+from langchain_classic.chains import LLMChain
+from langchain_core.prompts import PromptTemplate
from langchain_community.llms import Mlflow

llm = Mlflow(
diff --git a/src/oss/python/integrations/providers/ontotext_graphdb.mdx b/src/oss/python/integrations/providers/ontotext_graphdb.mdx
index ee9006e5fb..e8f499a407 100644
--- a/src/oss/python/integrations/providers/ontotext_graphdb.mdx
+++ b/src/oss/python/integrations/providers/ontotext_graphdb.mdx
@@ -26,5 +26,5 @@ See the notebook example [here](/oss/integrations/graphs/ontotext).

```python
from langchain_community.graphs import OntotextGraphDBGraph
-from langchain.chains import OntotextGraphDBQAChain
+from langchain_classic.chains import OntotextGraphDBQAChain
```
diff --git a/src/oss/python/integrations/providers/overview.mdx b/src/oss/python/integrations/providers/overview.mdx
index 3c921f12aa..669bd23215 100644
--- a/src/oss/python/integrations/providers/overview.mdx
+++ b/src/oss/python/integrations/providers/overview.mdx
@@ -1,6 +1,6 @@
---
-title: Integration packages
-sidebarTitle: Overview
+title: LangChain integration packages
+sidebarTitle: LangChain integrations
mode: "wide"
---
{/* File generated automatically by pipeline/tools/partner_pkg_table.py */}
@@ -22,32 +22,32 @@ To see a full list of integrations by component type, refer to the categories in
| :--- | :--- | :--- | :--- | :--- |
| [OpenAI](/oss/integrations/providers/openai/) | [`langchain-openai`](https://reference.langchain.com/python/integrations/langchain_openai/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/openai) |
| [Google (Vertex AI)](/oss/integrations/providers/google) | [`langchain-google-vertexai`](https://reference.langchain.com/python/integrations/langchain_google_vertexai/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/google-vertexai) |
-| [Google (GenAI)](/oss/integrations/providers/google) | [`langchain-google-genai`](https://reference.langchain.com/python/integrations/langchain_google_genai/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/google-genai) |
| [Anthropic (Claude)](/oss/integrations/providers/anthropic/) | [`langchain-anthropic`](https://reference.langchain.com/python/integrations/langchain_anthropic/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/anthropic) |
| [AWS](/oss/integrations/providers/aws/) | [`langchain-aws`](https://reference.langchain.com/python/integrations/langchain_aws/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/aws) |
+| [Google (GenAI)](/oss/integrations/providers/google) | [`langchain-google-genai`](https://reference.langchain.com/python/integrations/langchain_google_genai/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/google-genai) |
| [Ollama](/oss/integrations/providers/ollama/) | [`langchain-ollama`](https://reference.langchain.com/python/integrations/langchain_ollama/) | Downloads per month | PyPI - Latest version |
[✅](https://www.npmjs.com/package/@langchain/ollama) | | [Chroma](/oss/integrations/providers/chroma/) | [`langchain-chroma`](https://reference.langchain.com/python/integrations/langchain_chroma/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/community) | | [Groq](/oss/integrations/providers/groq/) | [`langchain-groq`](https://reference.langchain.com/python/integrations/langchain_groq/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/groq) | | [Huggingface](/oss/integrations/providers/huggingface/) | [`langchain-huggingface`](https://reference.langchain.com/python/integrations/langchain_huggingface/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/community) | -| [Cohere](/oss/integrations/providers/cohere/) | [`langchain-cohere`](https://reference.langchain.com/python/integrations/langchain_cohere/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/cohere) | | [Pinecone](/oss/integrations/providers/pinecone/) | [`langchain-pinecone`](https://reference.langchain.com/python/integrations/langchain_pinecone/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/pinecone) | +| [Cohere](/oss/integrations/providers/cohere/) | [`langchain-cohere`](https://reference.langchain.com/python/integrations/langchain_cohere/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/cohere) | | [Postgres](/oss/integrations/providers/pgvector) | [`langchain-postgres`](https://reference.langchain.com/python/integrations/langchain_postgres/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/community) | | [Databricks](/oss/integrations/providers/databricks/) | [`databricks-langchain`](https://pypi.org/project/databricks-langchain/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/community) | | [MistralAI](/oss/integrations/providers/mistralai/) | [`langchain-mistralai`](https://reference.langchain.com/python/integrations/langchain_mistralai/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/mistralai) | -| [Fireworks](/oss/integrations/providers/fireworks/) | [`langchain-fireworks`](https://reference.langchain.com/python/integrations/langchain_fireworks/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/community) | -| [Nvidia AI Endpoints](/oss/integrations/providers/nvidia) | [`langchain-nvidia-ai-endpoints`](https://reference.langchain.com/python/integrations/langchain_nvidia_ai_endpoints/) | Downloads per month | PyPI - Latest version | ❌ | | [Perplexity](/oss/integrations/providers/perplexity/) | [`langchain-perplexity`](https://reference.langchain.com/python/integrations/langchain_perplexity/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/community) | | [MongoDB](/oss/integrations/providers/mongodb_atlas) | [`langchain-mongodb`](https://reference.langchain.com/python/integrations/langchain_mongodb/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/mongodb) | -| [IBM](/oss/integrations/providers/ibm/) | [`langchain-ibm`](https://reference.langchain.com/python/integrations/langchain_ibm/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/ibm) | | 
[Deepseek](/oss/integrations/providers/deepseek/) | [`langchain-deepseek`](https://reference.langchain.com/python/integrations/langchain_deepseek/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/deepseek) | +| [Fireworks](/oss/integrations/providers/fireworks/) | [`langchain-fireworks`](https://reference.langchain.com/python/integrations/langchain_fireworks/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/community) | +| [Nvidia AI Endpoints](/oss/integrations/providers/nvidia) | [`langchain-nvidia-ai-endpoints`](https://reference.langchain.com/python/integrations/langchain_nvidia_ai_endpoints/) | Downloads per month | PyPI - Latest version | ❌ | +| [IBM](/oss/integrations/providers/ibm/) | [`langchain-ibm`](https://reference.langchain.com/python/integrations/langchain_ibm/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/ibm) | | [Qdrant](/oss/integrations/providers/qdrant/) | [`langchain-qdrant`](https://reference.langchain.com/python/integrations/langchain_qdrant/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/qdrant) | | [Milvus](/oss/integrations/providers/milvus/) | [`langchain-milvus`](https://reference.langchain.com/python/integrations/langchain_milvus/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/community) | | [Tavily](/oss/integrations/providers/tavily/) | [`langchain-tavily`](https://pypi.org/project/langchain-tavily/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/tavily) | -| [Elasticsearch](/oss/integrations/providers/elasticsearch/) | [`langchain-elasticsearch`](https://reference.langchain.com/python/integrations/langchain_elasticsearch/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/community) | | [xAI (Grok)](/oss/integrations/providers/xai/) | [`langchain-xai`](https://reference.langchain.com/python/integrations/langchain_xai/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/xai) | -| [LiteLLM](/oss/integrations/providers/litellm/) | [`langchain-litellm`](https://pypi.org/project/langchain-litellm/) | Downloads per month | PyPI - Latest version | N/A | -| [DataStax Astra DB](/oss/integrations/providers/astradb/) | [`langchain-astradb`](https://reference.langchain.com/python/integrations/langchain_astradb/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/community) | +| [Elasticsearch](/oss/integrations/providers/elasticsearch/) | [`langchain-elasticsearch`](https://reference.langchain.com/python/integrations/langchain_elasticsearch/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/community) | | [Azure AI](/oss/integrations/providers/azure_ai) | [`langchain-azure-ai`](https://reference.langchain.com/python/integrations/langchain_azure_ai/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/openai) | +| [DataStax Astra DB](/oss/integrations/providers/astradb/) | [`langchain-astradb`](https://reference.langchain.com/python/integrations/langchain_astradb/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/community) | +| [LiteLLM](/oss/integrations/providers/litellm/) | [`langchain-litellm`](https://pypi.org/project/langchain-litellm/) | 
Downloads per month | PyPI - Latest version | N/A |
| [Redis](/oss/integrations/providers/redis/) | [`langchain-redis`](https://reference.langchain.com/python/integrations/langchain_redis/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/redis) |
| [Together](/oss/integrations/providers/together/) | [`langchain-together`](https://reference.langchain.com/python/integrations/langchain_together/) | Downloads per month | PyPI - Latest version | [✅](https://www.npmjs.com/package/@langchain/community) |
| [MCP Toolbox (Google)](/oss/integrations/providers/toolbox/) | [`toolbox-langchain`](https://pypi.org/project/toolbox-langchain/) | Downloads per month | PyPI - Latest version | ❌ |
diff --git a/src/oss/python/integrations/providers/ray_serve.mdx b/src/oss/python/integrations/providers/ray_serve.mdx
index fe7380ea09..74e035de8f 100644
--- a/src/oss/python/integrations/providers/ray_serve.mdx
+++ b/src/oss/python/integrations/providers/ray_serve.mdx
@@ -52,7 +52,7 @@ serve.api.shutdown()

Get an OpenAI API key from [here](https://platform.openai.com/account/api-keys). By running the following code, you will be asked to provide your API key.

```python
-from langchain.chains import LLMChain
+from langchain_classic.chains import LLMChain
from langchain_core.prompts import PromptTemplate
from langchain_openai import OpenAI
```
diff --git a/src/oss/python/integrations/providers/rebuff.mdx b/src/oss/python/integrations/providers/rebuff.mdx
index fd082de5cf..f9a836f106 100644
--- a/src/oss/python/integrations/providers/rebuff.mdx
+++ b/src/oss/python/integrations/providers/rebuff.mdx
@@ -51,7 +51,7 @@ Metrics from individual checks
```

```python
-from langchain.chains import LLMChain
+from langchain_classic.chains import LLMChain
from langchain_core.prompts import PromptTemplate
from langchain_openai import OpenAI

@@ -102,7 +102,7 @@ Response (completion): SELECT HEX('55e8813b');

We can easily use rebuff in a chain to block any attempted prompt attacks

```python
-from langchain.chains import SimpleSequentialChain, TransformChain
+from langchain_classic.chains import SimpleSequentialChain, TransformChain
from langchain_community.utilities import SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain
```
diff --git a/src/oss/python/integrations/providers/wandb_tracking.mdx b/src/oss/python/integrations/providers/wandb_tracking.mdx
index 5f75007449..e9b0d06fc1 100644
--- a/src/oss/python/integrations/providers/wandb_tracking.mdx
+++ b/src/oss/python/integrations/providers/wandb_tracking.mdx
@@ -174,7 +174,7 @@ View run at
diff --git a/src/oss/python/integrations/text_embedding/localai.mdx b/src/oss/python/integrations/text_embedding/localai.mdx
--- a/src/oss/python/integrations/text_embedding/localai.mdx
+++ b/src/oss/python/integrations/text_embedding/localai.mdx
+**For proper compatibility, please ensure you are using the `openai` SDK at version 0.x.**
+
+
+Let's load the LocalAI Embeddings class with an embeddings model.

```python
-from langchain_community.embeddings import LocalAIEmbeddings
+%pip install -U langchain-community
```

```python
+import os
+
+from langchain_community.embeddings import LocalAIEmbeddings
+
+# If you are behind an explicit proxy, you can use the OPENAI_PROXY environment variable to pass through.
+os.environ["OPENAI_PROXY"] = "http://proxy.yourcompany.com:8080"
+
embeddings = LocalAIEmbeddings(
    openai_api_base="http://localhost:8080", model="embedding-model-name"
)
-```
-
-```python
text = "This is a test document."
-``` - -```python query_result = embeddings.embed_query(text) -``` - -```python doc_result = embeddings.embed_documents([text]) ``` - -```python -import os - -# if you are behind an explicit proxy, you can use the OPENAI_PROXY environment variable to pass through -os.environ["OPENAI_PROXY"] = "http://proxy.yourcompany.com:8080" -``` diff --git a/src/oss/python/integrations/tools/dalle_image_generator.mdx b/src/oss/python/integrations/tools/dalle_image_generator.mdx index b94a96b59f..3001bd0510 100644 --- a/src/oss/python/integrations/tools/dalle_image_generator.mdx +++ b/src/oss/python/integrations/tools/dalle_image_generator.mdx @@ -20,7 +20,7 @@ os.environ["OPENAI_API_KEY"] = "insertapikey" ## Run as a chain ```python -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper from langchain_core.prompts import PromptTemplate from langchain_openai import OpenAI diff --git a/src/oss/python/integrations/tools/memorize.mdx b/src/oss/python/integrations/tools/memorize.mdx index c2b8c1b312..dbae5c18e6 100644 --- a/src/oss/python/integrations/tools/memorize.mdx +++ b/src/oss/python/integrations/tools/memorize.mdx @@ -12,7 +12,7 @@ This tool requires LLMs that support fine-tuning. Currently, only `langchain.llm import os from langchain.agents import AgentExecutor, AgentType, initialize_agent, load_tools -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain.memory import ConversationBufferMemory from langchain_community.llms import GradientLLM ``` diff --git a/src/oss/python/integrations/tools/reddit_search.mdx b/src/oss/python/integrations/tools/reddit_search.mdx index 06a3b1a117..da1d815d54 100644 --- a/src/oss/python/integrations/tools/reddit_search.mdx +++ b/src/oss/python/integrations/tools/reddit_search.mdx @@ -94,7 +94,7 @@ To run the example, add your reddit API access information and also get an OpenA # Adapted code from /docs/modules/agents/how_to/sharedmemory_for_tools from langchain.agents import AgentExecutor, StructuredChatAgent -from langchain.chains import LLMChain +from langchain_classic.chains import LLMChain from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory from langchain_community.tools.reddit_search.tool import RedditSearchRun from langchain_community.utilities.reddit_search import RedditSearchAPIWrapper diff --git a/src/oss/python/integrations/tools/zapier.mdx b/src/oss/python/integrations/tools/zapier.mdx index b37ea6ea21..26a9c304d4 100644 --- a/src/oss/python/integrations/tools/zapier.mdx +++ b/src/oss/python/integrations/tools/zapier.mdx @@ -88,7 +88,7 @@ Final Answer: I have sent a summary of the last email from Silicon Valley Bank t If you need more explicit control, use a chain, like below. 
```python -from langchain.chains import LLMChain, SimpleSequentialChain, TransformChain +from langchain_classic.chains import LLMChain, SimpleSequentialChain, TransformChain from langchain_community.tools.zapier.tool import ZapierNLARunAction from langchain_community.utilities.zapier import ZapierNLAWrapper from langchain_core.prompts import PromptTemplate diff --git a/src/oss/python/integrations/vectorstores/activeloop_deeplake.mdx b/src/oss/python/integrations/vectorstores/activeloop_deeplake.mdx index 6c25796edf..c4315f9ee4 100644 --- a/src/oss/python/integrations/vectorstores/activeloop_deeplake.mdx +++ b/src/oss/python/integrations/vectorstores/activeloop_deeplake.mdx @@ -86,7 +86,7 @@ Setting `read_only=True` revents accidental modifications to the vector store wh ### Retrieval Question/Answering ```python -from langchain.chains import RetrievalQA +from langchain_classic.chains import RetrievalQA from langchain_openai import ChatOpenAI qa = RetrievalQA.from_chain_type( diff --git a/src/oss/python/integrations/vectorstores/apache_doris.mdx b/src/oss/python/integrations/vectorstores/apache_doris.mdx index 91d6533763..9ad138c767 100644 --- a/src/oss/python/integrations/vectorstores/apache_doris.mdx +++ b/src/oss/python/integrations/vectorstores/apache_doris.mdx @@ -25,7 +25,7 @@ Set `update_vectordb = False` at the beginning. If there is no docs updated, the ``` ```python -from langchain.chains import RetrievalQA +from langchain_classic.chains import RetrievalQA from langchain_community.document_loaders import ( DirectoryLoader, UnstructuredMarkdownLoader, diff --git a/src/oss/python/integrations/vectorstores/aperturedb.mdx b/src/oss/python/integrations/vectorstores/aperturedb.mdx index 88e7e7ca90..417d2e1be1 100644 --- a/src/oss/python/integrations/vectorstores/aperturedb.mdx +++ b/src/oss/python/integrations/vectorstores/aperturedb.mdx @@ -115,7 +115,7 @@ Question: {input}""") # Create a chain that passes documents to an LLM -from langchain.chains.combine_documents import create_stuff_documents_chain +from langchain_classic.chains.combine_documents import create_stuff_documents_chain document_chain = create_stuff_documents_chain(llm, prompt) @@ -125,7 +125,7 @@ retriever = vector_db.as_retriever() # Create a RAG chain that connects the retriever to the LLM -from langchain.chains import create_retrieval_chain +from langchain_classic.chains import create_retrieval_chain retrieval_chain = create_retrieval_chain(retriever, document_chain) ``` diff --git a/src/oss/python/integrations/vectorstores/documentdb.mdx b/src/oss/python/integrations/vectorstores/documentdb.mdx index 8380477111..69c3ec448a 100644 --- a/src/oss/python/integrations/vectorstores/documentdb.mdx +++ b/src/oss/python/integrations/vectorstores/documentdb.mdx @@ -213,7 +213,7 @@ PROMPT = PromptTemplate( ``` ```python -from langchain.chains import RetrievalQA +from langchain_classic.chains import RetrievalQA from langchain_openai import OpenAI qa = RetrievalQA.from_chain_type( diff --git a/src/oss/python/integrations/vectorstores/google_scann.mdx b/src/oss/python/integrations/vectorstores/google_scann.mdx index b5eef8b294..371bfddadb 100644 --- a/src/oss/python/integrations/vectorstores/google_scann.mdx +++ b/src/oss/python/integrations/vectorstores/google_scann.mdx @@ -49,7 +49,7 @@ Next, we demonstrate using ScaNN in conjunction with Google PaLM API. 
You can obtain an API key from [developers.generativeai.google/tutorials/setup](https://developers.generativeai.google/tutorials/setup)

```python
-from langchain.chains import RetrievalQA
+from langchain_classic.chains import RetrievalQA
from langchain_community.chat_models.google_palm import ChatGooglePalm

palm_client = ChatGooglePalm(google_api_key="YOUR_GOOGLE_PALM_API_KEY")
diff --git a/src/oss/python/integrations/vectorstores/google_vertex_ai_vector_search.mdx b/src/oss/python/integrations/vectorstores/google_vertex_ai_vector_search.mdx
index a9c8a4f9b7..dea68f43e4 100644
--- a/src/oss/python/integrations/vectorstores/google_vertex_ai_vector_search.mdx
+++ b/src/oss/python/integrations/vectorstores/google_vertex_ai_vector_search.mdx
@@ -316,7 +316,7 @@ llm = VertexAI(model_name="gemini-pro")
```

```python
-from langchain.chains import RetrievalQA
+from langchain_classic.chains import RetrievalQA

filters = [Namespace(name="season", allow_tokens=["spring"])]
numeric_filters = [NumericNamespace(name="price", value_float=40.0, op="LESS")]
diff --git a/src/oss/python/integrations/vectorstores/index.mdx b/src/oss/python/integrations/vectorstores/index.mdx
index dd18874e2b..ea42c9ba81 100644
--- a/src/oss/python/integrations/vectorstores/index.mdx
+++ b/src/oss/python/integrations/vectorstores/index.mdx
@@ -383,6 +383,41 @@ from langchain_core.vectorstores import InMemoryVectorStore

vector_store = InMemoryVectorStore(embeddings)
```
+
+
+
+
+```bash pip
+pip install -qU boto3 requests-aws4auth opensearch-py
+```
+
+```python
+import boto3
+from opensearchpy import RequestsHttpConnection
+from requests_aws4auth import AWS4Auth
+
+from langchain_community.vectorstores import OpenSearchVectorSearch
+
+service = "es"  # must set the service as 'es'
+region = "us-east-2"
+credentials = boto3.Session(
+    aws_access_key_id="xxxxxx", aws_secret_access_key="xxxxx"
+).get_credentials()
+awsauth = AWS4Auth("xxxxx", "xxxxxx", region, service, session_token=credentials.token)
+
+vector_store = OpenSearchVectorSearch.from_documents(
+    docs,  # a list of Document objects prepared earlier
+    embeddings,
+    opensearch_url="host url",
+    http_auth=awsauth,
+    timeout=300,
+    use_ssl=True,
+    verify_certs=True,
+    connection_class=RequestsHttpConnection,
+    index_name="test-index",
+)
+```
+
diff --git a/src/oss/python/integrations/vectorstores/jaguar.mdx b/src/oss/python/integrations/vectorstores/jaguar.mdx
index 0d2e09aa20..3a1b585870 100644
--- a/src/oss/python/integrations/vectorstores/jaguar.mdx
+++ b/src/oss/python/integrations/vectorstores/jaguar.mdx
@@ -47,7 +47,7 @@ There are two requirements for running the examples in this file.

This section demonstrates chatting with LLM together with Jaguar in the langchain software stack.

```python
-from langchain.chains import RetrievalQAWithSourcesChain
+from langchain_classic.chains import RetrievalQAWithSourcesChain
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores.jaguar import Jaguar
from langchain_core.output_parsers import StrOutputParser
diff --git a/src/oss/python/integrations/vectorstores/marqo.mdx b/src/oss/python/integrations/vectorstores/marqo.mdx
index 39a348279e..1a2bbc71b6 100644
--- a/src/oss/python/integrations/vectorstores/marqo.mdx
+++ b/src/oss/python/integrations/vectorstores/marqo.mdx
@@ -279,7 +279,7 @@ This section shows how to use Marqo as part of a `RetrievalQAWithSourcesChain`.
import getpass import os -from langchain.chains import RetrievalQAWithSourcesChain +from langchain_classic.chains import RetrievalQAWithSourcesChain from langchain_openai import OpenAI if "OPENAI_API_KEY" not in os.environ: diff --git a/src/oss/python/integrations/vectorstores/momento_vector_index.mdx b/src/oss/python/integrations/vectorstores/momento_vector_index.mdx index 624681388a..94c7b74081 100644 --- a/src/oss/python/integrations/vectorstores/momento_vector_index.mdx +++ b/src/oss/python/integrations/vectorstores/momento_vector_index.mdx @@ -130,7 +130,7 @@ With the data indexed in MVI, we can integrate with any chain that leverages vec First we load the relevant modules: ```python -from langchain.chains import RetrievalQA +from langchain_classic.chains import RetrievalQA from langchain_openai import ChatOpenAI ``` diff --git a/src/oss/python/integrations/vectorstores/neo4jvector.mdx b/src/oss/python/integrations/vectorstores/neo4jvector.mdx index 934de91a94..7a0261ef0e 100644 --- a/src/oss/python/integrations/vectorstores/neo4jvector.mdx +++ b/src/oss/python/integrations/vectorstores/neo4jvector.mdx @@ -428,7 +428,7 @@ Document(page_content='Tonight. I call on the Senate to: Pass the Freedom to Vot This section goes over how to do question-answering with sources over an Index. It does this by using the `RetrievalQAWithSourcesChain`, which does the lookup of the documents from an Index. ```python -from langchain.chains import RetrievalQAWithSourcesChain +from langchain_classic.chains import RetrievalQAWithSourcesChain from langchain_openai import ChatOpenAI ``` diff --git a/src/oss/python/integrations/vectorstores/sap_hanavector.mdx b/src/oss/python/integrations/vectorstores/sap_hanavector.mdx index 63acbbce99..d9eb020531 100644 --- a/src/oss/python/integrations/vectorstores/sap_hanavector.mdx +++ b/src/oss/python/integrations/vectorstores/sap_hanavector.mdx @@ -579,7 +579,7 @@ chain_type_kwargs = {"prompt": PROMPT} Create the ConversationalRetrievalChain, which handles the chat history and the retrieval of similar document chunks to be added to the prompt. ```python -from langchain.chains import ConversationalRetrievalChain +from langchain_classic.chains import ConversationalRetrievalChain from langchain.memory import ConversationBufferMemory from langchain_openai import ChatOpenAI diff --git a/src/oss/python/integrations/vectorstores/sqlserver.mdx b/src/oss/python/integrations/vectorstores/sqlserver.mdx index dc4b6388bd..3f880f8267 100644 --- a/src/oss/python/integrations/vectorstores/sqlserver.mdx +++ b/src/oss/python/integrations/vectorstores/sqlserver.mdx @@ -377,8 +377,8 @@ Read more about LangChain RAG tutorials & the terminologies mentioned above [her from typing import List, Tuple import pandas as pd -from langchain.chains import create_retrieval_chain -from langchain.chains.combine_documents import create_stuff_documents_chain +from langchain_classic.chains import create_retrieval_chain +from langchain_classic.chains.combine_documents import create_stuff_documents_chain from langchain_core.prompts import ChatPromptTemplate diff --git a/src/oss/python/integrations/vectorstores/starrocks.mdx b/src/oss/python/integrations/vectorstores/starrocks.mdx index 331039451f..f11dbcfbf6 100644 --- a/src/oss/python/integrations/vectorstores/starrocks.mdx +++ b/src/oss/python/integrations/vectorstores/starrocks.mdx @@ -18,7 +18,7 @@ pip install -qU pymysql langchain-community Set `update_vectordb = False` at the beginning. 
-If there is no docs updated, then we don't need to rebuild the embeddings of docs
+If no docs have been updated, we don't need to rebuild the embeddings.

```python
-from langchain.chains import RetrievalQA
+from langchain_classic.chains import RetrievalQA
from langchain_community.document_loaders import (
    DirectoryLoader,
    UnstructuredMarkdownLoader,
diff --git a/src/oss/python/integrations/vectorstores/timescalevector.mdx b/src/oss/python/integrations/vectorstores/timescalevector.mdx
index 8ee74eb241..75fed37fd6 100644
--- a/src/oss/python/integrations/vectorstores/timescalevector.mdx
+++ b/src/oss/python/integrations/vectorstores/timescalevector.mdx
@@ -227,7 +227,7 @@ from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0.1, model="gpt-3.5-turbo-16k")

# Initialize a RetrievalQA class from a stuff chain
-from langchain.chains import RetrievalQA
+from langchain_classic.chains import RetrievalQA

qa_stuff = RetrievalQA.from_chain_type(
    llm=llm,
@@ -665,7 +665,7 @@ from langchain_openai import ChatOpenAI

llm = ChatOpenAI(temperature=0.1, model="gpt-3.5-turbo-16k")

-from langchain.chains import RetrievalQA
+from langchain_classic.chains import RetrievalQA

qa_stuff = RetrievalQA.from_chain_type(
    llm=llm,
@@ -788,7 +788,7 @@ vectorstore = TimescaleVector(
Next we'll create our self-querying retriever. To do this we'll need to provide some information upfront about the metadata fields that our documents support and a short description of the document contents.

```python
-from langchain.chains.query_constructor.base import AttributeInfo
+from langchain_classic.chains.query_constructor.base import AttributeInfo
from langchain_classic.retrievers.self_query.base import SelfQueryRetriever
from langchain_openai import OpenAI
diff --git a/src/oss/python/integrations/vectorstores/weaviate.mdx b/src/oss/python/integrations/vectorstores/weaviate.mdx
index 1a4a34a4bb..4ff52c8bff 100644
--- a/src/oss/python/integrations/vectorstores/weaviate.mdx
+++ b/src/oss/python/integrations/vectorstores/weaviate.mdx
@@ -312,7 +312,7 @@ This section uses the `RetrievalQAWithSourcesChain`, which does the lookup of th
First, we will chunk the text again and import them into the Weaviate vector store.

```python
-from langchain.chains import RetrievalQAWithSourcesChain
+from langchain_classic.chains import RetrievalQAWithSourcesChain
from langchain_openai import OpenAI
```
diff --git a/src/oss/python/integrations/vectorstores/yellowbrick.mdx b/src/oss/python/integrations/vectorstores/yellowbrick.mdx
index e2e26bc16e..8ef7f6135d 100644
--- a/src/oss/python/integrations/vectorstores/yellowbrick.mdx
+++ b/src/oss/python/integrations/vectorstores/yellowbrick.mdx
@@ -56,7 +56,7 @@ from getpass import getpass

import psycopg2
from IPython.display import Markdown, display
-from langchain.chains import LLMChain, RetrievalQAWithSourcesChain
+from langchain_classic.chains import LLMChain, RetrievalQAWithSourcesChain
from langchain_community.vectorstores import Yellowbrick
from langchain_core.documents import Document
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
diff --git a/src/oss/python/migrate/langchain-v1.mdx b/src/oss/python/migrate/langchain-v1.mdx
index f2f90d5f0d..3db27af2fb 100644
--- a/src/oss/python/migrate/langchain-v1.mdx
+++ b/src/oss/python/migrate/langchain-v1.mdx
@@ -48,7 +48,7 @@ If you were using any of the following from the `langchain` package, you'll need
```python v0 (old)
# Chains
from langchain.chains import LLMChain

# Retrievers
from langchain.retrievers import ...
@@ -917,7 +917,7 @@ If you were using any of the following from the `langchain` package, you'll need
```python v0 (old)
# Chains
from langchain.chains import LLMChain

# Retrievers
from langchain.retrievers import ...
diff --git a/src/oss/python/releases/changelog.mdx b/src/oss/python/releases/changelog.mdx
index faf821822d..c1a432d89c 100644
--- a/src/oss/python/releases/changelog.mdx
+++ b/src/oss/python/releases/changelog.mdx
@@ -10,6 +10,14 @@ rss: true

+
+## `langchain` v1.2.0
+
+* [`create_agent`](/oss/langchain/agents): Simplified support for provider-specific tool parameters and definitions via a new [`extras`](https://reference.langchain.com/python/langchain/tools/#langchain.tools.BaseTool.extras) attribute on [tools](/oss/langchain/tools). Examples:
+  * Provider-specific configuration such as Anthropic's [programmatic tool calling](/oss/integrations/chat/anthropic#programmatic-tool-calling) and [tool search](/oss/integrations/chat/anthropic#tool-search).
+  * Built-in tools that are executed client-side, as supported by [Anthropic](/oss/integrations/chat/anthropic#built-in-tools), [OpenAI](/oss/integrations/chat/openai#responses-api), and other providers.
+* Support for strict schema adherence in agent `response_format` (see the [`ProviderStrategy`](/oss/langchain/structured-output#provider-strategy) docs).
+

## `langchain-google-genai` v4.0.0
diff --git a/src/snippets/langsmith/framework-agnostic-js.mdx b/src/snippets/langsmith/framework-agnostic-js.mdx
deleted file mode 100644
index 2d968851ae..0000000000
--- a/src/snippets/langsmith/framework-agnostic-js.mdx
+++ /dev/null
@@ -1 +0,0 @@
-LangSmith Deployment supports deploying a [LangGraph](/oss/javascript/langgraph/overview) _graph_. However, the implementation of a _node_ of a graph can contain arbitrary Python code. This means any framework can be implemented within a node and deployed on LangSmith Deployment. This lets you keep your core application logic outside LangGraph while still using LangSmith for [deployment](/langsmith/deployments), scaling, and [observability](/langsmith/observability).
diff --git a/src/snippets/langsmith/framework-agnostic-py.mdx b/src/snippets/langsmith/framework-agnostic-py.mdx
deleted file mode 100644
index d23e8ec108..0000000000
--- a/src/snippets/langsmith/framework-agnostic-py.mdx
+++ /dev/null
@@ -1 +0,0 @@
-LangSmith Deployment supports deploying a [LangGraph](/oss/python/langgraph/overview) _graph_. However, the implementation of a _node_ of a graph can contain arbitrary Python code. This means any framework can be implemented within a node and deployed on LangSmith Deployment. This lets you keep your core application logic outside LangGraph while still using LangSmith for [deployment](/langsmith/deployments), scaling, and [observability](/langsmith/observability).
diff --git a/src/snippets/langsmith/framework-agnostic.mdx b/src/snippets/langsmith/framework-agnostic.mdx
new file mode 100644
index 0000000000..e28d0d4910
--- /dev/null
+++ b/src/snippets/langsmith/framework-agnostic.mdx
@@ -0,0 +1 @@
+LangSmith Deployment supports deploying a [LangGraph](/oss/python/langgraph/overview) _graph_. However, the implementation of a _node_ of a graph can contain arbitrary code. This means any framework can be implemented within a node and deployed on LangSmith Deployment. This lets you implement your core application logic without using additional LangGraph OSS APIs while still using LangSmith for [deployment](/langsmith/deployments), scaling, and [observability](/langsmith/observability). For more details, refer to [Use any framework with LangSmith Deployment](/langsmith/application-structure#use-any-framework-with-langsmith-deployment).
diff --git a/src/snippets/oss/deploy.mdx b/src/snippets/oss/deploy.mdx
index df286fa80b..2f51cdef69 100644
--- a/src/snippets/oss/deploy.mdx
+++ b/src/snippets/oss/deploy.mdx
@@ -2,7 +2,7 @@

### 2. Deploy to LangSmith

-
+
Log in to [LangSmith](https://smith.langchain.com/). In the left sidebar, select **Deployments**.
diff --git a/src/snippets/vectorstore-tabs-py.mdx b/src/snippets/vectorstore-tabs-py.mdx
index 97ad523ffd..694015a106 100644
--- a/src/snippets/vectorstore-tabs-py.mdx
+++ b/src/snippets/vectorstore-tabs-py.mdx
@@ -10,6 +10,42 @@
vector_store = InMemoryVectorStore(embeddings)
```
+
+
+
+  ```shell
+  pip install -qU boto3 requests-aws4auth opensearch-py
+  ```
+
+  ```python
+  import boto3
+  from opensearchpy import RequestsHttpConnection
+  from requests_aws4auth import AWS4Auth
+
+  from langchain_community.vectorstores import OpenSearchVectorSearch
+
+  service = "es"  # must set the service as 'es'
+  region = "us-east-2"
+  credentials = boto3.Session(
+      aws_access_key_id="xxxxxx", aws_secret_access_key="xxxxx"
+  ).get_credentials()
+  awsauth = AWS4Auth("xxxxx", "xxxxxx", region, service, session_token=credentials.token)
+
+  vector_store = OpenSearchVectorSearch.from_documents(
+      docs,  # a list of Document objects prepared earlier
+      embeddings,
+      opensearch_url="host url",
+      http_auth=awsauth,
+      timeout=300,
+      use_ssl=True,
+      verify_certs=True,
+      connection_class=RequestsHttpConnection,
+      index_name="test-index",
+  )
+  ```
+
+
  ```shell
  pip install -U "langchain-astradb"