From 963db633a23ac78aa2d1bf47c5ec32cbc06fbfc0 Mon Sep 17 00:00:00 2001 From: William FH <13333726+hinthornw@users.noreply.github.com> Date: Sun, 19 May 2024 22:18:30 -0700 Subject: [PATCH] Fix TS in docs (#170) --- examples/how-tos/branching.ipynb | 222 ++--- examples/how-tos/configuration.ipynb | 171 ++-- .../dynamically-returning-directly.ipynb | 338 +++---- .../how-tos/force-calling-a-tool-first.ipynb | 424 +++----- examples/how-tos/human-in-the-loop.ipynb | 293 +++--- examples/how-tos/managing-agent-steps.ipynb | 442 +++------ examples/how-tos/persistence.ipynb | 432 ++++---- examples/how-tos/respond-in-format.ipynb | 349 +++---- examples/how-tos/stream-tokens.ipynb | 189 ++-- examples/how-tos/subgraph.ipynb | 255 ++--- examples/how-tos/time-travel.ipynb | 919 ++++++------------ langgraph/package.json | 3 +- package.json | 2 + yarn.lock | 49 + 14 files changed, 1676 insertions(+), 2412 deletions(-) diff --git a/examples/how-tos/branching.ipynb b/examples/how-tos/branching.ipynb index 3c7f38ae..855f8d12 100644 --- a/examples/how-tos/branching.ipynb +++ b/examples/how-tos/branching.ipynb @@ -24,22 +24,31 @@ "\n", "This guide will use OpenAI's GPT-4o model. We will optionally set our API key\n", "for [LangSmith tracing](https://smith.langchain.com/), which will give us\n", - "best-in-class observability." + "best-in-class observability.\n" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Branching: LangGraphJS\n" + ] + } + ], "source": [ - "// Deno.env.set(\"OPENAI_API_KEY\", \"sk_...\");\n", + "// process.env.OPENAI_API_KEY = \"sk_...\";\n", "\n", "// Optional, add tracing in LangSmith\n", - "// Deno.env.set(\"LANGCHAIN_API_KEY\", \"ls__...\");\n", - "// Deno.env.set(\"LANGCHAIN_CALLBACKS_BACKGROUND\", \"true\");\n", - "Deno.env.set(\"LANGCHAIN_TRACING_V2\", \"true\");\n", - "Deno.env.set(\"LANGCHAIN_PROJECT\", \"Configuration: LangGraphJS\");\n" + "// process.env.LANGCHAIN_API_KEY = \"ls__...\"\n", + "// process.env.LANGCHAIN_CALLBACKS_BACKGROUND = \"true\";\n", + "process.env.LANGCHAIN_CALLBACKS_BACKGROUND = \"true\";\n", + "process.env.LANGCHAIN_TRACING_V2 = \"true\";\n", + "process.env.LANGCHAIN_PROJECT = \"Branching: LangGraphJS\";" ] }, { @@ -50,12 +59,12 @@ "\n", "First, we will make a simple graph that branches out and back in. When merging\n", "back in, the state updates from all branches are applied by your **reducer**\n", - "(the `aggregate` method below)." 
+ "(the `aggregate` method below).\n" ] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 2, "metadata": {}, "outputs": [ { @@ -65,30 +74,21 @@ "Adding I'm A to \n", "Adding I'm B to I'm A\n", "Adding I'm C to I'm A\n", - "Adding I'm D to I'm A,I'm B,I'm C\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Base Result: { aggregate: [ \"I'm A\", \"I'm B\", \"I'm C\", \"I'm D\" ] }\n" + "Adding I'm D to I'm A,I'm B,I'm C\n", + "Base Result: { aggregate: [ \u001b[32m\"I'm A\"\u001b[39m, \u001b[32m\"I'm B\"\u001b[39m, \u001b[32m\"I'm C\"\u001b[39m, \u001b[32m\"I'm D\"\u001b[39m ] }\n" ] } ], "source": [ "import { END, START, StateGraph } from \"@langchain/langgraph\";\n", + "import { StateGraphArgs } from \"@langchain/langgraph\";\n", "\n", "// Define the state type\n", "interface IState {\n", - " // The operator.add reducer function makes this append-only\n", - " aggregate: {\n", - " value: (x: string[], y: string[]) => string[];\n", - " default: () => string[];\n", - " };\n", + " aggregate: string[];\n", "}\n", "\n", - "const graphState: IState = {\n", + "const graphState: StateGraphArgs[\"channels\"] = {\n", " aggregate: {\n", " value: (x: string[], y: string[]) => x.concat(y),\n", " default: () => [],\n", @@ -99,37 +99,39 @@ "class ReturnNodeValue {\n", " private _value: string;\n", "\n", - " constructor(nodeSecret: string) {\n", - " this._value = nodeSecret;\n", + " constructor(value: string) {\n", + " this._value = value;\n", " }\n", "\n", - " public call(state: State): Partial {\n", + " public call(state: IState) {\n", " console.log(`Adding ${this._value} to ${state.aggregate}`);\n", " return { aggregate: [this._value] };\n", " }\n", "}\n", "\n", "// Create the graph\n", - "const builder = new StateGraph({ channels: graphState });\n", "const nodeA = new ReturnNodeValue(\"I'm A\");\n", - "builder.addNode(\"a\", nodeA.call.bind(nodeA));\n", - "builder.addEdge(START, \"a\");\n", "const nodeB = new ReturnNodeValue(\"I'm B\");\n", "const nodeC = new ReturnNodeValue(\"I'm C\");\n", "const nodeD = new ReturnNodeValue(\"I'm D\");\n", - "builder.addNode(\"b\", nodeB.call.bind(nodeB));\n", - "builder.addNode(\"c\", nodeC.call.bind(nodeC));\n", - "builder.addNode(\"d\", nodeD.call.bind(nodeD));\n", - "builder.addEdge(\"a\", \"b\");\n", - "builder.addEdge(\"a\", \"c\");\n", - "builder.addEdge(\"b\", \"d\");\n", - "builder.addEdge(\"c\", \"d\");\n", - "builder.addEdge(\"d\", END);\n", + "\n", + "const builder = new StateGraph({ channels: graphState })\n", + " .addNode(\"a\", nodeA.call.bind(nodeA))\n", + " .addEdge(START, \"a\")\n", + " .addNode(\"b\", nodeB.call.bind(nodeB))\n", + " .addNode(\"c\", nodeC.call.bind(nodeC))\n", + " .addNode(\"d\", nodeD.call.bind(nodeD))\n", + " .addEdge(\"a\", \"b\")\n", + " .addEdge(\"a\", \"c\")\n", + " .addEdge(\"b\", \"d\")\n", + " .addEdge(\"c\", \"d\")\n", + " .addEdge(\"d\", END);\n", + "\n", "const graph = builder.compile();\n", "\n", "// Invoke the graph\n", "const baseResult = await graph.invoke({ aggregate: [] });\n", - "console.log(\"Base Result: \", baseResult);" + "console.log(\"Base Result: \", baseResult);\n" ] }, { @@ -144,12 +146,12 @@ "\n", "If you have a known \"sink\" node that the conditional branches will route to\n", "afterwards, you can provide `then=` when creating the\n", - "conditional edges." 
+ "conditional edges.\n" ] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 3, "metadata": {}, "outputs": [ { @@ -160,7 +162,7 @@ "Adding I'm B to I'm A\n", "Adding I'm C to I'm A\n", "Adding I'm E to I'm A,I'm B,I'm C\n", - "Result 1: { aggregate: [ \"I'm A\", \"I'm B\", \"I'm C\", \"I'm E\" ], which: \"bc\" }\n" + "Result 1: { aggregate: [ \u001b[32m\"I'm A\"\u001b[39m, \u001b[32m\"I'm B\"\u001b[39m, \u001b[32m\"I'm C\"\u001b[39m, \u001b[32m\"I'm E\"\u001b[39m ], which: \u001b[32m'bc'\u001b[39m }\n" ] } ], @@ -168,17 +170,11 @@ "// Define the state type\n", "interface IState2 {\n", " // The operator.add reducer function makes this append-only\n", - " aggregate: {\n", - " value: (x: string[], y: string[]) => string[];\n", - " default: () => string[];\n", - " };\n", - " which: {\n", - " value: (x: string, y: string) => string;\n", - " default: () => string;\n", - " };\n", + " aggregate: string[];\n", + " which: string;\n", "}\n", "\n", - "const graphState: IState2 = {\n", + "const graphState: StateGraphArgs[\"channels\"] = {\n", " aggregate: {\n", " value: (x: string[], y: string[]) => x.concat(y),\n", " default: () => [],\n", @@ -190,18 +186,11 @@ "};\n", "\n", "// Create the graph\n", - "const builder2 = new StateGraph({ channels: graphState });\n", "const nodeA2 = new ReturnNodeValue(\"I'm A\");\n", - "builder2.addNode(\"a\", nodeA.call.bind(nodeA));\n", - "builder2.addEdge(START, \"a\");\n", "const nodeB2 = new ReturnNodeValue(\"I'm B\");\n", "const nodeC2 = new ReturnNodeValue(\"I'm C\");\n", "const nodeD2 = new ReturnNodeValue(\"I'm D\");\n", "const nodeE2 = new ReturnNodeValue(\"I'm E\");\n", - "builder2.addNode(\"b\", nodeB2.call.bind(nodeB2));\n", - "builder2.addNode(\"c\", nodeC2.call.bind(nodeC2));\n", - "builder2.addNode(\"d\", nodeD2.call.bind(nodeD2));\n", - "builder2.addNode(\"e\", nodeE2.call.bind(nodeE2));\n", "// Define the route function\n", "function routeBCOrCD(state: IState2): string[] {\n", " if (state.which === \"cd\") {\n", @@ -210,13 +199,19 @@ " return [\"b\", \"c\"];\n", "}\n", "\n", - "// Add conditional edges\n", - "builder2.addConditionalEdges(\"a\", routeBCOrCD, { b: \"b\", c: \"c\", d: \"d\" });\n", - "\n", - "builder2.addEdge(\"b\", \"e\");\n", - "builder2.addEdge(\"c\", \"e\");\n", - "builder2.addEdge(\"d\", \"e\");\n", - "builder2.addEdge(\"e\", END);\n", + "const builder2 = new StateGraph({ channels: graphState })\n", + " .addNode(\"a\", nodeA2.call.bind(nodeA2))\n", + " .addEdge(START, \"a\")\n", + " .addNode(\"b\", nodeB2.call.bind(nodeB2))\n", + " .addNode(\"c\", nodeC2.call.bind(nodeC2))\n", + " .addNode(\"d\", nodeD2.call.bind(nodeD2))\n", + " .addNode(\"e\", nodeE2.call.bind(nodeE2))\n", + " // Add conditional edges\n", + " .addConditionalEdges(\"a\", routeBCOrCD, { b: \"b\", c: \"c\", d: \"d\" })\n", + " .addEdge(\"b\", \"e\")\n", + " .addEdge(\"c\", \"e\")\n", + " .addEdge(\"d\", \"e\")\n", + " .addEdge(\"e\", END);\n", "\n", "const graph2 = builder2.compile();\n", "\n", @@ -227,7 +222,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 4, "metadata": {}, "outputs": [ { @@ -238,7 +233,7 @@ "Adding I'm C to I'm A\n", "Adding I'm D to I'm A\n", "Adding I'm E to I'm A,I'm C,I'm D\n", - "Result 2: { aggregate: [ \"I'm A\", \"I'm C\", \"I'm D\", \"I'm E\" ], which: \"cd\" }\n" + "Result 2: { aggregate: [ \u001b[32m\"I'm A\"\u001b[39m, \u001b[32m\"I'm C\"\u001b[39m, \u001b[32m\"I'm D\"\u001b[39m, \u001b[32m\"I'm E\"\u001b[39m ], which: \u001b[32m'cd'\u001b[39m }\n" ] } ], @@ -247,13 +242,6 @@ 
"console.log(\"Result 2: \", g2result);\n" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, { "cell_type": "markdown", "metadata": {}, @@ -270,12 +258,12 @@ "regular `edge`s from each of the fanout nodes to the rendezvous point.\n", "\n", "For instance, suppose I want to order the outputs of the parallel step by\n", - "\"reliability\"." + "\"reliability\".\n" ] }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 5, "metadata": {}, "outputs": [ { @@ -286,8 +274,8 @@ "Adding I'm B to I'm A\n", "Adding I'm C to I'm A\n", "Result 1: {\n", - " aggregate: [ \"I'm A\", \"I'm C\", \"I'm B\", \"I'm E\" ],\n", - " which: \"bc\",\n", + " aggregate: [ \u001b[32m\"I'm A\"\u001b[39m, \u001b[32m\"I'm C\"\u001b[39m, \u001b[32m\"I'm B\"\u001b[39m, \u001b[32m\"I'm E\"\u001b[39m ],\n", + " which: \u001b[32m'bc'\u001b[39m,\n", " fanoutValues: []\n", "}\n" ] @@ -314,21 +302,12 @@ "// 'value' defines the 'reducer', which determines how updates are applied\n", "// 'default' defines the default value for the state\n", "interface IState3 {\n", - " aggregate: {\n", - " value: (x: string[], y: string[]) => string[];\n", - " default: () => string[];\n", - " };\n", - " which: {\n", - " value: (x: string, y: string) => string;\n", - " default: string;\n", - " };\n", - " fanoutValues: {\n", - " value: (x?: ScoredValue[], y?: ScoredValue[]) => ScoredValue[];\n", - " default: () => ScoredValue[];\n", - " };\n", + " aggregate: string[];\n", + " which: string;\n", + " fanoutValues: ScoredValue[];\n", "}\n", "\n", - "const graphState3: IState3 = {\n", + "const graphState3: StateGraphArgs[\"channels\"] = {\n", " aggregate: {\n", " value: (x: string[], y: string[]) => x.concat(y),\n", " default: () => [],\n", @@ -359,10 +338,9 @@ "}\n", "\n", "// Create the graph\n", - "const builder3 = new StateGraph({ channels: graphState3 });\n", + "\n", "const nodeA3 = new ReturnNodeValue(\"I'm A\");\n", - "builder3.addNode(\"a\", nodeA.call.bind(nodeA));\n", - "builder3.addEdge(START, \"a\");\n", + "\n", "const nodeB3 = new ParallelReturnNodeValue(\"I'm B\", 0.1);\n", "const nodeC3 = new ParallelReturnNodeValue(\"I'm C\", 0.9);\n", "const nodeD3 = new ParallelReturnNodeValue(\"I'm D\", 0.3);\n", @@ -375,10 +353,7 @@ " fanoutValues: [],\n", " };\n", "};\n", - "builder3.addNode(\"b\", nodeB3.call.bind(nodeB3));\n", - "builder3.addNode(\"c\", nodeC3.call.bind(nodeC3));\n", - "builder3.addNode(\"d\", nodeD3.call.bind(nodeD3));\n", - "builder3.addNode(\"e\", aggregateFanouts);\n", + "\n", "// Define the route function\n", "function routeBCOrCD(state: { which: string }): string[] {\n", " if (state.which === \"cd\") {\n", @@ -387,13 +362,18 @@ " return [\"b\", \"c\"];\n", "}\n", "\n", - "// Add conditional edges\n", - "builder3.addConditionalEdges(\"a\", routeBCOrCD, { b: \"b\", c: \"c\", d: \"d\" });\n", - "\n", - "builder3.addEdge(\"b\", \"e\");\n", - "builder3.addEdge(\"c\", \"e\");\n", - "builder3.addEdge(\"d\", \"e\");\n", - "builder3.addEdge(\"e\", END);\n", + "const builder3 = new StateGraph({ channels: graphState3 })\n", + " .addNode(\"a\", nodeA3.call.bind(nodeA3))\n", + " .addEdge(START, \"a\")\n", + " .addNode(\"b\", nodeB3.call.bind(nodeB3))\n", + " .addNode(\"c\", nodeC3.call.bind(nodeC3))\n", + " .addNode(\"d\", nodeD3.call.bind(nodeD3))\n", + " .addNode(\"e\", aggregateFanouts)\n", + " .addConditionalEdges(\"a\", routeBCOrCD, { b: \"b\", c: \"c\", d: \"d\" })\n", + " .addEdge(\"b\", \"e\")\n", + " .addEdge(\"c\", \"e\")\n", + " .addEdge(\"d\", 
\"e\")\n", + " .addEdge(\"e\", END);\n", "\n", "const graph3 = builder3.compile();\n", "\n", @@ -409,12 +389,12 @@ "Our aggregateFanouts \"sink\" node in this case took the mapped values and then\n", "sorted them in a consistent way. Notice that, because it returns an empty array\n", "for `fanoutValues`, our `reduceFanouts` reducer function decided to overwrite\n", - "the previous values in the state." + "the previous values in the state.\n" ] }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -425,8 +405,8 @@ "Adding I'm C to I'm A\n", "Adding I'm D to I'm A\n", "Result 2: {\n", - " aggregate: [ \"I'm A\", \"I'm C\", \"I'm D\", \"I'm E\" ],\n", - " which: \"cd\",\n", + " aggregate: [ \u001b[32m\"I'm A\"\u001b[39m, \u001b[32m\"I'm C\"\u001b[39m, \u001b[32m\"I'm D\"\u001b[39m, \u001b[32m\"I'm E\"\u001b[39m ],\n", + " which: \u001b[32m'cd'\u001b[39m,\n", " fanoutValues: []\n", "}\n" ] @@ -434,25 +414,35 @@ ], "source": [ "let g3result2 = await graph3.invoke({ aggregate: [], which: \"cd\" });\n", - "console.log(\"Result 2: \", g3result2);" + "console.log(\"Result 2: \", g3result2);\n" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Deno", + "display_name": "TypeScript", "language": "typescript", - "name": "deno" + "name": "tslab" }, "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, "file_extension": ".ts", - "mimetype": "text/x.typescript", + "mimetype": "text/typescript", "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.4.5" + "version": "3.7.2" } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/examples/how-tos/configuration.ipynb b/examples/how-tos/configuration.ipynb index 341d09bd..d561e272 100644 --- a/examples/how-tos/configuration.ipynb +++ b/examples/how-tos/configuration.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "8221c040", "metadata": {}, "source": [ "# Configuration\n", @@ -32,20 +33,32 @@ { "cell_type": "code", "execution_count": 1, - "metadata": {}, - "outputs": [], + "id": "f0dcd657", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Configuration: LangGraphJS\n" + ] + } + ], "source": [ - "// Deno.env.set(\"OPENAI_API_KEY\", \"sk_...\");\n", + "// process.env.OPENAI_API_KEY = \"sk_...\";\n", "\n", "// Optional, add tracing in LangSmith\n", - "// Deno.env.set(\"LANGCHAIN_API_KEY\", \"ls__...\");\n", - "// Deno.env.set(\"LANGCHAIN_CALLBACKS_BACKGROUND\", \"true\");\n", - "Deno.env.set(\"LANGCHAIN_TRACING_V2\", \"true\");\n", - "Deno.env.set(\"LANGCHAIN_PROJECT\", \"Configuration: LangGraphJS\");\n" + "// process.env.LANGCHAIN_API_KEY = \"ls__...\";\n", + "// process.env.LANGCHAIN_CALLBACKS_BACKGROUND = \"true\";\n", + "process.env.LANGCHAIN_TRACING_V2 = \"true\";\n", + "process.env.LANGCHAIN_PROJECT = \"Configuration: LangGraphJS\";" ] }, { "cell_type": "markdown", + "id": "a04f018e", "metadata": {}, "source": [ "## Define the graph\n", @@ -56,72 +69,17 @@ { "cell_type": "code", "execution_count": 2, + "id": "bdf2fe0f", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "[WARN]: You have enabled LangSmith tracing without backgrounding callbacks.\n", - "[WARN]: If you are not using a serverless environment where you must wait for 
tracing calls to finish,\n", - "[WARN]: we suggest setting \"process.env.LANGCHAIN_CALLBACKS_BACKGROUND=true\" to avoid additional latency.\n", "[WARN]: You have enabled LangSmith tracing without backgrounding callbacks.\n", "[WARN]: If you are not using a serverless environment where you must wait for tracing calls to finish,\n", "[WARN]: we suggest setting \"process.env.LANGCHAIN_CALLBACKS_BACKGROUND=true\" to avoid additional latency.\n" ] - }, - { - "data": { - "text/plain": [ - "StateGraph {\n", - " nodes: {\n", - " fetchUserInfo: RunnableLambda {\n", - " lc_serializable: \u001b[33mfalse\u001b[39m,\n", - " lc_kwargs: { func: \u001b[36m[AsyncFunction: fetchUserInformation]\u001b[39m },\n", - " lc_runnable: \u001b[33mtrue\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"runnables\"\u001b[39m ],\n", - " func: \u001b[36m[AsyncFunction: fetchUserInformation]\u001b[39m\n", - " },\n", - " agent: RunnableLambda {\n", - " lc_serializable: \u001b[33mfalse\u001b[39m,\n", - " lc_kwargs: { func: \u001b[36m[AsyncFunction: callModel]\u001b[39m },\n", - " lc_runnable: \u001b[33mtrue\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"runnables\"\u001b[39m ],\n", - " func: \u001b[36m[AsyncFunction: callModel]\u001b[39m\n", - " }\n", - " },\n", - " edges: Set(3) {\n", - " [ \u001b[32m\"__start__\"\u001b[39m, \u001b[32m\"fetchUserInfo\"\u001b[39m ],\n", - " [ \u001b[32m\"fetchUserInfo\"\u001b[39m, \u001b[32m\"agent\"\u001b[39m ],\n", - " [ \u001b[32m\"agent\"\u001b[39m, \u001b[32m\"__end__\"\u001b[39m ]\n", - " },\n", - " branches: {},\n", - " entryPoint: \u001b[90mundefined\u001b[39m,\n", - " compiled: \u001b[33mtrue\u001b[39m,\n", - " supportMultipleEdges: \u001b[33mtrue\u001b[39m,\n", - " channels: {\n", - " messages: BinaryOperatorAggregate {\n", - " lc_graph_name: \u001b[32m\"BinaryOperatorAggregate\"\u001b[39m,\n", - " value: [],\n", - " operator: \u001b[36m[Function: value]\u001b[39m,\n", - " initialValueFactory: \u001b[36m[Function: default]\u001b[39m\n", - " },\n", - " userInfo: BinaryOperatorAggregate {\n", - " lc_graph_name: \u001b[32m\"BinaryOperatorAggregate\"\u001b[39m,\n", - " value: \u001b[32m\"N/A\"\u001b[39m,\n", - " operator: \u001b[36m[Function: value]\u001b[39m,\n", - " initialValueFactory: \u001b[36m[Function: default]\u001b[39m\n", - " }\n", - " },\n", - " waitingEdges: Set(0) {}\n", - "}" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" } ], "source": [ @@ -130,21 +88,21 @@ "import { ChatAnthropic } from \"@langchain/anthropic\";\n", "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", "import { RunnableConfig } from \"@langchain/core/runnables\";\n", - "import { END, MemorySaver, START, StateGraph } from \"@langchain/langgraph\";\n", + "import {\n", + " END,\n", + " MemorySaver,\n", + " START,\n", + " StateGraph,\n", + " StateGraphArgs,\n", + "} from \"@langchain/langgraph\";\n", "\n", "interface IState {\n", - " messages: {\n", - " value: (x: BaseMessage[], y: BaseMessage[]) => BaseMessage[];\n", - " default: () => BaseMessage[];\n", - " };\n", - " userInfo: {\n", - " value: (x: string, y: string) => string;\n", - " default: () => string;\n", - " };\n", + " messages: BaseMessage[];\n", + " userInfo: string;\n", "}\n", "\n", "// This defines the agent state\n", - "const graphState: IState = {\n", + "const graphState: StateGraphArgs[\"channels\"] = {\n", " messages: 
{\n", " value: (x: BaseMessage[], y: BaseMessage[]) => x.concat(y),\n", " default: () => [],\n", @@ -163,7 +121,7 @@ "]);\n", "\n", "const callModel = async (\n", - " state: { messages: Array; userInfo: string },\n", + " state: { messages: BaseMessage[]; userInfo: string },\n", " config: RunnableConfig,\n", ") => {\n", " const { messages, userInfo } = state;\n", @@ -182,12 +140,8 @@ " return { messages: [response] };\n", "};\n", "\n", - "const workflow = new StateGraph({\n", - " channels: graphState,\n", - "});\n", - "\n", "const fetchUserInformation = async (\n", - " _: { messages: Array },\n", + " _: { messages: BaseMessage[] },\n", " config: RunnableConfig,\n", ") => {\n", " const userDB = {\n", @@ -204,7 +158,7 @@ " };\n", " const userId = config?.configurable?.user;\n", " if (userId) {\n", - " const user = userDB[userId];\n", + " const user = userDB[userId as keyof typeof userDB];\n", " if (user) {\n", " return {\n", " userInfo:\n", @@ -215,15 +169,14 @@ " return { userInfo: \"N/A\" };\n", "};\n", "\n", - "// Define the two nodes we will cycle between\n", - "workflow.addNode(\"fetchUserInfo\", fetchUserInformation);\n", - "workflow.addNode(\"agent\", callModel);\n", - "\n", - "// Set the entrypoint as `fetchUserInfo`\n", - "// so we can always start from there\n", - "workflow.addEdge(START, \"fetchUserInfo\");\n", - "workflow.addEdge(\"fetchUserInfo\", \"agent\");\n", - "workflow.addEdge(\"agent\", END);\n", + "const workflow = new StateGraph({\n", + " channels: graphState,\n", + "})\n", + " .addNode(\"fetchUserInfo\", fetchUserInformation)\n", + " .addNode(\"agent\", callModel)\n", + " .addEdge(START, \"fetchUserInfo\")\n", + " .addEdge(\"fetchUserInfo\", \"agent\")\n", + " .addEdge(\"agent\", END);\n", "\n", "// Here we only save in-memory\n", "let memory = new MemorySaver();\n", @@ -232,6 +185,7 @@ }, { "cell_type": "markdown", + "id": "9ae55d0e", "metadata": {}, "source": [ "## Call with config\n" @@ -240,6 +194,7 @@ { "cell_type": "code", "execution_count": 3, + "id": "ca608969", "metadata": {}, "outputs": [ { @@ -251,14 +206,8 @@ "\n", "Could you remind me of my email??\n", "-----\n", - "\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Sure, John! Your email is jod@langchain.ai. How can I assist you further?\n", + "\n", + "Sure, John! Your email address is jod@langchain.ai.\n", "-----\n", "\n" ] @@ -294,6 +243,7 @@ }, { "cell_type": "markdown", + "id": "1afdf011", "metadata": {}, "source": [ "## Change the config\n", @@ -304,6 +254,7 @@ { "cell_type": "code", "execution_count": 4, + "id": "e568e8e3", "metadata": {}, "outputs": [ { @@ -316,7 +267,7 @@ "Could you remind me of my email??\n", "-----\n", "\n", - "Of course, Jane! Your email is jad@langchain.ai. If you need any further assistance, feel free to ask!\n", + "Of course, Jane! 
Your email address is jad@langchain.ai.\n", "-----\n", "\n" ] @@ -352,6 +303,7 @@ }, { "cell_type": "markdown", + "id": "f000b97c", "metadata": {}, "source": [ "Check out the\n", @@ -361,25 +313,32 @@ }, { "cell_type": "markdown", + "id": "d55f98e1", "metadata": {}, - "source": [] + "source": [ + "```\n", + "```" + ] } ], "metadata": { "kernelspec": { - "display_name": "Deno", + "display_name": "TypeScript", "language": "typescript", - "name": "deno" + "name": "tslab" }, "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, "file_extension": ".ts", - "mimetype": "text/x.typescript", + "mimetype": "text/typescript", "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.4.5" + "version": "3.7.2" } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/examples/how-tos/dynamically-returning-directly.ipynb b/examples/how-tos/dynamically-returning-directly.ipynb index 5c78ddab..f2267a0b 100644 --- a/examples/how-tos/dynamically-returning-directly.ipynb +++ b/examples/how-tos/dynamically-returning-directly.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "7161c5f2", + "id": "cd47f365", "metadata": {}, "source": [ "# Dynamically Returning Directly\n", @@ -33,22 +33,32 @@ { "cell_type": "code", "execution_count": 1, - "id": "4eb03bd6", - "metadata": {}, - "outputs": [], + "id": "bff262dd", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Direct Return: LangGraphJS\n" + ] + } + ], "source": [ - "// Deno.env.set(\"OPENAI_API_KEY\", \"sk_...\");\n", + "// process.env.OPENAI_API_KEY = \"sk_...\";\n", "\n", "// Optional, add tracing in LangSmith\n", - "// Deno.env.set(\"LANGCHAIN_API_KEY\", \"ls__...\");\n", - "Deno.env.set(\"LANGCHAIN_CALLBACKS_BACKGROUND\", \"true\");\n", - "Deno.env.set(\"LANGCHAIN_TRACING_V2\", \"true\");\n", - "Deno.env.set(\"LANGCHAIN_PROJECT\", \"Direct Return: LangGraphJS\");\n" + "// process.env.LANGCHAIN_API_KEY = \"ls__...\"\n", + "process.env.LANGCHAIN_CALLBACKS_BACKGROUND = \"true\";\n", + "process.env.LANGCHAIN_TRACING_V2 = \"true\";\n", + "process.env.LANGCHAIN_PROJECT = \"Direct Return: LangGraphJS\";" ] }, { "cell_type": "markdown", - "id": "57310858", + "id": "f3c02963", "metadata": {}, "source": [ "## Set up the tools\n", @@ -66,7 +76,7 @@ { "cell_type": "code", "execution_count": 2, - "id": "481c95ac", + "id": "c6e93e06", "metadata": {}, "outputs": [], "source": [ @@ -79,10 +89,9 @@ " // that isn't used directly by the tool - it's used by our\n", " // graph instead to determine whether or not to return the\n", " // result directly to the user\n", - " return_direct: z\n", - " .boolean()\n", + " return_direct: z.boolean()\n", " .describe(\n", - " \"Whether or the result of this should be returned directly to the user without you seeing what it is\",\n", + " \"Whether or not the result of this should be returned directly to the user without you seeing what it is\",\n", " )\n", " .default(false),\n", "});\n", @@ -105,7 +114,7 @@ }, { "cell_type": "markdown", - "id": "5b0d34fd", + "id": "f443c375", "metadata": {}, "source": [ "We can now wrap these tools in a simple ToolExecutor.\\\n", @@ -116,19 +125,22 @@ }, { "cell_type": "code", - "execution_count": 3, - "id": "250415e4", - "metadata": {}, + "execution_count": 4, + "id": "82f3a772", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ "import { ToolNode } from 
\"@langchain/langgraph/prebuilt\";\n", + "import { BaseMessage } from \"@langchain/core/messages\";\n", "\n", - "const toolNode = new ToolNode(tools);\n" + "const toolNode = new ToolNode<{ messages: BaseMessage[] }>(tools);" ] }, { "cell_type": "markdown", - "id": "abf3e729", + "id": "e07a9312", "metadata": {}, "source": [ "## Set up the model\n", @@ -147,25 +159,27 @@ }, { "cell_type": "code", - "execution_count": 4, - "id": "2c24d018", - "metadata": {}, + "execution_count": 5, + "id": "f9263d46", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ - "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", "\n", - "const model = new ChatAnthropic({\n", + "const model = new ChatOpenAI({\n", " temperature: 0,\n", - " model: \"claude-3-haiku-20240307\",\n", + " model: \"gpt-3.5-turbo\",\n", "});\n", "// This formats the tools as json schema for the model API.\n", "// The model then uses this like a system prompt.\n", - "const boundModel = model.bindTools(tools);\n" + "const boundModel = model.bindTools(tools);" ] }, { "cell_type": "markdown", - "id": "644169c4", + "id": "4dbab039", "metadata": {}, "source": [ "## Define the agent state\n", @@ -181,33 +195,36 @@ "\n", "For this example, the state we will track will just be a list of messages. We\n", "want each node to just add messages to that list. Therefore, we will define the\n", - "state as follows:\n" + "state as follows:" ] }, { "cell_type": "code", - "execution_count": 5, - "id": "24454123", - "metadata": {}, + "execution_count": 6, + "id": "c85e2d40", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ - "interface AgentStateBase {\n", - " messages: Array;\n", - "}\n", + "import { BaseMessage } from \"@langchain/core/messages\";\n", + "import { StateGraphArgs } from \"@langchain/langgraph\";\n", "\n", - "interface AgentState extends AgentStateBase {}\n", + "interface AgentState {\n", + " messages: BaseMessage[];\n", + "}\n", "\n", - "const agentState = {\n", + "const agentState: StateGraphArgs[\"channels\"] = {\n", " messages: {\n", " value: (x: BaseMessage[], y: BaseMessage[]) => x.concat(y),\n", " default: () => [],\n", " },\n", - "};\n" + "};" ] }, { "cell_type": "markdown", - "id": "abae32d9", + "id": "fc4b9760", "metadata": {}, "source": [ "## Define the nodes\n", @@ -238,20 +255,23 @@ }, { "cell_type": "code", - "execution_count": 6, - "id": "23a8b9c6", - "metadata": {}, + "execution_count": 7, + "id": "c3da4bde", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ - "import type { RunnableConfig } from \"@langchain/core/runnables\";\n", + "import { RunnableConfig } from \"@langchain/core/runnables\";\n", "import { END } from \"@langchain/langgraph\";\n", + "import { AIMessage } from \"@langchain/core/messages\";\n", "\n", "// Define the function that determines whether to continue or not\n", "const shouldContinue = (state: AgentState) => {\n", " const { messages } = state;\n", - " const lastMessage = messages[messages.length - 1];\n", + " const lastMessage = messages[messages.length - 1] as AIMessage;\n", " // If there is no function call, then we finish\n", - " if (!lastMessage.tool_calls || lastMessage.tool_calls.length === 0) {\n", + " if (!lastMessage?.tool_calls?.length) {\n", " return END;\n", " } // Otherwise if there is, we check if it's suppose to return direct\n", " else {\n", @@ -270,12 +290,12 @@ " const response = await boundModel.invoke(messages, config);\n", " // We return an object, 
because this will get added to the existing list\n", " return { messages: [response] };\n", - "};\n" + "};" ] }, { "cell_type": "markdown", - "id": "e84040dd", + "id": "cbd38eae", "metadata": {}, "source": [ "## Define the graph\n", @@ -285,143 +305,32 @@ }, { "cell_type": "code", - "execution_count": 7, - "id": "05203811", + "execution_count": 8, + "id": "7f830fef", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "StateGraph {\n", - " nodes: {\n", - " agent: RunnableLambda {\n", - " lc_serializable: \u001b[33mfalse\u001b[39m,\n", - " lc_kwargs: { func: \u001b[36m[AsyncFunction: callModel]\u001b[39m },\n", - " lc_runnable: \u001b[33mtrue\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"runnables\"\u001b[39m ],\n", - " func: \u001b[36m[AsyncFunction: callModel]\u001b[39m\n", - " },\n", - " tools: ToolNode {\n", - " lc_serializable: \u001b[33mfalse\u001b[39m,\n", - " lc_kwargs: {},\n", - " lc_runnable: \u001b[33mtrue\u001b[39m,\n", - " name: \u001b[32m\"tools\"\u001b[39m,\n", - " lc_namespace: [ \u001b[32m\"langgraph\"\u001b[39m ],\n", - " func: \u001b[36m[Function: func]\u001b[39m,\n", - " tags: \u001b[90mundefined\u001b[39m,\n", - " config: { tags: [] },\n", - " trace: \u001b[33mtrue\u001b[39m,\n", - " recurse: \u001b[33mtrue\u001b[39m,\n", - " tools: [\n", - " DynamicStructuredTool {\n", - " lc_serializable: \u001b[33mfalse\u001b[39m,\n", - " lc_kwargs: \u001b[36m[Object]\u001b[39m,\n", - " lc_runnable: \u001b[33mtrue\u001b[39m,\n", - " name: \u001b[32m\"search\"\u001b[39m,\n", - " verbose: \u001b[33mfalse\u001b[39m,\n", - " callbacks: \u001b[90mundefined\u001b[39m,\n", - " tags: [],\n", - " metadata: {},\n", - " returnDirect: \u001b[33mfalse\u001b[39m,\n", - " description: \u001b[32m\"Call to surf the web.\"\u001b[39m,\n", - " func: \u001b[36m[AsyncFunction: func]\u001b[39m,\n", - " schema: \u001b[36m[ZodObject]\u001b[39m\n", - " }\n", - " ]\n", - " },\n", - " final: ToolNode {\n", - " lc_serializable: \u001b[33mfalse\u001b[39m,\n", - " lc_kwargs: {},\n", - " lc_runnable: \u001b[33mtrue\u001b[39m,\n", - " name: \u001b[32m\"tools\"\u001b[39m,\n", - " lc_namespace: [ \u001b[32m\"langgraph\"\u001b[39m ],\n", - " func: \u001b[36m[Function: func]\u001b[39m,\n", - " tags: \u001b[90mundefined\u001b[39m,\n", - " config: { tags: [] },\n", - " trace: \u001b[33mtrue\u001b[39m,\n", - " recurse: \u001b[33mtrue\u001b[39m,\n", - " tools: [\n", - " DynamicStructuredTool {\n", - " lc_serializable: \u001b[33mfalse\u001b[39m,\n", - " lc_kwargs: \u001b[36m[Object]\u001b[39m,\n", - " lc_runnable: \u001b[33mtrue\u001b[39m,\n", - " name: \u001b[32m\"search\"\u001b[39m,\n", - " verbose: \u001b[33mfalse\u001b[39m,\n", - " callbacks: \u001b[90mundefined\u001b[39m,\n", - " tags: [],\n", - " metadata: {},\n", - " returnDirect: \u001b[33mfalse\u001b[39m,\n", - " description: \u001b[32m\"Call to surf the web.\"\u001b[39m,\n", - " func: \u001b[36m[AsyncFunction: func]\u001b[39m,\n", - " schema: \u001b[36m[ZodObject]\u001b[39m\n", - " }\n", - " ]\n", - " }\n", - " },\n", - " edges: Set(3) {\n", - " [ \u001b[32m\"__start__\"\u001b[39m, \u001b[32m\"agent\"\u001b[39m ],\n", - " [ \u001b[32m\"tools\"\u001b[39m, \u001b[32m\"agent\"\u001b[39m ],\n", - " [ \u001b[32m\"final\"\u001b[39m, \u001b[32m\"__end__\"\u001b[39m ]\n", - " },\n", - " branches: {\n", - " agent: {\n", - " shouldContinue: Branch {\n", - " condition: \u001b[36m[Function: shouldContinue]\u001b[39m,\n", - " ends: \u001b[90mundefined\u001b[39m,\n", - " then: 
\u001b[90mundefined\u001b[39m\n", - " }\n", - " }\n", - " },\n", - " entryPoint: \u001b[90mundefined\u001b[39m,\n", - " compiled: \u001b[33mtrue\u001b[39m,\n", - " supportMultipleEdges: \u001b[33mtrue\u001b[39m,\n", - " channels: {\n", - " messages: BinaryOperatorAggregate {\n", - " lc_graph_name: \u001b[32m\"BinaryOperatorAggregate\"\u001b[39m,\n", - " value: [],\n", - " operator: \u001b[36m[Function: value]\u001b[39m,\n", - " initialValueFactory: \u001b[36m[Function: default]\u001b[39m\n", - " }\n", - " },\n", - " waitingEdges: Set(0) {}\n", - "}" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "import { END, START, StateGraph } from \"@langchain/langgraph\";\n", "\n", "// Define a new graph\n", - "const workflow = new StateGraph({\n", - " channels: agentState,\n", - "});\n", - "\n", - "// Define the two nodes we will cycle between\n", - "workflow.addNode(\"agent\", callModel);\n", - "\n", - "// Note the \"action\" and \"final\" nodes are identical!\n", - "workflow.addNode(\"tools\", toolNode);\n", - "workflow.addNode(\"final\", toolNode);\n", - "\n", - "// Set the entrypoint as `agent`\n", - "workflow.addEdge(START, \"agent\");\n", - "\n", - "// We now add a conditional edge\n", - "workflow.addConditionalEdges(\n", - " // First, we define the start node. We use `agent`.\n", - " \"agent\",\n", - " // Next, we pass in the function that will determine which node is called next.\n", - " shouldContinue,\n", - ");\n", - "\n", - "// We now add a normal edge from `tools` to `agent`.\n", - "workflow.addEdge(\"tools\", \"agent\");\n", - "workflow.addEdge(\"final\", END);\n", + "const workflow = new StateGraph({ channels: agentState })\n", + " // Define the two nodes we will cycle between\n", + " .addNode(\"agent\", callModel)\n", + " // Note the \"action\" and \"final\" nodes are identical!\n", + " .addNode(\"tools\", toolNode)\n", + " .addNode(\"final\", toolNode)\n", + " // Set the entrypoint as `agent`\n", + " .addEdge(START, \"agent\")\n", + " // We now add a conditional edge\n", + " .addConditionalEdges(\n", + " // First, we define the start node. 
We use `agent`.\n", + " \"agent\",\n", + " // Next, we pass in the function that will determine which node is called next.\n", + " shouldContinue,\n", + " )\n", + " // We now add a normal edge from `tools` to `agent`.\n", + " .addEdge(\"tools\", \"agent\")\n", + " .addEdge(\"final\", END);\n", "\n", "// Finally, we compile it!\n", "const app = workflow.compile();" @@ -429,7 +338,7 @@ }, { "cell_type": "markdown", - "id": "ef7f65bd", + "id": "ac83bfea", "metadata": {}, "source": [ "## Use it!\n", @@ -441,8 +350,8 @@ }, { "cell_type": "code", - "execution_count": 8, - "id": "de5f4864", + "execution_count": 9, + "id": "9ba5e47a", "metadata": {}, "outputs": [ { @@ -451,20 +360,17 @@ "text": [ "[human]: what is the weather in sf\n", "-----\n", - "\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[ai]: [object Object] \n", + "\n", + "[ai]: \n", "Tools: \n", - "- search({\"query\":\"weather in sf\",\"return_direct\":true})\n", + "- search({\"query\":\"weather in San Francisco\"})\n", "-----\n", "\n", "[tool]: It's sunny in San Francisco, but you better look out if you're a Gemini 😈.\n", "-----\n", + "\n", + "[ai]: The weather in San Francisco is sunny.\n", + "-----\n", "\n" ] } @@ -480,8 +386,7 @@ "const prettyPrint = (message: BaseMessage) => {\n", " let txt = `[${message._getType()}]: ${message.content}`;\n", " if (\n", - " (isAIMessage(message) && (message as AIMessage)?.tool_calls?.length) ||\n", - " 0 > 0\n", + " isAIMessage(message) && (message as AIMessage)?.tool_calls?.length || 0 > 0\n", " ) {\n", " const tool_calls = (message as AIMessage)?.tool_calls\n", " ?.map((tc) => `- ${tc.name}(${JSON.stringify(tc.args)})`)\n", @@ -501,8 +406,8 @@ }, { "cell_type": "code", - "execution_count": 9, - "id": "986f8cfe", + "execution_count": 10, + "id": "779e0d88", "metadata": {}, "outputs": [ { @@ -512,9 +417,9 @@ "[human]: what is the weather in sf? return this result directly by setting return_direct = True\n", "-----\n", "\n", - "[ai]: [object Object] \n", + "[ai]: \n", "Tools: \n", - "- search({\"query\":\"weather in sf\",\"return_direct\":true})\n", + "- search({\"query\":\"weather in San Francisco\",\"return_direct\":true})\n", "-----\n", "\n", "[tool]: It's sunny in San Francisco, but you better look out if you're a Gemini 😈.\n", @@ -524,8 +429,6 @@ } ], "source": [ - "import { HumanMessage } from \"@langchain/core/messages\";\n", - "\n", "const inputs = {\n", " messages: [\n", " new HumanMessage(\n", @@ -542,34 +445,43 @@ }, { "cell_type": "markdown", - "id": "51fa73e6", + "id": "f99d8e3b", "metadata": {}, "source": [ - "Done! The graph **stopped** after running the `tools` node!\n" + "Done! 
The graph **stopped** after running the `tools` node!\n", + "\n", + "```\n", + "```" ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e7f7cbc2-6b5d-4708-bed9-4a977fd72476", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { "jupytext": { - "text_representation": { - "extension": ".py", - "format_name": "percent", - "format_version": "1.3", - "jupytext_version": "1.16.1" - } + "encoding": "# -*- coding: utf-8 -*-" }, "kernelspec": { - "display_name": "Deno", + "display_name": "TypeScript", "language": "typescript", - "name": "deno" + "name": "tslab" }, "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, "file_extension": ".ts", - "mimetype": "text/x.typescript", + "mimetype": "text/typescript", "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.4.5" + "version": "3.7.2" } }, "nbformat": 4, diff --git a/examples/how-tos/force-calling-a-tool-first.ipynb b/examples/how-tos/force-calling-a-tool-first.ipynb index 7a3b04fc..585d4260 100644 --- a/examples/how-tos/force-calling-a-tool-first.ipynb +++ b/examples/how-tos/force-calling-a-tool-first.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "7d4b6a8d", + "id": "3246bf13", "metadata": {}, "source": [ "# Force Calling a Tool First\n", @@ -18,7 +18,7 @@ }, { "cell_type": "markdown", - "id": "ee2d626b", + "id": "f7c184d4", "metadata": {}, "source": [ "## Setup\n", @@ -36,23 +36,33 @@ }, { "cell_type": "code", - "execution_count": 14, - "id": "f7d70783", - "metadata": {}, - "outputs": [], + "execution_count": 1, + "id": "6327203c", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Force Calling a Tool First: LangGraphJS\n" + ] + } + ], "source": [ - "// Deno.env.set(\"OPENAI*API_KEY\", \"sk*...\");\n", + "// process.env.OPENAI_API_KEY = \"sk_...\";\n", "\n", "// Optional, add tracing in LangSmith\n", - "// Deno.env.set(\"LANGCHAIN_API_KEY\", \"ls\\_\\_...\");\n", - "// Deno.env.set(\"LANGCHAIN_CALLBACKS_BACKGROUND\", \"true\");\n", - "Deno.env.set(\"LANGCHAIN_TRACING_V2\", \"true\");\n", - "Deno.env.set(\"LANGCHAIN_PROJECT\", \"Force Calling a Tool First: LangGraphJS\");\n" + "// process.env.LANGCHAIN_API_KEY = \"ls__...\";\n", + "// process.env.LANGCHAIN_CALLBACKS_BACKGROUND = \"true\";\n", + "process.env.LANGCHAIN_TRACING_V2 = \"true\";\n", + "process.env.LANGCHAIN_PROJECT = \"Force Calling a Tool First: LangGraphJS\";" ] }, { "cell_type": "markdown", - "id": "7321b035", + "id": "247b32e0", "metadata": {}, "source": [ "## Set up the tools\n", @@ -66,19 +76,20 @@ }, { "cell_type": "code", - "execution_count": 15, - "id": "c012c726", - "metadata": {}, + "execution_count": 2, + "id": "294b9a8c", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [ { - "data": { - "text/plain": [ - "\u001b[32m\"Cold, with a low of 13 ℃\"\u001b[39m" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" + "name": "stderr", + "output_type": "stream", + "text": [ + "[WARN]: You have enabled LangSmith tracing without backgrounding callbacks.\n", + "[WARN]: If you are not using a serverless environment where you must wait for tracing calls to finish,\n", + "[WARN]: we suggest setting \"process.env.LANGCHAIN_CALLBACKS_BACKGROUND=true\" to avoid additional latency.\n" + ] } ], "source": [ @@ -100,12 +111,12 @@ "\n", "await searchTool.invoke({ query: \"What's the weather like?\" });\n", "\n", - 
"const tools = [searchTool];\n" + "const tools = [searchTool];" ] }, { "cell_type": "markdown", - "id": "3e6df03f", + "id": "8aa4e65a", "metadata": {}, "source": [ "We can now wrap these tools in a simple ToolExecutor. This is a real simple\n", @@ -115,19 +126,22 @@ }, { "cell_type": "code", - "execution_count": 16, - "id": "4b29aeb2", - "metadata": {}, + "execution_count": 3, + "id": "51927bdc", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ "import { ToolNode } from \"@langchain/langgraph/prebuilt\";\n", + "import { BaseMessage } from \"@langchain/core/messages\";\n", "\n", - "const toolNode = new ToolNode(tools);\n" + "const toolNode = new ToolNode<{ messages: BaseMessage[] }>(tools);" ] }, { "cell_type": "markdown", - "id": "29f62fe3", + "id": "7a2de12a", "metadata": {}, "source": [ "## Set up the model\n", @@ -146,21 +160,24 @@ }, { "cell_type": "code", - "execution_count": 17, - "id": "c600af4a", - "metadata": {}, + "execution_count": 4, + "id": "d6e85df1", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ "import { ChatOpenAI } from \"@langchain/openai\";\n", "\n", "const model = new ChatOpenAI({\n", " temperature: 0,\n", - "});\n" + " model: \"gpt-4o\",\n", + "});" ] }, { "cell_type": "markdown", - "id": "de429bd2", + "id": "97cfea7d", "metadata": {}, "source": [ "After we've done this, we should make sure the model knows that it has these\n", @@ -170,17 +187,19 @@ }, { "cell_type": "code", - "execution_count": 18, - "id": "38310048", - "metadata": {}, + "execution_count": 5, + "id": "4f0b4a08", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ - "const boundModel = model.bindTools(tools);\n" + "const boundModel = model.bindTools(tools);" ] }, { "cell_type": "markdown", - "id": "a8178642", + "id": "59ca3d78", "metadata": {}, "source": [ "## Define the agent state\n", @@ -197,24 +216,31 @@ }, { "cell_type": "code", - "execution_count": 19, - "id": "6e6f8a6e", - "metadata": {}, + "execution_count": 7, + "id": "3db3dd6e", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ "import { BaseMessage } from \"@langchain/core/messages\";\n", + "import { StateGraphArgs } from \"@langchain/langgraph\";\n", + "\n", + "interface IState {\n", + " messages: BaseMessage[];\n", + "}\n", "\n", - "const agentState = {\n", + "const agentState: StateGraphArgs[\"channels\"] = {\n", " messages: {\n", " value: (x: BaseMessage[], y: BaseMessage[]) => x.concat(y),\n", " default: () => [],\n", " },\n", - "};\n" + "};" ] }, { "cell_type": "markdown", - "id": "766f32b5", + "id": "97b540fc", "metadata": {}, "source": [ "## Define the nodes\n", @@ -246,17 +272,16 @@ }, { "cell_type": "code", - "execution_count": 20, - "id": "9a32665b", + "execution_count": 8, + "id": "eee5adc0", "metadata": {}, "outputs": [], "source": [ "import { AIMessage } from \"@langchain/core/messages\";\n", - "import { AgentAction } from \"@langchain/core/agents\";\n", - "import type { RunnableConfig } from \"@langchain/core/runnables\";\n", + "import { RunnableConfig } from \"@langchain/core/runnables\";\n", "\n", "// Define logic that will be used to determine which conditional edge to go down\n", - "const shouldContinue = (state: { messages: Array }) => {\n", + "const shouldContinue = (state: IState) => {\n", " const { messages } = state;\n", " const lastMessage = messages[messages.length - 1] as AIMessage;\n", " // If there is no function call, then we finish\n", @@ -269,7 +294,7 @@ "\n", "// Define the function that calls the model\n", 
"const callModel = async (\n", - " state: { messages: Array },\n", + " state: IState,\n", " config: RunnableConfig,\n", ") => {\n", " const { messages } = state;\n", @@ -290,7 +315,7 @@ }, { "cell_type": "markdown", - "id": "19c627b3", + "id": "00a87145", "metadata": {}, "source": [ "**MODIFICATION**\n", @@ -301,13 +326,15 @@ }, { "cell_type": "code", - "execution_count": 21, - "id": "d5b72426", - "metadata": {}, + "execution_count": 9, + "id": "e9104fc7", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ "// This is the new first - the first call of the model we want to explicitly hard-code some action\n", - "const firstModel = async (state: { messages: Array }) => {\n", + "const firstModel = async (state: IState) => {\n", " const humanInput = state.messages[state.messages.length - 1].content || \"\";\n", " return {\n", " messages: [\n", @@ -325,12 +352,12 @@ " }),\n", " ],\n", " };\n", - "};\n" + "};" ] }, { "cell_type": "markdown", - "id": "35419e01", + "id": "e609be17", "metadata": {}, "source": [ "## Define the graph\n", @@ -344,140 +371,50 @@ }, { "cell_type": "code", - "execution_count": 24, - "id": "3d27c330", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "StateGraph {\n", - " nodes: {\n", - " first_agent: RunnableLambda {\n", - " lc_serializable: \u001b[33mfalse\u001b[39m,\n", - " lc_kwargs: { func: \u001b[36m[AsyncFunction: firstModel]\u001b[39m },\n", - " lc_runnable: \u001b[33mtrue\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"runnables\"\u001b[39m ],\n", - " func: \u001b[36m[AsyncFunction: firstModel]\u001b[39m\n", - " },\n", - " agent: RunnableLambda {\n", - " lc_serializable: \u001b[33mfalse\u001b[39m,\n", - " lc_kwargs: { func: \u001b[36m[AsyncFunction: callModel]\u001b[39m },\n", - " lc_runnable: \u001b[33mtrue\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"runnables\"\u001b[39m ],\n", - " func: \u001b[36m[AsyncFunction: callModel]\u001b[39m\n", - " },\n", - " action: ToolNode {\n", - " lc_serializable: \u001b[33mfalse\u001b[39m,\n", - " lc_kwargs: {},\n", - " lc_runnable: \u001b[33mtrue\u001b[39m,\n", - " name: \u001b[32m\"tools\"\u001b[39m,\n", - " lc_namespace: [ \u001b[32m\"langgraph\"\u001b[39m ],\n", - " func: \u001b[36m[Function: func]\u001b[39m,\n", - " tags: \u001b[90mundefined\u001b[39m,\n", - " config: { tags: [] },\n", - " trace: \u001b[33mtrue\u001b[39m,\n", - " recurse: \u001b[33mtrue\u001b[39m,\n", - " tools: [\n", - " DynamicStructuredTool {\n", - " lc_serializable: \u001b[33mfalse\u001b[39m,\n", - " lc_kwargs: \u001b[36m[Object]\u001b[39m,\n", - " lc_runnable: \u001b[33mtrue\u001b[39m,\n", - " name: \u001b[32m\"search\"\u001b[39m,\n", - " verbose: \u001b[33mfalse\u001b[39m,\n", - " callbacks: \u001b[90mundefined\u001b[39m,\n", - " tags: [],\n", - " metadata: {},\n", - " returnDirect: \u001b[33mfalse\u001b[39m,\n", - " description: \u001b[32m\"Use to surf the web, fetch current information, check the weather, and retrieve other information.\"\u001b[39m,\n", - " func: \u001b[36m[AsyncFunction: func]\u001b[39m,\n", - " schema: \u001b[36m[ZodObject]\u001b[39m\n", - " }\n", - " ]\n", - " }\n", - " },\n", - " edges: Set(3) {\n", - " [ \u001b[32m\"__start__\"\u001b[39m, \u001b[32m\"first_agent\"\u001b[39m ],\n", - " [ \u001b[32m\"action\"\u001b[39m, \u001b[32m\"agent\"\u001b[39m ],\n", - " [ \u001b[32m\"first_agent\"\u001b[39m, 
\u001b[32m\"action\"\u001b[39m ]\n", - " },\n", - " branches: {\n", - " agent: {\n", - " shouldContinue: Branch {\n", - " condition: \u001b[36m[Function: shouldContinue]\u001b[39m,\n", - " ends: { continue: \u001b[32m\"action\"\u001b[39m, end: \u001b[32m\"__end__\"\u001b[39m },\n", - " then: \u001b[90mundefined\u001b[39m\n", - " }\n", - " }\n", - " },\n", - " entryPoint: \u001b[90mundefined\u001b[39m,\n", - " compiled: \u001b[33mtrue\u001b[39m,\n", - " supportMultipleEdges: \u001b[33mtrue\u001b[39m,\n", - " channels: {\n", - " messages: BinaryOperatorAggregate {\n", - " lc_graph_name: \u001b[32m\"BinaryOperatorAggregate\"\u001b[39m,\n", - " value: [],\n", - " operator: \u001b[36m[Function: value]\u001b[39m,\n", - " initialValueFactory: \u001b[36m[Function: default]\u001b[39m\n", - " }\n", - " },\n", - " waitingEdges: Set(0) {}\n", - "}" - ] - }, - "execution_count": 24, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": 10, + "id": "de6da918", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [], "source": [ "import { END, START, StateGraph } from \"@langchain/langgraph\";\n", "\n", "// Define a new graph\n", - "const workflow = new StateGraph({\n", - " channels: agentState,\n", - "});\n", - "\n", - "// Define the new entrypoint\n", - "workflow.addNode(\"first_agent\", firstModel);\n", - "\n", - "// Define the two nodes we will cycle between\n", - "workflow.addNode(\"agent\", callModel);\n", - "workflow.addNode(\"action\", toolNode);\n", - "\n", - "// Set the entrypoint as `first_agent`\n", - "// by creating an edge from the virtual __start__ node to `first_agent`\n", - "workflow.addEdge(START, \"first_agent\");\n", - "\n", - "// We now add a conditional edge\n", - "workflow.addConditionalEdges(\n", - " // First, we define the start node. We use `agent`.\n", - " // This means these are the edges taken after the `agent` node is called.\n", - " \"agent\",\n", - " // Next, we pass in the function that will determine which node is called next.\n", - " shouldContinue,\n", - " // Finally we pass in a mapping.\n", - " // The keys are strings, and the values are other nodes.\n", - " // END is a special node marking that the graph should finish.\n", - " // What will happen is we will call `should_continue`, and then the output of that\n", - " // will be matched against the keys in this mapping.\n", - " // Based on which one it matches, that node will then be called.\n", - " {\n", - " // If `tools`, then we call the tool node.\n", - " continue: \"action\",\n", - " // Otherwise we finish.\n", - " end: END,\n", - " },\n", - ");\n", - "\n", - "// We now add a normal edge from `tools` to `agent`.\n", - "// This means that after `tools` is called, `agent` node is called next.\n", - "workflow.addEdge(\"action\", \"agent\");\n", - "\n", - "// After we call the first agent, we know we want to go to action\n", - "workflow.addEdge(\"first_agent\", \"action\");\n", + "const workflow = new StateGraph({ channels: agentState })\n", + " // Define the new entrypoint\n", + " .addNode(\"first_agent\", firstModel)\n", + " // Define the two nodes we will cycle between\n", + " .addNode(\"agent\", callModel)\n", + " .addNode(\"action\", toolNode)\n", + " // Set the entrypoint as `first_agent`\n", + " // by creating an edge from the virtual __start__ node to `first_agent`\n", + " .addEdge(START, \"first_agent\")\n", + " // We now add a conditional edge\n", + " .addConditionalEdges(\n", + " // First, we define the start node. 
We use `agent`.\n", + " // This means these are the edges taken after the `agent` node is called.\n", + " \"agent\",\n", + " // Next, we pass in the function that will determine which node is called next.\n", + " shouldContinue,\n", + " // Finally we pass in a mapping.\n", + " // The keys are strings, and the values are other nodes.\n", + " // END is a special node marking that the graph should finish.\n", + " // What will happen is we will call `should_continue`, and then the output of that\n", + " // will be matched against the keys in this mapping.\n", + " // Based on which one it matches, that node will then be called.\n", + " {\n", + " // If `tools`, then we call the tool node.\n", + " continue: \"action\",\n", + " // Otherwise we finish.\n", + " end: END,\n", + " },\n", + " )\n", + " // We now add a normal edge from `tools` to `agent`.\n", + " // This means that after `tools` is called, `agent` node is called next.\n", + " .addEdge(\"action\", \"agent\")\n", + " // After we call the first agent, we know we want to go to action\n", + " .addEdge(\"first_agent\", \"action\");\n", "\n", "// Finally, we compile it!\n", "// This compiles it into a LangChain Runnable,\n", @@ -487,7 +424,7 @@ }, { "cell_type": "markdown", - "id": "a9eea2d0", + "id": "bd4f83be", "metadata": {}, "source": [ "## Use it!\n", @@ -499,88 +436,23 @@ }, { "cell_type": "code", - "execution_count": 25, - "id": "47d10628", - "metadata": {}, + "execution_count": 11, + "id": "acaade41", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "{\n", - " first_agent: {\n", - " messages: [\n", - " AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"\",\n", - " tool_calls: [Array],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " tool_calls: [ [Object] ],\n", - " invalid_tool_calls: []\n", - " }\n", - " ]\n", - " }\n", - "}\n", + "{ first_agent: { messages: [ \u001b[36m[AIMessage]\u001b[39m ] } }\n", "-----\n", "\n", - "{\n", - " action: {\n", - " messages: [\n", - " ToolMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " name: \"search\",\n", - " content: \"Cold, with a low of 13 ℃\",\n", - " tool_call_id: \"tool_abcd123\",\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"Cold, with a low of 13 ℃\",\n", - " name: \"search\",\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " tool_call_id: \"tool_abcd123\"\n", - " }\n", - " ]\n", - " }\n", - "}\n", + "{ action: { messages: [ \u001b[36m[ToolMessage]\u001b[39m ] } }\n", "-----\n", "\n", - "{\n", - " agent: {\n", - " messages: [\n", - " AIMessageChunk {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"The weather in San Francisco is currently cold, with a low of 13°C.\",\n", - " additional_kwargs: {},\n", - " response_metadata: [Object],\n", - " tool_call_chunks: [],\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"The weather in San Francisco is currently cold, with a low of 13°C.\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: { prompt: 0, completion: 0, 
finish_reason: \"stop\" },\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " tool_call_chunks: []\n", - " }\n", - " ]\n", - " }\n", - "}\n", + "{ agent: { messages: [ \u001b[36m[AIMessageChunk]\u001b[39m ] } }\n", "-----\n", "\n" ] @@ -596,13 +468,13 @@ "for await (const output of await app.stream(inputs)) {\n", " console.log(output);\n", " console.log(\"-----\\n\");\n", - "}\n" + "}" ] }, { "cell_type": "code", "execution_count": null, - "id": "d7e74d9d", + "id": "241e2fc9-9bee-4f2c-a077-09472a7b5613", "metadata": {}, "outputs": [], "source": [] @@ -610,25 +482,23 @@ ], "metadata": { "jupytext": { - "text_representation": { - "extension": ".py", - "format_name": "percent", - "format_version": "1.3", - "jupytext_version": "1.16.1" - } + "encoding": "# -*- coding: utf-8 -*-" }, "kernelspec": { - "display_name": "Deno", + "display_name": "TypeScript", "language": "typescript", - "name": "deno" + "name": "tslab" }, "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, "file_extension": ".ts", - "mimetype": "text/x.typescript", + "mimetype": "text/typescript", "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.4.5" + "version": "3.7.2" } }, "nbformat": 4, diff --git a/examples/how-tos/human-in-the-loop.ipynb b/examples/how-tos/human-in-the-loop.ipynb index 1b8ec831..e806d074 100644 --- a/examples/how-tos/human-in-the-loop.ipynb +++ b/examples/how-tos/human-in-the-loop.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "7388763d", "metadata": {}, "source": [ "# Human-in-the-loop\n", @@ -37,20 +38,32 @@ { "cell_type": "code", "execution_count": 1, - "metadata": {}, - "outputs": [], + "id": "74b6bfe1", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Human-in-the-loop: LangGraphJS\n" + ] + } + ], "source": [ - "// Deno.env.set(\"OPENAI_API_KEY\", \"sk_...\");\n", + "// process.env.OPENAI_API_KEY = \"sk_...\";\n", "\n", "// Optional, add tracing in LangSmith\n", - "// Deno.env.set(\"LANGCHAIN_API_KEY\", \"ls__...\");\n", - "// Deno.env.set(\"LANGCHAIN_CALLBACKS_BACKGROUND\", \"true\");\n", - "Deno.env.set(\"LANGCHAIN_TRACING_V2\", \"true\");\n", - "Deno.env.set(\"LANGCHAIN_PROJECT\", \"Human-in-the-loop: LangGraphJS\");\n" + "// process.env.LANGCHAIN_API_KEY = \"ls__...\";\n", + "// process.env.LANGCHAIN_CALLBACKS_BACKGROUND = \"true\";\n", + "process.env.LANGCHAIN_TRACING_V2 = \"true\";\n", + "process.env.LANGCHAIN_PROJECT = \"Human-in-the-loop: LangGraphJS\";" ] }, { "cell_type": "markdown", + "id": "a7b247cf", "metadata": {}, "source": [ "## Set up the State\n", @@ -61,29 +74,31 @@ { "cell_type": "code", "execution_count": 2, - "metadata": {}, + "id": "50c5189d", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ "import { BaseMessage } from \"@langchain/core/messages\";\n", + "import { StateGraphArgs } from \"@langchain/langgraph\";\n", "\n", "interface IState {\n", - " messages: {\n", - " value: (x: BaseMessage[], y: BaseMessage[]) => BaseMessage[];\n", - " default: () => BaseMessage[];\n", - " };\n", + " messages: BaseMessage[];\n", "}\n", "\n", "// This defines the agent state\n", - "const graphState: IState = {\n", + "const graphState: StateGraphArgs[\"channels\"] = {\n", " messages: {\n", " value: (x: BaseMessage[], y: BaseMessage[]) => x.concat(y),\n", " default: () => [],\n", " },\n", - "};\n" + "};" ] }, { "cell_type": "markdown", + "id": 
"b1607663", "metadata": {}, "source": [ "## Set up the tools\n", @@ -98,7 +113,10 @@ { "cell_type": "code", "execution_count": 3, - "metadata": {}, + "id": "b24f1e9e", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [ { "name": "stderr", @@ -127,11 +145,12 @@ " },\n", "});\n", "\n", - "const tools = [searchTool];\n" + "const tools = [searchTool];" ] }, { "cell_type": "markdown", + "id": "1cfcf345", "metadata": {}, "source": [ "We can now wrap these tools in a simple\n", @@ -139,23 +158,28 @@ "\n", "This is a simple class that takes in a list of messages containing an\n", "[AIMessage with tool_calls](https://v02.api.js.langchain.com/classes/langchain_core_messages.AIMessage.html),\n", - "runs the tools, and returns the output as\n", + "runs the tools, and returns the output as\\\n", "[ToolMessage](https://v02.api.js.langchain.com/classes/langchain_core_messages_tool.ToolMessage.html)s." ] }, { "cell_type": "code", "execution_count": 4, - "metadata": {}, + "id": "528a8e10", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ "import { ToolNode } from \"@langchain/langgraph/prebuilt\";\n", + "import { BaseMessage } from \"@langchain/core/messages\";\n", "\n", - "const toolNode = new ToolNode(tools);\n" + "const toolNode = new ToolNode<{ messages: BaseMessage[] }>(tools);" ] }, { "cell_type": "markdown", + "id": "b3bed0ce", "metadata": {}, "source": [ "## Set up the model\n", @@ -172,18 +196,11 @@ { "cell_type": "code", "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[WARN]: You have enabled LangSmith tracing without backgrounding callbacks.\n", - "[WARN]: If you are not using a serverless environment where you must wait for tracing calls to finish,\n", - "[WARN]: we suggest setting \"process.env.LANGCHAIN_CALLBACKS_BACKGROUND=true\" to avoid additional latency.\n" - ] - } - ], + "id": "b12434ff", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [], "source": [ "import { ChatOpenAI } from \"@langchain/openai\";\n", "\n", @@ -191,22 +208,24 @@ "\n", "// After we've done this, we should make sure the model knows that it has these tools available to call.\n", "// We can do this by binding the tools to the model class.\n", - "const boundModel = model.bindTools(tools);\n" + "const boundModel = model.bindTools(tools);" ] }, { "cell_type": "markdown", + "id": "97312191", "metadata": {}, "source": [ "## Define the nodes\n", "\n", "We now need to define a few different nodes in our graph. In `langgraph`, a node\n", - "can be either a function or a\n", + "can be either a function or a\\\n", "[runnable](https://js.langchain.com/docs/modules/runnables/). There are two main\n", "nodes we need for this:\n", "\n", "1. The agent: responsible for deciding what (if any) actions to take.\n", - "2. A function to invoke tools: if the agent decides to take an action, this node\n", + "2. A function to invoke tools: if the agent decides to take an action, this\n", + " node\\\n", " will then execute that action.\n", "\n", "We will also need to define some edges. 
Some of these edges may be conditional.\n", @@ -228,18 +247,19 @@ { "cell_type": "code", "execution_count": 6, + "id": "b52bef4a", "metadata": {}, "outputs": [], "source": [ "import { RunnableConfig } from \"@langchain/core/runnables\";\n", - "import { AIMessage, BaseMessage } from \"@langchain/core/messages\";\n", + "import { AIMessage } from \"@langchain/core/messages\";\n", "import { END } from \"@langchain/langgraph\";\n", "\n", - "const routeMessage = (state: { messages: Array }) => {\n", + "const routeMessage = (state: IState) => {\n", " const { messages } = state;\n", " const lastMessage = messages[messages.length - 1] as AIMessage;\n", " // If no tools are called, we can finish (respond to the user)\n", - " if (!lastMessage.tool_calls || lastMessage.tool_calls.length === 0) {\n", + " if (!lastMessage?.tool_calls?.length) {\n", " return END;\n", " }\n", " // Otherwise if there is, we continue and call the tools\n", @@ -247,7 +267,7 @@ "};\n", "\n", "const callModel = async (\n", - " state: { messages: Array },\n", + " state: IState,\n", " config: RunnableConfig,\n", ") => {\n", " const { messages } = state;\n", @@ -258,6 +278,7 @@ }, { "cell_type": "markdown", + "id": "22360833", "metadata": {}, "source": [ "## Define the graph\n", @@ -267,102 +288,28 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 7, + "id": "a8b8dace", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "StateGraph {\n", - " nodes: {\n", - " agent: RunnableLambda {\n", - " lc_serializable: \u001b[33mfalse\u001b[39m,\n", - " lc_kwargs: { func: \u001b[36m[AsyncFunction: callModel]\u001b[39m },\n", - " lc_runnable: \u001b[33mtrue\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"runnables\"\u001b[39m ],\n", - " func: \u001b[36m[AsyncFunction: callModel]\u001b[39m\n", - " },\n", - " tools: ToolNode {\n", - " lc_serializable: \u001b[33mfalse\u001b[39m,\n", - " lc_kwargs: {},\n", - " lc_runnable: \u001b[33mtrue\u001b[39m,\n", - " name: \u001b[32m\"tools\"\u001b[39m,\n", - " lc_namespace: [ \u001b[32m\"langgraph\"\u001b[39m ],\n", - " func: \u001b[36m[Function: func]\u001b[39m,\n", - " tags: \u001b[90mundefined\u001b[39m,\n", - " config: { tags: [] },\n", - " trace: \u001b[33mtrue\u001b[39m,\n", - " recurse: \u001b[33mtrue\u001b[39m,\n", - " tools: [\n", - " DynamicStructuredTool {\n", - " lc_serializable: \u001b[33mfalse\u001b[39m,\n", - " lc_kwargs: \u001b[36m[Object]\u001b[39m,\n", - " lc_runnable: \u001b[33mtrue\u001b[39m,\n", - " name: \u001b[32m\"search\"\u001b[39m,\n", - " verbose: \u001b[33mfalse\u001b[39m,\n", - " callbacks: \u001b[90mundefined\u001b[39m,\n", - " tags: [],\n", - " metadata: {},\n", - " returnDirect: \u001b[33mfalse\u001b[39m,\n", - " description: \u001b[32m\"Call to surf the web.\"\u001b[39m,\n", - " func: \u001b[36m[AsyncFunction: func]\u001b[39m,\n", - " schema: \u001b[36m[ZodObject]\u001b[39m\n", - " }\n", - " ]\n", - " }\n", - " },\n", - " edges: Set(2) { [ \u001b[32m\"__start__\"\u001b[39m, \u001b[32m\"agent\"\u001b[39m ], [ \u001b[32m\"tools\"\u001b[39m, \u001b[32m\"agent\"\u001b[39m ] },\n", - " branches: {\n", - " agent: {\n", - " routeMessage: Branch {\n", - " condition: \u001b[36m[Function: routeMessage]\u001b[39m,\n", - " ends: \u001b[90mundefined\u001b[39m,\n", - " then: \u001b[90mundefined\u001b[39m\n", - " }\n", - " }\n", - " },\n", - " entryPoint: \u001b[90mundefined\u001b[39m,\n", - " compiled: \u001b[33mtrue\u001b[39m,\n", - " supportMultipleEdges: 
\u001b[33mtrue\u001b[39m,\n",
-       "  channels: {\n",
-       "    messages: BinaryOperatorAggregate {\n",
-       "      lc_graph_name: \u001b[32m\"BinaryOperatorAggregate\"\u001b[39m,\n",
-       "      value: [],\n",
-       "      operator: \u001b[36m[Function: value]\u001b[39m,\n",
-       "      initialValueFactory: \u001b[36m[Function: default]\u001b[39m\n",
-       "    }\n",
-       "  },\n",
-       "  waitingEdges: Set(0) {}\n",
-       "}"
-      ]
-     },
-     "execution_count": 12,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
+   "outputs": [],
   "source": [
    "import { END, START, StateGraph } from \"@langchain/langgraph\";\n",
    "import { MemorySaver } from \"@langchain/langgraph\";\n",
    "\n",
-    "const workflow = new StateGraph({\n",
+    "const workflow = new StateGraph<IState>({\n",
     "  channels: graphState,\n",
-    "});\n",
+    "})\n",
+    "  .addNode(\"agent\", callModel)\n",
+    "  .addNode(\"tools\", toolNode)\n",
+    "  .addEdge(START, \"agent\")\n",
+    "  .addConditionalEdges(\"agent\", routeMessage, { tools: \"tools\", [END]: END })\n",
+    "  .addEdge(\"tools\", \"agent\");\n",
     "\n",
-    "// Define the two nodes we will cycle between\n",
-    "workflow.addNode(\"agent\", callModel);\n",
-    "workflow.addNode(\"tools\", toolNode);\n",
-    "\n",
-    "// Set the entrypoint as `agent`\n",
-    "// This means that this node is the first one called\n",
-    "workflow.addEdge(START, \"agent\");\n",
-    "workflow.addConditionalEdges(\"agent\", routeMessage);\n",
-    "workflow.addEdge(\"tools\", \"agent\");\n",
     "// **Persistence**\n",
-    "// Human-in-the-loop worflows require a checkpointer to ensure\n",
+    "// Human-in-the-loop workflows require a checkpointer to ensure\n",
     "// nothing is lost between interactions\n",
    "const checkpointer = new MemorySaver();\n",
+    "\n",
    "// **Interrupt**\n",
    "// To always interrupt before a particular node, pass the name of the node to `interruptBefore` when compiling.\n",
    "const graph = workflow.compile({ checkpointer, interruptBefore: [\"tools\"] });"
   ]
  },
  {
   "cell_type": "markdown",
+   "id": "325d45c5",
   "metadata": {},
   "source": [
    "## Interacting with the Agent\n",
@@ -379,14 +327,28 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 13,
+   "execution_count": 8,
+   "id": "17d79c6b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
-      "[human]: hi! I'm bob\n",
+      "[human]: hi! I'm bob\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Skipping write for channel branch:agent:routeMessage:undefined which has no readers\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[ai]: Hello Bob! 
How can I assist you today?\n" ] } @@ -402,12 +364,11 @@ "const prettyPrint = (message: BaseMessage) => {\n", " let txt = `[${message._getType()}]: ${message.content}`;\n", " if (\n", - " isAIMessage(message) && (message as AIMessage)?.tool_calls?.length ||\n", - " 0 > 0\n", + " isAIMessage(message) && (message as AIMessage)?.tool_calls?.length || 0 > 0\n", " ) {\n", - " const tool_calls = (message as AIMessage)?.tool_calls?.map(\n", - " (tc) => `- ${tc.name}(${JSON.stringify(tc.args)})`,\n", - " ).join(\"\\n\");\n", + " const tool_calls = (message as AIMessage)?.tool_calls\n", + " ?.map((tc) => `- ${tc.name}(${JSON.stringify(tc.args)})`)\n", + " .join(\"\\n\");\n", " txt += ` \\nTools: \\n${tool_calls}`;\n", " }\n", " console.log(txt);\n", @@ -428,14 +389,28 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 9, + "id": "2166619d", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "[human]: What did I tell you my name was?\n", + "[human]: What did I tell you my name was?\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Skipping write for channel branch:agent:routeMessage:undefined which has no readers\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ "[ai]: You mentioned that your name is Bob. How can I help you, Bob?\n" ] } @@ -454,7 +429,8 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 10, + "id": "f523a658", "metadata": {}, "outputs": [ { @@ -482,6 +458,7 @@ }, { "cell_type": "markdown", + "id": "13d79e78", "metadata": {}, "source": [ "**Resume**\n", @@ -495,15 +472,29 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 12, + "id": "a0dc047e", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "[tool]: It's sunny in San Francisco, but you better look out if you're a Gemini 😈.\n", - "[ai]: The current weather in San Francisco is sunny. Enjoy the sunshine! If you have any more questions or need assistance, feel free to ask.\n" + "[tool]: It's sunny in San Francisco, but you better look out if you're a Gemini 😈.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Skipping write for channel branch:agent:routeMessage:undefined which has no readers\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[ai]: It seems like it's sunny in San Francisco at the moment. 
If you need more detailed weather information, feel free to ask!\n"
     ]
    }
   ],
   "source": [
@@ -517,23 +508,37 @@
     "  prettyPrint(messages[messages.length - 1]);\n",
     "}"
    ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "8185b577-064b-4b5c-8198-4c4d4b6d6cee",
+   "metadata": {},
+   "outputs": [],
+   "source": []
   }
  ],
  "metadata": {
+  "jupytext": {
+   "encoding": "# -*- coding: utf-8 -*-"
+  },
  "kernelspec": {
-   "display_name": "Deno",
+   "display_name": "TypeScript",
   "language": "typescript",
-   "name": "deno"
+   "name": "tslab"
  },
  "language_info": {
+   "codemirror_mode": {
+    "mode": "typescript",
+    "name": "javascript",
+    "typescript": true
+   },
   "file_extension": ".ts",
-   "mimetype": "text/x.typescript",
+   "mimetype": "text/typescript",
   "name": "typescript",
-   "nb_converter": "script",
-   "pygments_lexer": "typescript",
-   "version": "5.4.5"
+   "version": "3.7.2"
  }
 },
 "nbformat": 4,
-  "nbformat_minor": 2
+  "nbformat_minor": 5
}
diff --git a/examples/how-tos/managing-agent-steps.ipynb b/examples/how-tos/managing-agent-steps.ipynb
index ba721e14..3f142b27 100644
--- a/examples/how-tos/managing-agent-steps.ipynb
+++ b/examples/how-tos/managing-agent-steps.ipynb
@@ -2,7 +2,7 @@
 "cells": [
 {
  "cell_type": "markdown",
-   "id": "079c907e",
+   "id": "38a67792",
  "metadata": {},
  "source": [
   "# Managing Agent Steps\n",
@@ -13,44 +13,32 @@
   "The previous examples just put all messages into the model, but that extra\n",
   "context can distract the agent and add latency to the API calls. In this example\n",
   "we will only include the `N` most recent messages in the chat history. Note that\n",
-    "this is meant to be illustrative of general state management."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "3062815d",
-   "metadata": {},
-   "source": [
+    "this is meant to be illustrative of general state management.\n",
+    "\n",
+    "## Setup\n",
+    "\n",
+    "First we need to install the packages required\n",
+    "\n",
+    "```bash\n",
-    "yarn add @langchain/langgraph @langchain/anthropic\n",
-    "```"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "2cf73929",
-   "metadata": {},
-   "source": [
+    "yarn add @langchain/langgraph @langchain/openai\n",
+    "```\n",
+    "\n",
-    "Next, we need to set API keys for Anthropic (the LLM we will use)."
+    "Next, we need to set API keys for OpenAI (the LLM we will use).\n",
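+    "\n",
+    "For example, a minimal optional guard (a sketch only; it relies on nothing beyond the `OPENAI_API_KEY` variable this notebook already uses) that fails fast when the key is missing:\n",
+    "\n",
+    "```typescript\n",
+    "// Sketch: throw early if OPENAI_API_KEY is unset, before any graph code runs.\n",
+    "if (!process.env.OPENAI_API_KEY) {\n",
+    "  throw new Error(\"Set the OPENAI_API_KEY environment variable first.\");\n",
+    "}\n",
+    "```"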
] }, { "cell_type": "code", "execution_count": 1, - "id": "a5482d50", + "id": "36033b66", "metadata": {}, "outputs": [], "source": [ - "// Deno.env.set(\"ANTHROPIC_API_KEY\", \"sk_...\");" + "// process.env.OPENAI_API_KEY = \"sk_...\";" ] }, { "cell_type": "markdown", - "id": "8e0e6786", + "id": "e98d96da", "metadata": {}, "source": [ "Optionally, we can set API key for\n", @@ -61,20 +49,28 @@ { "cell_type": "code", "execution_count": 2, - "id": "e268b155", + "id": "38934fde", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Managing Agent Steps: LangGraphJS\n" + ] + } + ], "source": [ "// Optional, add tracing in LangSmith\n", - "// Deno.env.set(\"LANGCHAIN_API_KEY\", \"ls__...\");\n", - "Deno.env.set(\"LANGCHAIN_CALLBACKS_BACKGROUND\", \"true\");\n", - "Deno.env.set(\"LANGCHAIN_TRACING_V2\", \"true\");\n", - "Deno.env.set(\"LANGCHAIN_PROJECT\", \"Managing Agent Steps: LangGraphJS\");" + "// process.env.LANGCHAIN_API_KEY = \"ls__...\";\n", + "process.env.LANGCHAIN_CALLBACKS_BACKGROUND = \"true\";\n", + "process.env.LANGCHAIN_TRACING_V2 = \"true\";\n", + "process.env.LANGCHAIN_PROJECT = \"Managing Agent Steps: LangGraphJS\";" ] }, { "cell_type": "markdown", - "id": "3d0ee64f", + "id": "8aeecba6", "metadata": {}, "source": [ "## Set up the State\n", @@ -95,13 +91,18 @@ { "cell_type": "code", "execution_count": 3, - "id": "d278579e", + "id": "e95ef6be", "metadata": {}, "outputs": [], "source": [ "import { BaseMessage } from \"@langchain/core/messages\";\n", + "import { StateGraphArgs } from \"@langchain/langgraph\";\n", + "\n", + "interface IState {\n", + " messages: BaseMessage[];\n", + "}\n", "\n", - "const graphState = {\n", + "const graphState: StateGraphArgs[\"channels\"] = {\n", " messages: {\n", " value: (x: BaseMessage[], y: BaseMessage[]) => x.concat(y),\n", " default: () => [],\n", @@ -111,7 +112,7 @@ }, { "cell_type": "markdown", - "id": "2787e144", + "id": "d6954509", "metadata": {}, "source": [ "## Set up the tools\n", @@ -125,8 +126,8 @@ }, { "cell_type": "code", - "execution_count": 4, - "id": "977f2a9c", + "execution_count": 5, + "id": "ec9f73a5", "metadata": {}, "outputs": [], "source": [ @@ -141,9 +142,7 @@ " }),\n", " func: async ({ query }: { query: string }) => {\n", " // This is a placeholder, but don't tell the LLM that...\n", - " return [\n", - " \"Try again in a few seconds! Checking with the weathermen... Call be again next.\",\n", - " ];\n", + " return \"Try again in a few seconds! Checking with the weathermen... 
Call be again next.\";\n", " },\n", "});\n", "\n", @@ -152,7 +151,7 @@ }, { "cell_type": "markdown", - "id": "1858b5fa", + "id": "e8669db6", "metadata": {}, "source": [ "We can now wrap these tools in a simple\n", @@ -165,19 +164,19 @@ }, { "cell_type": "code", - "execution_count": 5, - "id": "184af26c", + "execution_count": 6, + "id": "7f4829c3", "metadata": {}, "outputs": [], "source": [ "import { ToolNode } from \"@langchain/langgraph/prebuilt\";\n", "\n", - "const toolNode = new ToolNode(tools);" + "const toolNode = new ToolNode<{ messages: BaseMessage[] }>(tools);" ] }, { "cell_type": "markdown", - "id": "a1cc64de", + "id": "81a0a750", "metadata": {}, "source": [ "## Set up the model\n", @@ -196,23 +195,23 @@ }, { "cell_type": "code", - "execution_count": 6, - "id": "39ffe2d8", + "execution_count": 9, + "id": "cf1fcc3f", "metadata": {}, "outputs": [], "source": [ - "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", "\n", - "const model = new ChatAnthropic({\n", - " model: \"claude-3-haiku-20240307\",\n", + "const model = new ChatOpenAI({\n", + " model: \"gpt-4o\",\n", " temperature: 0,\n", "});" ] }, { "cell_type": "code", - "execution_count": 7, - "id": "7d443e41", + "execution_count": 10, + "id": "a0903bb8", "metadata": {}, "outputs": [], "source": [ @@ -223,7 +222,7 @@ }, { "cell_type": "markdown", - "id": "7f42428b", + "id": "c96f67f3", "metadata": {}, "source": [ "## Define the nodes\n", @@ -255,16 +254,17 @@ }, { "cell_type": "code", - "execution_count": 8, - "id": "6a91fe60", + "execution_count": 22, + "id": "1249b1b3", "metadata": {}, "outputs": [], "source": [ "import { END } from \"@langchain/langgraph\";\n", "import { AIMessage, BaseMessage, ToolMessage } from \"@langchain/core/messages\";\n", + "import { RunnableConfig } from \"@langchain/core/runnables\";\n", "\n", "// Define the function that determines whether to continue or not\n", - "const shouldContinue = (state: { messages: Array }) => {\n", + "const shouldContinue = (state: IState) => {\n", " const { messages } = state;\n", " const lastMessage = messages[messages.length - 1] as AIMessage;\n", " // If there is no function call, then we finish\n", @@ -278,19 +278,22 @@ "// **MODIFICATION**\n", "//\n", "// Here we don't pass all messages to the model but rather only pass the `N` most recent. Note that this is a terribly simplistic way to handle messages meant as an illustration, and there may be other methods you may want to look into depending on your use case. 
We also have to make sure we don't truncate the chat history to include the tool message first, as this would cause an API error.\n", - "const callModel = async (state: { messages: Array }) => {\n", + "const callModel = async (\n", + " state: IState,\n", + " config: RunnableConfig,\n", + ") => {\n", " let modelMessages = [];\n", " for (let i = state.messages.length - 1; i >= 0; i--) {\n", " modelMessages.push(state.messages[i]);\n", " if (modelMessages.length >= 5) {\n", - " if (ToolMessage.isInstance(modelMessages[modelMessages.length - 1])) {\n", + " if (!ToolMessage.isInstance(modelMessages[modelMessages.length - 1])) {\n", " break;\n", " }\n", " }\n", " }\n", " modelMessages.reverse();\n", "\n", - " const response = await boundModel.invoke(modelMessages);\n", + " const response = await boundModel.invoke(modelMessages, config);\n", " // We return an object, because this will get added to the existing list\n", " return { messages: [response] };\n", "};" @@ -298,7 +301,7 @@ }, { "cell_type": "markdown", - "id": "f805c6bc", + "id": "227a5040", "metadata": {}, "source": [ "## Define the graph\n", @@ -308,122 +311,29 @@ }, { "cell_type": "code", - "execution_count": 9, - "id": "f79df647", + "execution_count": 23, + "id": "ff5f7b65", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "StateGraph {\n", - " nodes: {\n", - " agent: RunnableLambda {\n", - " lc_serializable: \u001b[33mfalse\u001b[39m,\n", - " lc_kwargs: { func: \u001b[36m[AsyncFunction: callModel]\u001b[39m },\n", - " lc_runnable: \u001b[33mtrue\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"runnables\"\u001b[39m ],\n", - " func: \u001b[36m[AsyncFunction: callModel]\u001b[39m\n", - " },\n", - " tools: ToolNode {\n", - " lc_serializable: \u001b[33mfalse\u001b[39m,\n", - " lc_kwargs: {},\n", - " lc_runnable: \u001b[33mtrue\u001b[39m,\n", - " name: \u001b[32m\"tools\"\u001b[39m,\n", - " lc_namespace: [ \u001b[32m\"langgraph\"\u001b[39m ],\n", - " func: \u001b[36m[Function: func]\u001b[39m,\n", - " tags: \u001b[90mundefined\u001b[39m,\n", - " config: { tags: [] },\n", - " trace: \u001b[33mtrue\u001b[39m,\n", - " recurse: \u001b[33mtrue\u001b[39m,\n", - " tools: [\n", - " DynamicStructuredTool {\n", - " lc_serializable: \u001b[33mfalse\u001b[39m,\n", - " lc_kwargs: \u001b[36m[Object]\u001b[39m,\n", - " lc_runnable: \u001b[33mtrue\u001b[39m,\n", - " name: \u001b[32m\"search\"\u001b[39m,\n", - " verbose: \u001b[33mfalse\u001b[39m,\n", - " callbacks: \u001b[90mundefined\u001b[39m,\n", - " tags: [],\n", - " metadata: {},\n", - " returnDirect: \u001b[33mfalse\u001b[39m,\n", - " description: \u001b[32m\"Call to surf the web.\"\u001b[39m,\n", - " func: \u001b[36m[AsyncFunction: func]\u001b[39m,\n", - " schema: \u001b[36m[ZodObject]\u001b[39m\n", - " }\n", - " ]\n", - " }\n", - " },\n", - " edges: Set(2) { [ \u001b[32m\"__start__\"\u001b[39m, \u001b[32m\"agent\"\u001b[39m ], [ \u001b[32m\"tools\"\u001b[39m, \u001b[32m\"agent\"\u001b[39m ] },\n", - " branches: {\n", - " agent: {\n", - " shouldContinue: Branch {\n", - " condition: \u001b[36m[Function: shouldContinue]\u001b[39m,\n", - " ends: { action: \u001b[32m\"tools\"\u001b[39m, __end__: \u001b[32m\"__end__\"\u001b[39m },\n", - " then: \u001b[90mundefined\u001b[39m\n", - " }\n", - " }\n", - " },\n", - " entryPoint: \u001b[90mundefined\u001b[39m,\n", - " compiled: \u001b[33mtrue\u001b[39m,\n", - " supportMultipleEdges: \u001b[33mtrue\u001b[39m,\n", - " channels: {\n", - " messages: 
BinaryOperatorAggregate {\n", - " lc_graph_name: \u001b[32m\"BinaryOperatorAggregate\"\u001b[39m,\n", - " value: [],\n", - " operator: \u001b[36m[Function: value]\u001b[39m,\n", - " initialValueFactory: \u001b[36m[Function: default]\u001b[39m\n", - " }\n", - " },\n", - " waitingEdges: Set(0) {}\n", - "}" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "import { END, START, StateGraph } from \"@langchain/langgraph\";\n", "\n", "// Define a new graph\n", - "const workflow = new StateGraph({\n", + "const workflow = new StateGraph({\n", " channels: graphState,\n", - "});\n", - "\n", - "// Define the two nodes we will cycle between\n", - "workflow.addNode(\"agent\", callModel);\n", - "workflow.addNode(\"tools\", toolNode);\n", - "\n", - "// Set the entrypoint as `agent`\n", - "// This means that this node is the first one called\n", - "workflow.addEdge(START, \"agent\");\n", - "\n", - "// We now add a conditional edge\n", - "workflow.addConditionalEdges(\n", - " // First, we define the start node. We use `agent`.\n", - " // This means these are the edges taken after the `agent` node is called.\n", - " \"agent\",\n", - " // Next, we pass in the function that will determine which node is called next.\n", - " shouldContinue,\n", - " // Finally we pass in a mapping.\n", - " // The keys are strings, and the values are other nodes.\n", - " // END is a special node marking that the graph should finish.\n", - " // What will happen is we will call `shouldContinue`, and then the output of that\n", - " // will be matched against the keys in this mapping.\n", - " // Based on which one it matches, that node will then be called.\n", - " {\n", - " // If `action`, then we call the tool node.\n", - " action: \"tools\",\n", - " // Otherwise we finish.\n", - " [END]: END,\n", - " },\n", - ");\n", - "\n", - "// We now add a normal edge from `action` to `agent`.\n", - "// This means that after `action` is called, `agent` node is called next.\n", - "workflow.addEdge(\"tools\", \"agent\");\n", + "})\n", + " .addNode(\"agent\", callModel)\n", + " .addNode(\"tools\", toolNode)\n", + " .addEdge(START, \"agent\")\n", + " .addConditionalEdges(\n", + " \"agent\",\n", + " shouldContinue,\n", + " {\n", + " tools: \"tools\",\n", + " [END]: END,\n", + " },\n", + " )\n", + " .addEdge(\"tools\", \"agent\");\n", "\n", "// Finally, we compile it!\n", "// This compiles it into a LangChain Runnable,\n", @@ -433,7 +343,7 @@ }, { "cell_type": "markdown", - "id": "9ba2d258", + "id": "6049db62", "metadata": {}, "source": [ "## Use it!\n", @@ -445,31 +355,52 @@ }, { "cell_type": "code", - "execution_count": 10, - "id": "f6449f83", + "execution_count": 27, + "id": "7bd7315e", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "{\n", - " messages: [\n", - " HumanMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"what is the weather in sf? Don't give up! Keep using your tools.\",\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"what is the weather in sf? Don't give up! Keep using your tools.\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " }\n", - " ]\n", - "}\n", + "[human]: what is the weather in sf? Don't give up! 
Keep using your tools.\n", + "-----\n", + "\n", + "[ai]: \n", + "Tools: \n", + "- search({\"query\":\"current weather in San Francisco\"})\n", + "-----\n", + "\n", + "[tool]: Try again in a few seconds! Checking with the weathermen... Call be again next.\n", + "-----\n", + "\n", + "[ai]: \n", + "Tools: \n", + "- search({\"query\":\"current weather in San Francisco\"})\n", + "-----\n", + "\n", + "[tool]: Try again in a few seconds! Checking with the weathermen... Call be again next.\n", + "-----\n", + "\n", + "[ai]: \n", + "Tools: \n", + "- search({\"query\":\"current weather in San Francisco\"})\n", + "-----\n", + "\n", + "[tool]: Try again in a few seconds! Checking with the weathermen... Call be again next.\n", + "-----\n", + "\n", + "[ai]: \n", + "Tools: \n", + "- search({\"query\":\"current weather in San Francisco\"})\n", + "-----\n", + "\n", + "[tool]: Try again in a few seconds! Checking with the weathermen... Call be again next.\n", + "-----\n", + "\n", + "[ai]: \n", + "Tools: \n", + "- search({\"query\":\"current weather in San Francisco\"})\n", "-----\n", "\n" ] @@ -478,93 +409,30 @@ "name": "stderr", "output_type": "stream", "text": [ - "Skipping write for channel branch:agent:shouldContinue:undefined which has no readers\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " messages: [\n", - " HumanMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"what is the weather in sf? Don't give up! Keep using your tools.\",\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"what is the weather in sf? Don't give up! Keep using your tools.\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: [ [Object], [Object] ],\n", - " additional_kwargs: {\n", - " id: \"msg_01Bgr5QEcJGJLVMSCEMnGBmB\",\n", - " type: \"message\",\n", - " role: \"assistant\",\n", - " model: \"claude-3-haiku-20240307\",\n", - " stop_sequence: null,\n", - " usage: [Object],\n", - " stop_reason: \"tool_use\"\n", - " },\n", - " tool_calls: [ [Object] ],\n", - " invalid_tool_calls: [],\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: [\n", - " {\n", - " type: \"text\",\n", - " text: \"Okay, let's try to find the weather in San Francisco using the available tools.\"\n", - " },\n", - " {\n", - " type: \"tool_use\",\n", - " id: \"toolu_01PBVj5LCRkSYcSCGLTMVFj3\",\n", - " name: \"search\",\n", - " input: [Object]\n", - " }\n", - " ],\n", - " name: undefined,\n", - " additional_kwargs: {\n", - " id: \"msg_01Bgr5QEcJGJLVMSCEMnGBmB\",\n", - " type: \"message\",\n", - " role: \"assistant\",\n", - " model: \"claude-3-haiku-20240307\",\n", - " stop_sequence: null,\n", - " usage: { input_tokens: 378, output_tokens: 73 },\n", - " stop_reason: \"tool_use\"\n", - " },\n", - " response_metadata: {\n", - " id: \"msg_01Bgr5QEcJGJLVMSCEMnGBmB\",\n", - " model: \"claude-3-haiku-20240307\",\n", - " stop_sequence: null,\n", - " usage: { input_tokens: 378, output_tokens: 73 },\n", - " stop_reason: \"tool_use\"\n", - " },\n", - " tool_calls: [\n", - " {\n", - " name: \"search\",\n", - " args: [Object],\n", - " id: \"toolu_01PBVj5LCRkSYcSCGLTMVFj3\"\n", - " }\n", - " ],\n", - " invalid_tool_calls: []\n", - " }\n", - " ]\n", - "}\n", - "-----\n", - "\n" + "GraphRecursionError: Recursion 
limit of 10 reached without hitting a stop condition. You can increase the limit by setting the \"recursionLimit\" config key.\n", + " at CompiledStateGraph._transform (/Users/wfh/code/lc/langgraphjs/langgraph/dist/pregel/index.cjs:432:27)\n", + "\u001b[90m at process.processTicksAndRejections (node:internal/process/task_queues:95:5)\u001b[39m\n", + " at async CompiledStateGraph._transformStreamWithConfig (/Users/wfh/code/lc/langgraphjs/node_modules/\u001b[4m@langchain\u001b[24m/core/dist/runnables/base.cjs:290:30)\n", + " at async CompiledStateGraph.transform (/Users/wfh/code/lc/langgraphjs/langgraph/dist/pregel/index.cjs:527:26)\n", + " at async Object.pull (/Users/wfh/code/lc/langgraphjs/node_modules/\u001b[4m@langchain\u001b[24m/core/dist/utils/stream.cjs:96:41)\n" ] } ], "source": [ - "import { HumanMessage } from \"@langchain/core/messages\";\n", + "import { HumanMessage, isAIMessage } from \"@langchain/core/messages\";\n", + "\n", + "const prettyPrint = (message: BaseMessage) => {\n", + " let txt = `[${message._getType()}]: ${message.content}`;\n", + " if (\n", + " isAIMessage(message) && (message as AIMessage)?.tool_calls?.length || 0 > 0\n", + " ) {\n", + " const tool_calls = (message as AIMessage)?.tool_calls\n", + " ?.map((tc) => `- ${tc.name}(${JSON.stringify(tc.args)})`)\n", + " .join(\"\\n\");\n", + " txt += ` \\nTools: \\n${tool_calls}`;\n", + " }\n", + " console.log(txt);\n", + "};\n", "\n", "const inputs = {\n", " messages: [\n", @@ -573,8 +441,15 @@ " ),\n", " ],\n", "};\n", - "for await (const output of await app.stream(inputs, { streamMode: \"values\" })) {\n", - " console.log(output);\n", + "// Setting the recursionLimit will set a max number of steps. We expect this to endlessly loop :)\n", + "for await (\n", + " const output of await app.stream(inputs, {\n", + " streamMode: \"values\",\n", + " recursionLimit: 10,\n", + " })\n", + ") {\n", + " const lastMessage = output.messages[output.messages.length - 1];\n", + " prettyPrint(lastMessage);\n", " console.log(\"-----\\n\");\n", "}" ] @@ -582,33 +457,28 @@ { "cell_type": "code", "execution_count": null, - "id": "d7a25624", + "id": "763c79c8-71b8-441f-a52a-d03708170d12", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { - "jupytext": { - "text_representation": { - "extension": ".py", - "format_name": "percent", - "format_version": "1.3", - "jupytext_version": "1.16.1" - } - }, "kernelspec": { - "display_name": "Deno", + "display_name": "TypeScript", "language": "typescript", - "name": "deno" + "name": "tslab" }, "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, "file_extension": ".ts", - "mimetype": "text/x.typescript", + "mimetype": "text/typescript", "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.4.5" + "version": "3.7.2" } }, "nbformat": 4, diff --git a/examples/how-tos/persistence.ipynb b/examples/how-tos/persistence.ipynb index f0cc1feb..139d5034 100644 --- a/examples/how-tos/persistence.ipynb +++ b/examples/how-tos/persistence.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "9395cccb-a6d0-4d46-bad7-ed2d012af58a", + "id": "aad4e28d", "metadata": {}, "source": [ "# Persistence\n", @@ -47,13 +47,13 @@ "
\n", "

Note

\n", "

\n", - " In this how-to, we will create our agent from scratch to be transparent (but verbose). You can accomplish similar functionality using the createReactAgent(model, tools=tool, checkpointer=checkpointer) (API doc) constructor. This may be more appropriate if you are used to LangChain’s AgentExecutor class.\n", + " In this how-to, we will create our agent from scratch to be transparent (but verbose). You can accomplish similar functionality using the createReactAgent(model, tools=tool, checkpointer=checkpointer) (API doc) constructor. This may be more appropriate if you are used to LangChain's AgentExecutor class.\n", "

\n", "
\n", "\n", "## Setup\n", "\n", - "This guide will use Anthropic's Claude model. We will optionally set our API key\n", + "This guide will use OpenAI's GPT-4o model. We will optionally set our API key\n", "for [LangSmith tracing](https://smith.langchain.com/), which will give us\n", "best-in-class observability." ] @@ -61,22 +61,32 @@ { "cell_type": "code", "execution_count": 1, - "id": "f05706ae-f5c9-45e8-8c0a-2215703ee993", - "metadata": {}, - "outputs": [], + "id": "10021b8c", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Persistence: LangGraphJS\n" + ] + } + ], "source": [ - "// Deno.env.set(\"ANTHROPIC_API_KEY\", \"sk_...\");\n", + "// process.env.OPENAI_API_KEY = \"sk_...\";\n", "\n", "// Optional, add tracing in LangSmith\n", - "// Deno.env.set(\"LANGCHAIN_API_KEY\", \"ls__...\");\n", - "Deno.env.set(\"LANGCHAIN_CALLBACKS_BACKGROUND\", \"true\");\n", - "Deno.env.set(\"LANGCHAIN_TRACING_V2\", \"true\");\n", - "Deno.env.set(\"LANGCHAIN_PROJECT\", \"Persistence: LangGraphJS\");\n" + "// process.env.LANGCHAIN_API_KEY = \"ls__...\";\n", + "process.env.LANGCHAIN_CALLBACKS_BACKGROUND = \"true\";\n", + "process.env.LANGCHAIN_TRACING_V2 = \"true\";\n", + "process.env.LANGCHAIN_PROJECT = \"Persistence: LangGraphJS\";" ] }, { "cell_type": "markdown", - "id": "6b47c4dc-cabe-4ee5-aaa8-9552f5d75ed2", + "id": "5b9e252c", "metadata": {}, "source": [ "## Define the state\n", @@ -87,31 +97,31 @@ { "cell_type": "code", "execution_count": 2, - "id": "abea6e1f-c21c-4dfe-a4cd-89326e625c66", - "metadata": {}, + "id": "9fc47087", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ "import { BaseMessage } from \"@langchain/core/messages\";\n", + "import { StateGraphArgs } from \"@langchain/langgraph\";\n", "\n", "interface IState {\n", - " messages: {\n", - " value: (x: BaseMessage[], y: BaseMessage[]) => BaseMessage[];\n", - " default: () => BaseMessage[];\n", - " };\n", + " messages: BaseMessage[];\n", "}\n", "\n", - "// This defines the agent state + reducer functions\n", - "const graphState: IState = {\n", + "// This defines the agent state\n", + "const graphState: StateGraphArgs[\"channels\"] = {\n", " messages: {\n", " value: (x: BaseMessage[], y: BaseMessage[]) => x.concat(y),\n", " default: () => [],\n", " },\n", - "};\n" + "};" ] }, { "cell_type": "markdown", - "id": "408be71e-8d06-403a-8623-6e6c222c677a", + "id": "8bdba79f", "metadata": {}, "source": [ "## Set up the tools\n", @@ -125,21 +135,12 @@ }, { "cell_type": "code", - "execution_count": 4, - "id": "2ae6c71e-10ce-4783-9dfc-49e5b750b269", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "\u001b[32m\"Cold, with a low of 13 ℃\"\u001b[39m" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": 3, + "id": "5f1e5deb", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [], "source": [ "import { DynamicStructuredTool } from \"@langchain/core/tools\";\n", "import { z } from \"zod\";\n", @@ -159,12 +160,12 @@ "\n", "await searchTool.invoke({ query: \"What's the weather like?\" });\n", "\n", - "const tools = [searchTool];\n" + "const tools = [searchTool];" ] }, { "cell_type": "markdown", - "id": "78eae3ed-b322-4afe-9111-d7dc204fdb77", + "id": "a5615fd8", "metadata": {}, "source": [ "We can now wrap these tools in a simple\n", @@ -175,19 +176,22 @@ }, { "cell_type": "code", - "execution_count": 5, - "id": "638fa6e9-cb76-4838-bc8b-02edf7da5ecc", - "metadata": {}, + 
"execution_count": 4, + "id": "1852d2a4", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ "import { ToolNode } from \"@langchain/langgraph/prebuilt\";\n", + "import { BaseMessage } from \"@langchain/core/messages\";\n", "\n", - "const toolNode = new ToolNode(tools);\n" + "const toolNode = new ToolNode<{ messages: BaseMessage[] }>(tools);" ] }, { "cell_type": "markdown", - "id": "ea94b49b-fb11-463c-b34c-9d66b5327678", + "id": "a593cc20", "metadata": {}, "source": [ "## Set up the model\n", @@ -211,19 +215,21 @@ }, { "cell_type": "code", - "execution_count": 6, - "id": "3a00435f-5e80-4a3e-a873-b308b1f781db", - "metadata": {}, + "execution_count": 5, + "id": "77c9701b", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ - "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", "\n", - "const model = new ChatAnthropic({ model: \"claude-3-haiku-20240307\" });\n" + "const model = new ChatOpenAI({ model: \"gpt-4o\" });" ] }, { "cell_type": "markdown", - "id": "3975c1ab-fcac-4448-a0cd-7969ae6f4e45", + "id": "4177b143", "metadata": {}, "source": [ "After we've done this, we should make sure the model knows that it has these\n", @@ -233,43 +239,19 @@ }, { "cell_type": "code", - "execution_count": 7, - "id": "d0d4a57f-e7d0-48d7-8626-e83185c1236d", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{\n", - " tools: [\n", - " {\n", - " name: \u001b[32m\"search\"\u001b[39m,\n", - " description: \u001b[32m\"Use to surf the web, fetch current information, check the weather, and retrieve other information.\"\u001b[39m,\n", - " input_schema: {\n", - " type: \u001b[32m\"object\"\u001b[39m,\n", - " properties: { query: \u001b[36m[Object]\u001b[39m },\n", - " required: [ \u001b[32m\"query\"\u001b[39m ],\n", - " additionalProperties: \u001b[33mfalse\u001b[39m,\n", - " \u001b[32m\"$schema\"\u001b[39m: \u001b[32m\"http://json-schema.org/draft-07/schema#\"\u001b[39m\n", - " }\n", - " }\n", - " ]\n", - "}" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": 6, + "id": "b35d9bd2", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [], "source": [ - "const boundModel = model.bindTools(tools);\n", - "boundModel.kwargs;\n" + "const boundModel = model.bindTools(tools);" ] }, { "cell_type": "markdown", - "id": "1c0626d5-faae-4e24-bf1b-f01357c28627", + "id": "bbb0ae12", "metadata": {}, "source": [ "## Define the graph\n", @@ -279,133 +261,74 @@ }, { "cell_type": "code", - "execution_count": 8, - "id": "812b6d4b-9db7-490b-adae-ad9d933da56c", + "execution_count": 7, + "id": "5f85457b", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "StateGraph {\n", - " nodes: {\n", - " agent: RunnableLambda {\n", - " lc_serializable: \u001b[33mfalse\u001b[39m,\n", - " lc_kwargs: { func: \u001b[36m[AsyncFunction: callModel]\u001b[39m },\n", - " lc_runnable: \u001b[33mtrue\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"runnables\"\u001b[39m ],\n", - " func: \u001b[36m[AsyncFunction: callModel]\u001b[39m\n", - " },\n", - " tools: ToolNode {\n", - " lc_serializable: \u001b[33mfalse\u001b[39m,\n", - " lc_kwargs: {},\n", - " lc_runnable: \u001b[33mtrue\u001b[39m,\n", - " name: \u001b[32m\"tools\"\u001b[39m,\n", - " lc_namespace: [ \u001b[32m\"langgraph\"\u001b[39m ],\n", - " func: \u001b[36m[Function: func]\u001b[39m,\n", - " tags: 
\u001b[90mundefined\u001b[39m,\n", - " config: { tags: [] },\n", - " trace: \u001b[33mtrue\u001b[39m,\n", - " recurse: \u001b[33mtrue\u001b[39m,\n", - " tools: [\n", - " DynamicStructuredTool {\n", - " lc_serializable: \u001b[33mfalse\u001b[39m,\n", - " lc_kwargs: \u001b[36m[Object]\u001b[39m,\n", - " lc_runnable: \u001b[33mtrue\u001b[39m,\n", - " name: \u001b[32m\"search\"\u001b[39m,\n", - " verbose: \u001b[33mfalse\u001b[39m,\n", - " callbacks: \u001b[90mundefined\u001b[39m,\n", - " tags: [],\n", - " metadata: {},\n", - " returnDirect: \u001b[33mfalse\u001b[39m,\n", - " description: \u001b[32m\"Use to surf the web, fetch current information, check the weather, and retrieve other information.\"\u001b[39m,\n", - " func: \u001b[36m[AsyncFunction: func]\u001b[39m,\n", - " schema: \u001b[36m[ZodObject]\u001b[39m\n", - " }\n", - " ]\n", - " }\n", - " },\n", - " edges: Set(2) { [ \u001b[32m\"__start__\"\u001b[39m, \u001b[32m\"agent\"\u001b[39m ], [ \u001b[32m\"tools\"\u001b[39m, \u001b[32m\"agent\"\u001b[39m ] },\n", - " branches: {\n", - " agent: {\n", - " routeMessage: Branch {\n", - " condition: \u001b[36m[Function: routeMessage]\u001b[39m,\n", - " ends: \u001b[90mundefined\u001b[39m,\n", - " then: \u001b[90mundefined\u001b[39m\n", - " }\n", - " }\n", - " },\n", - " entryPoint: \u001b[90mundefined\u001b[39m,\n", - " compiled: \u001b[33mtrue\u001b[39m,\n", - " supportMultipleEdges: \u001b[33mtrue\u001b[39m,\n", - " channels: {\n", - " messages: BinaryOperatorAggregate {\n", - " lc_graph_name: \u001b[32m\"BinaryOperatorAggregate\"\u001b[39m,\n", - " value: [],\n", - " operator: \u001b[36m[Function: value]\u001b[39m,\n", - " initialValueFactory: \u001b[36m[Function: default]\u001b[39m\n", - " }\n", - " },\n", - " waitingEdges: Set(0) {}\n", - "}" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "import { END, START, StateGraph } from \"@langchain/langgraph\";\n", - "import { BaseMessage } from \"@langchain/core/messages\";\n", + "import { AIMessage } from \"@langchain/core/messages\";\n", + "import { RunnableConfig } from \"@langchain/core/runnables\";\n", "\n", - "const routeMessage = (state: { messages: Array }) => {\n", + "const routeMessage = (state: IState) => {\n", " const { messages } = state;\n", - " const lastMessage = messages[messages.length - 1];\n", + " const lastMessage = messages[messages.length - 1] as AIMessage;\n", " // If no tools are called, we can finish (respond to the user)\n", - " if (!lastMessage.tool_calls.length) {\n", + " if (!lastMessage.tool_calls?.length) {\n", " return END;\n", " }\n", " // Otherwise if there is, we continue and call the tools\n", " return \"tools\";\n", "};\n", "\n", - "const callModel = async (state: { messages: Array }) => {\n", + "const callModel = async (\n", + " state: IState,\n", + " config: RunnableConfig,\n", + ") => {\n", " const { messages } = state;\n", - " const response = await boundModel.invoke(messages);\n", + " const response = await boundModel.invoke(messages, config);\n", " return { messages: [response] };\n", "};\n", "\n", - "const workflow = new StateGraph({\n", + "const workflow = new StateGraph({\n", " channels: graphState,\n", - "});\n", - "\n", - "// Define the two nodes we will cycle between\n", - "workflow.addNode(\"agent\", callModel);\n", - "workflow.addNode(\"tools\", toolNode);\n", - "\n", - "// Set the entrypoint as `agent`\n", - "workflow.addEdge(START, \"agent\");\n", - "workflow.addConditionalEdges(\"agent\", routeMessage);\n", - 
"workflow.addEdge(\"tools\", \"agent\");\n", + "})\n", + " .addNode(\"agent\", callModel)\n", + " .addNode(\"tools\", toolNode)\n", + " .addEdge(START, \"agent\")\n", + " .addConditionalEdges(\"agent\", routeMessage, { finish: END, tools: \"tools\" })\n", + " .addEdge(\"tools\", \"agent\");\n", "\n", "const graph = workflow.compile();" ] }, { "cell_type": "code", - "execution_count": 9, - "id": "584468e0-c824-4401-acc5-6384e893830f", + "execution_count": 8, + "id": "41364864", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "[ \"user\", \"Hi I'm Yu, niced to meet you.\" ]\n", + "[ \u001b[32m'user'\u001b[39m, \u001b[32m\"Hi I'm Yu, niced to meet you.\"\u001b[39m ]\n", "-----\n", - "\n", - "It's nice to meet you too, Yu! I'm an AI assistant created by Anthropic to help with all sorts of tasks. I'm here to chat, answer questions, and assist you however I can. Please feel free to ask me anything, and I'll do my best to help!\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Skipping write for channel branch:agent:routeMessage:undefined which has no readers\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Nice to meet you, Yu! How can I assist you today?\n", "-----\n", "\n" ] @@ -413,7 +336,6 @@ ], "source": [ "let inputs = { messages: [[\"user\", \"Hi I'm Yu, niced to meet you.\"]] };\n", - "\n", "for await (\n", " const { messages } of await graph.stream(inputs, {\n", " streamMode: \"values\",\n", @@ -433,18 +355,31 @@ }, { "cell_type": "code", - "execution_count": 10, - "id": "c4db9d23-7ede-42d5-9288-0c31fe1f028a", + "execution_count": 9, + "id": "ccddfd4a", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "[ \"user\", \"Remember my name?\" ]\n", + "[ \u001b[32m'user'\u001b[39m, \u001b[32m'Remember my name?'\u001b[39m ]\n", "-----\n", - "\n", - "I'm afraid I don't actually have the ability to remember your name or other personal details about you. As an AI assistant, I don't have a persistent memory of previous conversations or users. I respond based on the current context provided to me. Could you please restate your request? I'll do my best to assist you.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Skipping write for channel branch:agent:routeMessage:undefined which has no readers\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "I cannot remember personalized details, including names, from previous interactions. However, I'd be happy to help you with any inquiries you have! 
How can I assist you today?\n", "-----\n", "\n" ] @@ -452,7 +387,6 @@ ], "source": [ "inputs = { messages: [[\"user\", \"Remember my name?\"]] };\n", - "\n", "for await (\n", " const { messages } of await graph.stream(inputs, {\n", " streamMode: \"values\",\n", @@ -472,7 +406,7 @@ }, { "cell_type": "markdown", - "id": "312879af-f504-4888-a737-bf74487b81ef", + "id": "3bece060", "metadata": {}, "source": [ "## Add Memory\n", @@ -484,32 +418,47 @@ }, { "cell_type": "code", - "execution_count": 11, - "id": "ef68c6a0-b4c8-4e71-acdb-7edd9023f521", - "metadata": {}, + "execution_count": 10, + "id": "217ac741", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ "import { MemorySaver } from \"@langchain/langgraph\";\n", "\n", "// Here we only save in-memory\n", "const memory = new MemorySaver();\n", - "const persistentGraph = workflow.compile({ checkpointer: memory });\n" + "const persistentGraph = workflow.compile({ checkpointer: memory });" ] }, { "cell_type": "code", - "execution_count": 12, - "id": "24711ca1-3fd8-46ca-b2c1-affa8cd45267", + "execution_count": 11, + "id": "173c17f9", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "[ \"user\", \"Hi I'm Jo, niced to meet you.\" ]\n", + "[ \u001b[32m'user'\u001b[39m, \u001b[32m\"Hi I'm Jo, niced to meet you.\"\u001b[39m ]\n", "-----\n", - "\n", - "It's nice to meet you too, Jo! I'm an AI assistant created by Anthropic to be helpful, harmless, and honest. I'm here to assist you with any questions or tasks you may have. Please let me know if there is anything I can help with.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Skipping write for channel branch:agent:routeMessage:undefined which has no readers\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Hi Jo, nice to meet you too! How can I assist you today?\n", "-----\n", "\n" ] @@ -538,24 +487,31 @@ }, { "cell_type": "code", - "execution_count": 13, - "id": "8537fd9d-083c-4096-91c5-44a2e66b18c6", + "execution_count": 12, + "id": "1162eb84", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "[ \"user\", \"Remember my name?\" ]\n", + "[ \u001b[32m'user'\u001b[39m, \u001b[32m'Remember my name?'\u001b[39m ]\n", "-----\n", "\n" ] }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Skipping write for channel branch:agent:routeMessage:undefined which has no readers\n" + ] + }, { "name": "stdout", "output_type": "stream", "text": [ - "Certainly, I'll remember your name is Jo. It's a pleasure to meet you!\n", + "Yes, your name is Jo. How can I assist you today?\n", "-----\n", "\n" ] @@ -583,51 +539,63 @@ }, { "cell_type": "markdown", - "id": "a4ae8c30-b462-4f61-94d5-3b361c2719c2", + "id": "73902faf", "metadata": {}, "source": [ "## New Conversational Thread\n", "\n", "If we want to start a new conversation, we can pass in a different\n", "**`thread_id`**. Poof! All the memories are gone (just kidding, they'll always\n", - "live in that other thread)!" 
+ "live in that other thread)!\n" ] }, { "cell_type": "code", - "execution_count": 14, - "id": "cf3a5244-7ff6-4ece-95fe-240341992863", - "metadata": {}, + "execution_count": 13, + "id": "58cc0612", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [ { - "data": { - "text/plain": [ - "{ configurable: { thread_id: \u001b[32m\"conversation-2\"\u001b[39m } }" - ] - }, - "execution_count": 14, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "{ configurable: { thread_id: \u001b[32m'conversation-2'\u001b[39m } }\n" + ] } ], "source": [ - "config = { configurable: { thread_id: \"conversation-2\" } };\n" + "config = { configurable: { thread_id: \"conversation-2\" } };" ] }, { "cell_type": "code", - "execution_count": 15, - "id": "c183a4a2-57a3-4ee4-89e4-f695e1a15295", + "execution_count": 14, + "id": "25aea87b", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "[ \"user\", \"you forgot?\" ]\n", + "[ \u001b[32m'user'\u001b[39m, \u001b[32m'you forgot?'\u001b[39m ]\n", "-----\n", - "\n", - "I'm afraid I don't have enough context to understand what you mean by \"you forgot?\". Could you please provide more details about what you are asking me about? I'd be happy to try to assist you once I have a better understanding of your question.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Skipping write for channel branch:agent:routeMessage:undefined which has no readers\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Could you please provide more context or clarify what you're referring to? Let me know how I can assist you further!\n", "-----\n", "\n" ] @@ -652,29 +620,27 @@ " console.log(\"-----\\n\");\n", "}" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d383edee-1a8c-4b66-b668-ee760918bede", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { + "jupytext": { + "encoding": "# -*- coding: utf-8 -*-" + }, "kernelspec": { - "display_name": "Deno", + "display_name": "TypeScript", "language": "typescript", - "name": "deno" + "name": "tslab" }, "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, "file_extension": ".ts", - "mimetype": "text/x.typescript", + "mimetype": "text/typescript", "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.4.5" + "version": "3.7.2" } }, "nbformat": 4, diff --git a/examples/how-tos/respond-in-format.ipynb b/examples/how-tos/respond-in-format.ipynb index 74477567..01bc78b7 100644 --- a/examples/how-tos/respond-in-format.ipynb +++ b/examples/how-tos/respond-in-format.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "499df080", + "id": "2c3d2c48", "metadata": {}, "source": [ "# Respond in a format\n", @@ -26,7 +26,7 @@ }, { "cell_type": "markdown", - "id": "de475e2b", + "id": "5860c111", "metadata": {}, "source": [ "## Setup\n", @@ -40,25 +40,27 @@ }, { "cell_type": "markdown", - "id": "29e2eb83", + "id": "23523fc0", "metadata": {}, "source": [ - "Next, we need to set API keys for Anthropic (the LLM we will use).\n" + "Next, we need to set API keys for OpenAI (the LLM we will use).\n" ] }, { "cell_type": "code", - "execution_count": null, - "id": "c26368af", - "metadata": {}, + "execution_count": 1, + "id": "fb3ada8f", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ - "// Deno.env.set(\"ANTHROPIC_API_KEY\", 
\"sk_...\");\n" + "// process.env.OPENAI_API_KEY = \"sk_...\";" ] }, { "cell_type": "markdown", - "id": "4c5e2855", + "id": "e7e7be8c", "metadata": {}, "source": [ "Optionally, we can set API key for\n", @@ -68,20 +70,30 @@ }, { "cell_type": "code", - "execution_count": null, - "id": "abd1c617", - "metadata": {}, - "outputs": [], + "execution_count": 2, + "id": "bf127e2b", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Respond in Format: LangGraphJS\n" + ] + } + ], "source": [ - "// Deno.env.set(\"LANGCHAIN_API_KEY\", \"ls...\");\n", - "Deno.env.set(\"LANGCHAIN_CALLBACKS_BACKGROUND\", \"true\");\n", - "Deno.env.set(\"LANGCHAIN_TRACING_V2\", \"true\");\n", - "Deno.env.set(\"LANGCHAIN_PROJECT\", \"Respond in Format: LangGraphJS\");\n" + "// process.env.LANGCHAIN_API_KEY = \"ls...\";\n", + "process.env.LANGCHAIN_CALLBACKS_BACKGROUND = \"true\";\n", + "process.env.LANGCHAIN_TRACING_V2 = \"true\";\n", + "process.env.LANGCHAIN_PROJECT = \"Respond in Format: LangGraphJS\";" ] }, { "cell_type": "markdown", - "id": "b629d5cd", + "id": "b214dd10", "metadata": {}, "source": [ "## Set up the State\n" @@ -89,22 +101,31 @@ }, { "cell_type": "code", - "execution_count": 11, - "id": "9ab504e6", - "metadata": {}, + "execution_count": 3, + "id": "4ad79663", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ - "const graphState = {\n", + "import { BaseMessage } from \"@langchain/core/messages\";\n", + "import { StateGraphArgs } from \"@langchain/langgraph\";\n", + "\n", + "interface IState {\n", + " messages: BaseMessage[];\n", + "}\n", + "\n", + "const graphState: StateGraphArgs[\"channels\"] = {\n", " messages: {\n", - " value: (x, y) => x.concat(y),\n", + " value: (x: BaseMessage[], y: BaseMessage[]) => x.concat(y),\n", " default: () => [],\n", " },\n", - "};\n" + "};" ] }, { "cell_type": "markdown", - "id": "188d68c2", + "id": "eeca531d", "metadata": {}, "source": [ "## Set up the tools\n" @@ -112,9 +133,11 @@ }, { "cell_type": "code", - "execution_count": 1, - "id": "ac780faa", - "metadata": {}, + "execution_count": 4, + "id": "d0fe8477", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ "import { DynamicStructuredTool } from \"@langchain/core/tools\";\n", @@ -132,12 +155,12 @@ " },\n", "});\n", "\n", - "const tools = [searchTool];\n" + "const tools = [searchTool];" ] }, { "cell_type": "markdown", - "id": "324f2978", + "id": "f1a6aa07", "metadata": {}, "source": [ "We can now wrap these tools in a simple\n", @@ -146,19 +169,21 @@ }, { "cell_type": "code", - "execution_count": 4, - "id": "69b22819", - "metadata": {}, + "execution_count": 5, + "id": "df80654e", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ "import { ToolNode } from \"@langchain/langgraph/prebuilt\";\n", "\n", - "const toolNode = new ToolNode(tools);\n" + "const toolNode = new ToolNode<{ messages: BaseMessage[] }>(tools);" ] }, { "cell_type": "markdown", - "id": "9ecb369c", + "id": "19f17e92", "metadata": {}, "source": [ "## Set up the model\n" @@ -166,22 +191,24 @@ }, { "cell_type": "code", - "execution_count": 5, - "id": "db695bda", - "metadata": {}, + "execution_count": 6, + "id": "9c644fb9", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ - "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", "\n", - "const model = new ChatAnthropic({\n", + "const model = new ChatOpenAI({\n", " temperature: 
0,\n", - " model: \"claude-3-haiku-20240307\",\n", - "});\n" + " model: \"gpt-4o\",\n", + "});" ] }, { "cell_type": "markdown", - "id": "a074af33", + "id": "bb86967d", "metadata": {}, "source": [ "After we've done this, we should make sure the model knows that it has these\n", @@ -189,17 +216,20 @@ "the format for function calling, and then bind them to the model class.\n", "\n", "We also want to define a response schema for the language model and bind it to\n", - "the model as a function as well.\n" + "the model as a function as well." ] }, { "cell_type": "code", "execution_count": 7, - "id": "0c394188", - "metadata": {}, + "id": "e148a48b", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ "import { z } from \"zod\";\n", + "import { zodToJsonSchema } from \"zod-to-json-schema\";\n", "\n", "const Response = z.object({\n", " temperature: z.number().describe(\"the temperature\"),\n", @@ -208,13 +238,20 @@ "\n", "const boundModel = model.bindTools([\n", " ...tools,\n", - " { name: \"Response\", schema: Response },\n", - "]);\n" + " {\n", + " type: \"function\",\n", + " function: {\n", + " name: \"Response\",\n", + " description: \"Respond to the user using this tool.\",\n", + " parameters: zodToJsonSchema(Response),\n", + " },\n", + " },\n", + "]);" ] }, { "cell_type": "markdown", - "id": "a2afa5a4", + "id": "6e082c02", "metadata": {}, "source": [ "## Define the nodes\n" @@ -222,157 +259,83 @@ }, { "cell_type": "code", - "execution_count": 9, - "id": "ac7a65f8", - "metadata": {}, + "execution_count": 8, + "id": "960ef633", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ - "import { BaseMessage } from \"@langchain/core/messages\";\n", - "import { AIMessage } from \"@langchain/core/messages\";\n", + "import { AIMessage, BaseMessage } from \"@langchain/core/messages\";\n", + "import { RunnableConfig } from \"@langchain/core/runnables\";\n", + "import { END } from \"@langchain/langgraph\";\n", "\n", "// Define the function that determines whether to continue or not\n", - "const route = (state: { messages: BaseMessage[] }) => {\n", + "const route = (state: IState) => {\n", " const { messages } = state;\n", " const lastMessage = messages[messages.length - 1] as AIMessage;\n", " // If there is no function call, then we finish\n", " if (!lastMessage.tool_calls || lastMessage.tool_calls.length === 0) {\n", - " return \"end\";\n", + " return END;\n", " }\n", " // Otherwise if there is, we need to check what type of function call it is\n", " if (lastMessage.tool_calls[0].name === \"Response\") {\n", - " return \"end\";\n", + " return END;\n", " }\n", " // Otherwise we continue\n", " return \"tools\";\n", "};\n", "\n", "// Define the function that calls the model\n", - "const callModel = async (state: { messages: BaseMessage[] }) => {\n", + "const callModel = async (\n", + " state: IState,\n", + " config: RunnableConfig,\n", + ") => {\n", " const { messages } = state;\n", - " const response = await boundModel.invoke(messages);\n", + " const response = await boundModel.invoke(messages, config);\n", " // We return an object, because this will get added to the existing list\n", " return { messages: [response] };\n", - "};\n" + "};" ] }, { "cell_type": "markdown", - "id": "3ee60938", + "id": "3d09c2d0", "metadata": {}, "source": [ - "## Define the graph\n" + "## Define the graph" ] }, { "cell_type": "code", - "execution_count": 12, - "id": "2d4584bf", + "execution_count": 9, + "id": "51179012", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ 
- "StateGraph {\n", - " nodes: {\n", - " agent: RunnableLambda {\n", - " lc_serializable: \u001b[33mfalse\u001b[39m,\n", - " lc_kwargs: { func: \u001b[36m[AsyncFunction: callModel]\u001b[39m },\n", - " lc_runnable: \u001b[33mtrue\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"runnables\"\u001b[39m ],\n", - " func: \u001b[36m[AsyncFunction: callModel]\u001b[39m\n", - " },\n", - " action: ToolNode {\n", - " lc_serializable: \u001b[33mfalse\u001b[39m,\n", - " lc_kwargs: {},\n", - " lc_runnable: \u001b[33mtrue\u001b[39m,\n", - " name: \u001b[32m\"tools\"\u001b[39m,\n", - " lc_namespace: [ \u001b[32m\"langgraph\"\u001b[39m ],\n", - " func: \u001b[36m[Function: func]\u001b[39m,\n", - " tags: \u001b[90mundefined\u001b[39m,\n", - " config: { tags: [] },\n", - " trace: \u001b[33mtrue\u001b[39m,\n", - " recurse: \u001b[33mtrue\u001b[39m,\n", - " tools: [\n", - " DynamicStructuredTool {\n", - " lc_serializable: \u001b[33mfalse\u001b[39m,\n", - " lc_kwargs: \u001b[36m[Object]\u001b[39m,\n", - " lc_runnable: \u001b[33mtrue\u001b[39m,\n", - " name: \u001b[32m\"search\"\u001b[39m,\n", - " verbose: \u001b[33mfalse\u001b[39m,\n", - " callbacks: \u001b[90mundefined\u001b[39m,\n", - " tags: [],\n", - " metadata: {},\n", - " returnDirect: \u001b[33mfalse\u001b[39m,\n", - " description: \u001b[32m\"Call to surf the web.\"\u001b[39m,\n", - " func: \u001b[36m[AsyncFunction: func]\u001b[39m,\n", - " schema: \u001b[36m[ZodObject]\u001b[39m\n", - " }\n", - " ]\n", - " }\n", - " },\n", - " edges: Set(2) { [ \u001b[32m\"__start__\"\u001b[39m, \u001b[32m\"agent\"\u001b[39m ], [ \u001b[32m\"action\"\u001b[39m, \u001b[32m\"agent\"\u001b[39m ] },\n", - " branches: {\n", - " agent: {\n", - " route: Branch {\n", - " condition: \u001b[36m[Function: route]\u001b[39m,\n", - " ends: { action: \u001b[32m\"action\"\u001b[39m, end: \u001b[32m\"__end__\"\u001b[39m },\n", - " then: \u001b[90mundefined\u001b[39m\n", - " }\n", - " }\n", - " },\n", - " entryPoint: \u001b[90mundefined\u001b[39m,\n", - " compiled: \u001b[33mtrue\u001b[39m,\n", - " supportMultipleEdges: \u001b[33mtrue\u001b[39m,\n", - " channels: {\n", - " messages: BinaryOperatorAggregate {\n", - " lc_graph_name: \u001b[32m\"BinaryOperatorAggregate\"\u001b[39m,\n", - " value: [],\n", - " operator: \u001b[36m[Function: value]\u001b[39m,\n", - " initialValueFactory: \u001b[36m[Function: default]\u001b[39m\n", - " }\n", - " },\n", - " waitingEdges: Set(0) {}\n", - "}" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ - "import { END, StateGraph } from \"@langchain/langgraph\";\n", + "import { END, START, StateGraph } from \"@langchain/langgraph\";\n", "\n", "// Define a new graph\n", - "const workflow = new StateGraph({\n", + "const workflow = new StateGraph({\n", " channels: graphState,\n", - "});\n", - "\n", - "// Define the two nodes we will cycle between\n", - "workflow.addNode(\"agent\", callModel);\n", - "workflow.addNode(\"tools\", toolNode);\n", - "\n", - "// Set the entrypoint as `agent`\n", - "// This means that this node is the first one called\n", - "workflow.setEntryPoint(\"agent\");\n", - "\n", - "// We now add a conditional edge\n", - "workflow.addConditionalEdges(\n", - " // First, we define the start node. 
We use `agent`.\n", - " // This means these are the edges taken after the `agent` node is called.\n", - " \"agent\",\n", - " // Next, we pass in the function that will determine which node is called next.\n", - " route,\n", - " {\n", - " action: \"tools\",\n", - " end: END,\n", - " },\n", - ");\n", - "\n", - "// We now add a normal edge from `tools` to `agent`.\n", - "// This means that after `tools` is called, `agent` node is called next.\n", - "workflow.addEdge(\"tools\", \"agent\");\n", + "})\n", + " .addNode(\"agent\", callModel)\n", + " .addNode(\"tools\", toolNode)\n", + " .addEdge(START, \"agent\")\n", + " .addConditionalEdges(\n", + " // First, we define the start node. We use `agent`.\n", + " // This means these are the edges taken after the `agent` node is called.\n", + " \"agent\",\n", + " // Next, we pass in the function that will determine which node is called next.\n", + " route,\n", + " {\n", + " tools: \"tools\",\n", + " [END]: END,\n", + " },\n", + " )\n", + " // We now add a normal edge from `tools` to `agent`.\n", + " // This means that after `tools` is called, `agent` node is called next.\n", + " .addEdge(\"tools\", \"agent\");\n", "\n", "// Finally, we compile it!\n", "// This compiles it into a LangChain Runnable,\n", @@ -382,7 +345,7 @@ }, { "cell_type": "markdown", - "id": "661243c5", + "id": "ae844f61", "metadata": {}, "source": [ "## Use it!\n", @@ -394,8 +357,8 @@ }, { "cell_type": "code", - "execution_count": 14, - "id": "8fb4d5da", + "execution_count": 10, + "id": "3ee8225f", "metadata": {}, "outputs": [ { @@ -406,9 +369,9 @@ "\n", "---\n", "\n", - "[ai]: [object Object] \n", + "[ai]: \n", "Tools: \n", - "- search({\"query\":\"weather in sf\"})\n", + "- search({\"query\":\"current weather in San Francisco\"})\n", "\n", "---\n", "\n", @@ -416,13 +379,21 @@ "\n", "---\n", "\n", - "[ai]: [object Object],[object Object] \n", + "[ai]: \n", "Tools: \n", - "- Response({\"temperature\":65,\"other_notes\":\"Sunny with a high of 70°F and a low of 55°F. 
Light winds around 10 mph.\"})\n", + "- Response({\"temperature\":64,\"other_notes\":\"Partly cloudy with a gentle breeze.\"})\n", "\n", "---\n", "\n" ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "unknown msg_type: comm_open\n", + "unknown msg_type: comm_msg\n" + ] } ], "source": [ @@ -436,8 +407,7 @@ "const prettyPrint = (message: BaseMessage) => {\n", " let txt = `[${message._getType()}]: ${message.content}`;\n", " if (\n", - " (isAIMessage(message) && (message as AIMessage)?.tool_calls?.length) ||\n", - " 0 > 0\n", + " isAIMessage(message) && (message as AIMessage)?.tool_calls?.length || 0 > 0\n", " ) {\n", " const tool_calls = (message as AIMessage)?.tool_calls\n", " ?.map((tc) => `- ${tc.name}(${JSON.stringify(tc.args)})`)\n", @@ -457,37 +427,24 @@ " console.log(\"\\n---\\n\");\n", "}" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a416e63d", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { - "jupytext": { - "text_representation": { - "extension": ".py", - "format_name": "percent", - "format_version": "1.3", - "jupytext_version": "1.16.1" - } - }, "kernelspec": { - "display_name": "Deno", + "display_name": "TypeScript", "language": "typescript", - "name": "deno" + "name": "tslab" }, "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, "file_extension": ".ts", - "mimetype": "text/x.typescript", + "mimetype": "text/typescript", "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.4.5" + "version": "3.7.2" } }, "nbformat": 4, diff --git a/examples/how-tos/stream-tokens.ipynb b/examples/how-tos/stream-tokens.ipynb index 51636dbe..172f47ad 100644 --- a/examples/how-tos/stream-tokens.ipynb +++ b/examples/how-tos/stream-tokens.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "9395cccb-a6d0-4d46-bad7-ed2d012af58a", + "id": "562ddb82", "metadata": {}, "source": [ "# Streaming Tokens\n", @@ -31,7 +31,7 @@ "
<div class=\"admonition tip\">\n",
    "    <p class=\"admonition-title\">Note</p>\n",
    "    <p>\n",
    "        In this how-to, we will create our agent from scratch to be transparent (but verbose). You can accomplish similar functionality using the <code>createReactAgent(model, tools=tool)</code> (API doc) constructor. This may be more appropriate if you are used to LangChain's <code>AgentExecutor</code> class.\n",
    "    </p>\n",
    "</div>
\n", "\n", @@ -47,8 +47,10 @@ { "cell_type": "code", "execution_count": 1, - "id": "f05706ae-f5c9-45e8-8c0a-2215703ee993", - "metadata": {}, + "id": "8e76833b", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [ { "name": "stdout", @@ -59,18 +61,18 @@ } ], "source": [ - "// Deno.env.set(\"OPENAI_API_KEY\", \"sk_...\");\n", + "// process.env.OPENAI_API_KEY = \"sk_...\";\n", "\n", "// Optional, add tracing in LangSmith\n", - "// Deno.env.set(\"LANGCHAIN_API_KEY\", \"ls__...\");\n", + "// process.env.LANGCHAIN_API_KEY = \"ls__...\";\n", "process.env.LANGCHAIN_CALLBACKS_BACKGROUND = \"true\";\n", "process.env.LANGCHAIN_TRACING_V2 = \"true\";\n", - "process.env.LANGCHAIN_PROJECT = \"Time Travel: LangGraphJS\";\n" + "process.env.LANGCHAIN_PROJECT = \"Time Travel: LangGraphJS\";" ] }, { "cell_type": "markdown", - "id": "6b47c4dc-cabe-4ee5-aaa8-9552f5d75ed2", + "id": "ab95dc97", "metadata": {}, "source": [ "## Define the state\n", @@ -80,32 +82,32 @@ }, { "cell_type": "code", - "execution_count": 16, - "id": "abea6e1f-c21c-4dfe-a4cd-89326e625c66", - "metadata": {}, + "execution_count": 2, + "id": "1648124b", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ "import { BaseMessage } from \"@langchain/core/messages\";\n", + "import { StateGraphArgs } from \"@langchain/langgraph\";\n", "\n", - "interface IState extends Record {\n", - " messages: {\n", - " value: (x: BaseMessage[], y: BaseMessage[]) => BaseMessage[];\n", - " default: () => BaseMessage[];\n", - " };\n", + "interface IState {\n", + " messages: BaseMessage[];\n", "}\n", "\n", "// This defines the agent state\n", - "const graphState: IState = {\n", + "const graphState: StateGraphArgs[\"channels\"] = {\n", " messages: {\n", " value: (x: BaseMessage[], y: BaseMessage[]) => x.concat(y),\n", " default: () => [],\n", " },\n", - "};\n" + "};" ] }, { "cell_type": "markdown", - "id": "408be71e-8d06-403a-8623-6e6c222c677a", + "id": "da50fbd8", "metadata": {}, "source": [ "## Set up the tools\n", @@ -119,9 +121,11 @@ }, { "cell_type": "code", - "execution_count": 17, - "id": "2ae6c71e-10ce-4783-9dfc-49e5b750b269", - "metadata": {}, + "execution_count": 3, + "id": "a8f1ae1c", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ "import { DynamicStructuredTool } from \"@langchain/core/tools\";\n", @@ -142,12 +146,12 @@ "\n", "await searchTool.invoke({ query: \"What's the weather like?\" });\n", "\n", - "const tools = [searchTool];\n" + "const tools = [searchTool];" ] }, { "cell_type": "markdown", - "id": "78eae3ed-b322-4afe-9111-d7dc204fdb77", + "id": "19b27cb3", "metadata": {}, "source": [ "We can now wrap these tools in a simple\n", @@ -158,19 +162,22 @@ }, { "cell_type": "code", - "execution_count": 18, - "id": "638fa6e9-cb76-4838-bc8b-02edf7da5ecc", - "metadata": {}, + "execution_count": 4, + "id": "f02278b1", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ "import { ToolNode } from \"@langchain/langgraph/prebuilt\";\n", + "import { BaseMessage } from \"@langchain/core/messages\";\n", "\n", - "const toolNode = new ToolNode(tools as any);\n" + "const toolNode = new ToolNode<{ messages: BaseMessage[] }>(tools);" ] }, { "cell_type": "markdown", - "id": "ea94b49b-fb11-463c-b34c-9d66b5327678", + "id": "dd55ee5a", "metadata": {}, "source": [ "## Set up the model\n", @@ -189,44 +196,48 @@ "
<div class=\"admonition tip\">\n",
    "    These model requirements are not general requirements for using LangGraph - they are just requirements for this one example.\n",
    "</div>
\n", - "\n" + "" ] }, { "cell_type": "code", - "execution_count": 19, - "id": "3a00435f-5e80-4a3e-a873-b308b1f781db", - "metadata": {}, + "execution_count": 5, + "id": "9c7210e7", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ "import { ChatOpenAI } from \"@langchain/openai\";\n", "\n", - "const model = new ChatOpenAI({ model: \"gpt-4o\" });\n" + "const model = new ChatOpenAI({ model: \"gpt-4o\" });" ] }, { "cell_type": "markdown", - "id": "3975c1ab-fcac-4448-a0cd-7969ae6f4e45", + "id": "73e59248", "metadata": {}, "source": [ "After we've done this, we should make sure the model knows that it has these\n", "tools available to call. We can do this by calling\n", - "[bindTools](https://v01.api.js.langchain.com/classes/langchain_core_language_models_chat_models.BaseChatModel.html#bindTools).\n" + "[bindTools](https://v01.api.js.langchain.com/classes/langchain_core_language_models_chat_models.BaseChatModel.html#bindTools)." ] }, { "cell_type": "code", - "execution_count": 20, - "id": "d0d4a57f-e7d0-48d7-8626-e83185c1236d", - "metadata": {}, + "execution_count": 6, + "id": "b4ff23ee", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ - "const boundModel = model.bindTools(tools);\n" + "const boundModel = model.bindTools(tools);" ] }, { "cell_type": "markdown", - "id": "1c0626d5-faae-4e24-bf1b-f01357c28627", + "id": "dbe67356", "metadata": {}, "source": [ "## Define the graph\n", @@ -240,8 +251,8 @@ }, { "cell_type": "code", - "execution_count": 27, - "id": "812b6d4b-9db7-490b-adae-ad9d933da56c", + "execution_count": 7, + "id": "0ba603bb", "metadata": {}, "outputs": [], "source": [ @@ -253,7 +264,7 @@ "} from \"@langchain/core/messages\";\n", "import { RunnableConfig } from \"@langchain/core/runnables\";\n", "\n", - "const routeMessage = (state: { messages: Array }) => {\n", + "const routeMessage = (state: IState) => {\n", " const { messages } = state;\n", " const lastMessage = messages[messages.length - 1] as AIMessage;\n", " // If no tools are called, we can finish (respond to the user)\n", @@ -265,7 +276,7 @@ "};\n", "\n", "const callModel = async (\n", - " state: { messages: Array },\n", + " state: IState,\n", " config: RunnableConfig,\n", ") => {\n", " const { messages } = state;\n", @@ -281,25 +292,21 @@ " return { messages: [finalMessage] };\n", "};\n", "\n", - "const workflow = new StateGraph({\n", - " channels: graphState as any,\n", - "});\n", - "\n", - "// Define the two nodes we will cycle between\n", - "workflow.addNode(\"agent\", callModel);\n", - "workflow.addNode(\"tools\", toolNode);\n", - "\n", - "// Set the entrypoint as `agent`\n", - "workflow.addEdge(START, \"agent\" as any);\n", - "workflow.addConditionalEdges(\"agent\" as any, routeMessage as any);\n", - "workflow.addEdge(\"tools\" as any, \"agent\" as any);\n", + "const workflow = new StateGraph({\n", + " channels: graphState,\n", + "})\n", + " .addNode(\"agent\", callModel)\n", + " .addNode(\"tools\", toolNode)\n", + " .addEdge(START, \"agent\")\n", + " .addConditionalEdges(\"agent\", routeMessage, { finish: END, tools: \"tools\" })\n", + " .addEdge(\"tools\", \"agent\");\n", "\n", "const graph = workflow.compile();" ] }, { "cell_type": "markdown", - "id": "4d2d8e1d", + "id": "a1ab3ad3", "metadata": {}, "source": [ "## Call streamEvents\n", @@ -310,8 +317,8 @@ }, { "cell_type": "code", - "execution_count": 28, - "id": "24711ca1-3fd8-46ca-b2c1-affa8cd45267", + "execution_count": 8, + "id": "cbcf7c39", "metadata": {}, "outputs": [ { @@ -319,7 +326,8 @@ "output_type": 
"stream", "text": [ "\n", - "Hi\n", + "Hello\n", + ",\n", " Jo\n", "!\n", " How\n", @@ -331,13 +339,22 @@ "?\n", "\n" ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Skipping write for channel branch:agent:routeMessage:undefined which has no readers\n" + ] } ], "source": [ "import { ChatGenerationChunk } from \"@langchain/core/outputs\";\n", "import { AIMessageChunk } from \"@langchain/core/messages\";\n", + "\n", "let config = { configurable: { thread_id: \"conversation-num-1\" } };\n", "let inputs = { messages: [[\"user\", \"Hi I'm Jo.\"]] };\n", + "\n", "for await (\n", " const event of await graph.streamEvents(inputs, {\n", " ...config,\n", @@ -359,20 +376,20 @@ }, { "cell_type": "markdown", - "id": "2a8c68d3", + "id": "055aacad", "metadata": {}, "source": [ "## How to stream tool calls\n", "\n", "Many providers support token-level streaming of tool invocations. To get the\n", "partially populated results, you can access the message chunks'\n", - "`tool_call_chunks` property.\n" + "`tool_call_chunks` property." ] }, { "cell_type": "code", - "execution_count": 29, - "id": "634a8ee5", + "execution_count": 9, + "id": "c704d23c", "metadata": {}, "outputs": [ { @@ -381,24 +398,24 @@ "text": [ "[\n", " {\n", - " name: 'search',\n", - " args: '',\n", - " id: 'call_4IOwPZBuBFBMeiwHpp7vUCv9',\n", - " index: 0\n", + " name: \u001b[32m'search'\u001b[39m,\n", + " args: \u001b[32m''\u001b[39m,\n", + " id: \u001b[32m'call_JXZDSJiBy6e0WKW27pohbYGH'\u001b[39m,\n", + " index: \u001b[33m0\u001b[39m\n", " }\n", "]\n", - "[ { name: undefined, args: '{\"', id: undefined, index: 0 } ]\n", - "[ { name: undefined, args: 'query', id: undefined, index: 0 } ]\n", - "[ { name: undefined, args: '\":\"', id: undefined, index: 0 } ]\n", - "[ { name: undefined, args: 'weather', id: undefined, index: 0 } ]\n", - "[ { name: undefined, args: ' forecast', id: undefined, index: 0 } ]\n", - "[ { name: undefined, args: ' for', id: undefined, index: 0 } ]\n", - "[ { name: undefined, args: ' today', id: undefined, index: 0 } ]\n", - "[ { name: undefined, args: '\"}', id: undefined, index: 0 } ]\n", + "[ { name: \u001b[90mundefined\u001b[39m, args: \u001b[32m'{\"'\u001b[39m, id: \u001b[90mundefined\u001b[39m, index: \u001b[33m0\u001b[39m } ]\n", + "[ { name: \u001b[90mundefined\u001b[39m, args: \u001b[32m'query'\u001b[39m, id: \u001b[90mundefined\u001b[39m, index: \u001b[33m0\u001b[39m } ]\n", + "[ { name: \u001b[90mundefined\u001b[39m, args: \u001b[32m'\":\"'\u001b[39m, id: \u001b[90mundefined\u001b[39m, index: \u001b[33m0\u001b[39m } ]\n", + "[ { name: \u001b[90mundefined\u001b[39m, args: \u001b[32m'current'\u001b[39m, id: \u001b[90mundefined\u001b[39m, index: \u001b[33m0\u001b[39m } ]\n", + "[ { name: \u001b[90mundefined\u001b[39m, args: \u001b[32m' weather'\u001b[39m, id: \u001b[90mundefined\u001b[39m, index: \u001b[33m0\u001b[39m } ]\n", + "[ { name: \u001b[90mundefined\u001b[39m, args: \u001b[32m'\"}'\u001b[39m, id: \u001b[90mundefined\u001b[39m, index: \u001b[33m0\u001b[39m } ]\n", "\n", "\n", - "Today's\n", + "The\n", + " current\n", " weather\n", + " today\n", " is\n", " cold\n", ",\n", @@ -410,8 +427,18 @@ "3\n", "℃\n", ".\n", + " Stay\n", + " warm\n", + "!\n", "\n" ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Skipping write for channel branch:agent:routeMessage:undefined which has no readers\n" + ] } ], "source": [ @@ -436,12 +463,6 @@ " }\n", "}" ] - }, - { - "cell_type": "markdown", - "id": "7f43897a", - "metadata": {}, - "source": [] } ], "metadata": { diff 
--git a/examples/how-tos/subgraph.ipynb b/examples/how-tos/subgraph.ipynb index 49f4004c..addee010 100644 --- a/examples/how-tos/subgraph.ipynb +++ b/examples/how-tos/subgraph.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "f84bb90b", "metadata": {}, "source": [ "# Subgraphs\n", @@ -36,21 +37,33 @@ }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 1, + "id": "d8624447", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Configuration: LangGraphJS\n" + ] + } + ], "source": [ - "// Deno.env.set(\"OPENAI_API_KEY\", \"sk_...\");\n", + "// process.env.OPENAI_API_KEY = \"sk_...\";\n", "\n", "// Optional, add tracing in LangSmith\n", - "// Deno.env.set(\"LANGCHAIN_API_KEY\", \"ls__...\");\n", - "// Deno.env.set(\"LANGCHAIN_CALLBACKS_BACKGROUND\", \"true\");\n", - "Deno.env.set(\"LANGCHAIN_TRACING_V2\", \"true\");\n", - "Deno.env.set(\"LANGCHAIN_PROJECT\", \"Configuration: LangGraphJS\");\n" + "// process.env.LANGCHAIN_API_KEY = \"ls__...\";\n", + "// process.env.LANGCHAIN_CALLBACKS_BACKGROUND = \"true\";\n", + "process.env.LANGCHAIN_TRACING_V2 = \"true\";\n", + "process.env.LANGCHAIN_PROJECT = \"Configuration: LangGraphJS\";" ] }, { "cell_type": "markdown", + "id": "f7c8afef", "metadata": {}, "source": [ "## Create Parent + Child Graphs\n", @@ -64,25 +77,35 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 2, + "id": "38d1f06f", "metadata": {}, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[WARN]: You have enabled LangSmith tracing without backgrounding callbacks.\n", + "[WARN]: If you are not using a serverless environment where you must wait for tracing calls to finish,\n", + "[WARN]: we suggest setting \"process.env.LANGCHAIN_CALLBACKS_BACKGROUND=true\" to avoid additional latency.\n" + ] + }, { "name": "stdout", "output_type": "stream", "text": [ "{\n", - " name: \"test\",\n", + " name: \u001b[32m'test'\u001b[39m,\n", " path: [\n", - " \"grandparent\",\n", - " \"parent\",\n", - " \"grandparent\",\n", - " \"parent\",\n", - " \"child_start\",\n", - " \"child_middle\",\n", - " \"child_end\",\n", - " \"sibling\",\n", - " \"fin\"\n", + " \u001b[32m'grandparent'\u001b[39m,\n", + " \u001b[32m'parent'\u001b[39m,\n", + " \u001b[32m'grandparent'\u001b[39m,\n", + " \u001b[32m'parent'\u001b[39m,\n", + " \u001b[32m'child_start'\u001b[39m,\n", + " \u001b[32m'child_middle'\u001b[39m,\n", + " \u001b[32m'child_end'\u001b[39m,\n", + " \u001b[32m'sibling'\u001b[39m,\n", + " \u001b[32m'fin'\u001b[39m\n", " ]\n", "}\n" ] @@ -90,6 +113,7 @@ ], "source": [ "import { END, START, StateGraph } from \"@langchain/langgraph\";\n", + "import { StateGraphArgs } from \"@langchain/langgraph\";\n", "\n", "function reduceList(\n", " left?: string[] | string,\n", @@ -110,17 +134,11 @@ "\n", "// Define the state type\n", "interface IState {\n", - " name: {\n", - " value: (x: string, y?: string) => string;\n", - " default: () => string;\n", - " };\n", - " path: {\n", - " value: (x?: string[], y?: string[] | string) => string[];\n", - " default: () => string[];\n", - " };\n", + " name: string;\n", + " path: string[];\n", "}\n", "\n", - "const graphState: IState = {\n", + "const graphState: StateGraphArgs[\"channels\"] = {\n", " name: {\n", " // Overwrite name if a new one is provided\n", " value: (x: string, y?: string) => (y ? 
y : x),\n", @@ -133,33 +151,35 @@ " },\n", "};\n", "\n", - "const childBuilder = new StateGraph({ channels: graphState });\n", - "childBuilder.addNode(\"child_start\", (state) => ({ path: [\"child_start\"] }));\n", - "childBuilder.addEdge(START, \"child_start\");\n", - "childBuilder.addNode(\"child_middle\", (state) => ({ path: [\"child_middle\"] }));\n", - "childBuilder.addNode(\"child_end\", (state) => ({ path: [\"child_end\"] }));\n", - "childBuilder.addEdge(\"child_start\", \"child_middle\");\n", - "childBuilder.addEdge(\"child_middle\", \"child_end\");\n", - "childBuilder.addEdge(\"child_end\", END);\n", - "\n", - "const builder = new StateGraph({\n", + "const childBuilder = new StateGraph({ channels: graphState });\n", + "childBuilder\n", + " .addNode(\"child_start\", (state) => ({ path: [\"child_start\"] }))\n", + " .addEdge(START, \"child_start\")\n", + " .addNode(\"child_middle\", (state) => ({ path: [\"child_middle\"] }))\n", + " .addNode(\"child_end\", (state) => ({ path: [\"child_end\"] }))\n", + " .addEdge(\"child_start\", \"child_middle\")\n", + " .addEdge(\"child_middle\", \"child_end\")\n", + " .addEdge(\"child_end\", END);\n", + "\n", + "const builder = new StateGraph({\n", " channels: graphState,\n", "});\n", "\n", - "builder.addNode(\"grandparent\", (state) => ({ path: [\"grandparent\"] }));\n", - "builder.addEdge(START, \"grandparent\");\n", - "builder.addNode(\"parent\", (state) => ({ path: [\"parent\"] }));\n", - "builder.addNode(\"child\", childBuilder.compile());\n", - "builder.addNode(\"sibling\", (state) => ({ path: [\"sibling\"] }));\n", - "builder.addNode(\"fin\", (state) => ({ path: [\"fin\"] }));\n", - "\n", - "// Add connections\n", - "builder.addEdge(\"grandparent\", \"parent\");\n", - "builder.addEdge(\"parent\", \"child\");\n", - "builder.addEdge(\"parent\", \"sibling\");\n", - "builder.addEdge(\"child\", \"fin\");\n", - "builder.addEdge(\"sibling\", \"fin\");\n", - "builder.addEdge(\"fin\", END);\n", + "builder\n", + " .addNode(\"grandparent\", (state) => ({ path: [\"grandparent\"] }))\n", + " .addEdge(START, \"grandparent\")\n", + " .addNode(\"parent\", (state) => ({ path: [\"parent\"] }))\n", + " .addNode(\"child\", childBuilder.compile())\n", + " .addNode(\"sibling\", (state) => ({ path: [\"sibling\"] }))\n", + " .addNode(\"fin\", (state) => ({ path: [\"fin\"] }))\n", + " // Add connections\n", + " .addEdge(\"grandparent\", \"parent\")\n", + " .addEdge(\"parent\", \"child\")\n", + " .addEdge(\"parent\", \"sibling\")\n", + " .addEdge(\"child\", \"fin\")\n", + " .addEdge(\"sibling\", \"fin\")\n", + " .addEdge(\"fin\", END);\n", + "\n", "const graph = builder.compile();\n", "\n", "const result1 = await graph.invoke({ name: \"test\" });\n", @@ -168,6 +188,7 @@ }, { "cell_type": "markdown", + "id": "9f3f4e12", "metadata": {}, "source": [ "Notice here that the `[\"grandparent\", \"parent\"]` sequence is duplicated! 
This is\n", @@ -192,28 +213,10 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, + "id": "34a51908", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " name: \"test\",\n", - " path: [\n", - " { val: \"grandparent\", id: \"1f8694a6-daba-4acf-86c5-394ceff7d9f1\" },\n", - " { val: \"parent\", id: \"7e9505db-be27-465c-bdf3-fca5981449ff\" },\n", - " { val: \"child_start\", id: \"e0c66b89-13e4-441b-93a2-3e7925294688\" },\n", - " { val: \"child_middle\", id: \"ceeaae44-1342-42c5-a670-c69601586433\" },\n", - " { val: \"child_end\", id: \"a5b00b2d-abca-4d87-ac72-8c85c3b7aa24\" },\n", - " { val: \"sibling\", id: \"4acec7d3-a573-4d9d-96c2-ebcad5f18227\" },\n", - " { val: \"fin\", id: \"2f72a96c-1097-40a5-b19f-558dcf89d8fc\" }\n", - " ]\n", - "}\n" - ] - } - ], + "outputs": [], "source": [ "import { v4 as uuidv4 } from \"uuid\";\n", "\n", @@ -264,17 +267,11 @@ "}\n", "\n", "interface IStateWithIds {\n", - " name: {\n", - " value: (x: string, y?: string) => string;\n", - " default: () => string;\n", - " };\n", - " path: {\n", - " value: (x?: ValWithId[], y?: ValWithId[] | ValWithId) => ValWithId[];\n", - " default: () => ValWithId[];\n", - " };\n", + " name: string;\n", + " path: ValWithId[];\n", "}\n", "\n", - "const graphState2: IStateWithIds = {\n", + "const graphState2: StateGraphArgs[\"channels\"] = {\n", " name: {\n", " // Overwrite name if a new one is provided\n", " value: (x: string, y?: string) => (y ? y : x),\n", @@ -287,42 +284,46 @@ " },\n", "};\n", "\n", - "const childBuilderWithIds = new StateGraph({ channels: graphState2 });\n", - "\n", - "childBuilderWithIds.addNode(\"child_start\", (state) => ({\n", - " path: [{ val: \"child_start\" }],\n", - "}));\n", - "childBuilderWithIds.setEntryPoint(\"child_start\");\n", - "childBuilderWithIds.addNode(\"child_middle\", (state) => ({\n", - " path: [{ val: \"child_middle\" }],\n", - "}));\n", - "childBuilderWithIds.addNode(\"child_end\", (state) => ({\n", - " path: [{ val: \"child_end\" }],\n", - "}));\n", - "childBuilderWithIds.addEdge(\"child_start\", \"child_middle\");\n", - "childBuilderWithIds.addEdge(\"child_middle\", \"child_end\");\n", - "childBuilderWithIds.setFinishPoint(\"child_end\");\n", - "\n", - "const builderWithIds = new StateGraph({\n", + "const childBuilderWithIds = new StateGraph({\n", + " channels: graphState2,\n", + "});\n", + "\n", + "childBuilderWithIds\n", + " .addNode(\"child_start\", (state) => ({\n", + " path: [{ val: \"child_start\" }],\n", + " }))\n", + " .addEdge(START, \"child_start\")\n", + " .addNode(\"child_middle\", (state) => ({\n", + " path: [{ val: \"child_middle\" }],\n", + " }))\n", + " .addNode(\"child_end\", (state) => ({\n", + " path: [{ val: \"child_end\" }],\n", + " }))\n", + " .addEdge(\"child_start\", \"child_middle\")\n", + " .addEdge(\"child_middle\", \"child_end\")\n", + " .addEdge(\"child_end\", END);\n", + "\n", + "const builderWithIds = new StateGraph({\n", " channels: graphState2,\n", "});\n", "\n", - "builderWithIds.addNode(\"grandparent\", (state) => ({\n", - " path: [{ val: \"grandparent\" }],\n", - "}));\n", - "builderWithIds.setEntryPoint(\"grandparent\");\n", - "builderWithIds.addNode(\"parent\", (state) => ({ path: [{ val: \"parent\" }] }));\n", - "builderWithIds.addNode(\"child\", childBuilderWithIds.compile());\n", - "builderWithIds.addNode(\"sibling\", (state) => ({ path: [{ val: \"sibling\" }] }));\n", - "builderWithIds.addNode(\"fin\", (state) => ({ path: [{ val: \"fin\" }] }));\n", - "\n", - "// 
Add connections\n", - "builderWithIds.addEdge(\"grandparent\", \"parent\");\n", - "builderWithIds.addEdge(\"parent\", \"child\");\n", - "builderWithIds.addEdge(\"parent\", \"sibling\");\n", - "builderWithIds.addEdge(\"child\", \"fin\");\n", - "builderWithIds.addEdge(\"sibling\", \"fin\");\n", - "builderWithIds.setFinishPoint(\"fin\");\n", + "builderWithIds\n", + " .addNode(\"grandparent\", (state) => ({\n", + " path: [{ val: \"grandparent\" }],\n", + " }))\n", + " .addEdge(START, \"grandparent\")\n", + " .addNode(\"parent\", (state) => ({ path: [{ val: \"parent\" }] }))\n", + " .addNode(\"child\", childBuilderWithIds.compile())\n", + " .addNode(\"sibling\", (state) => ({ path: [{ val: \"sibling\" }] }))\n", + " .addNode(\"fin\", (state) => ({ path: [{ val: \"fin\" }] }))\n", + " // Add connections\n", + " .addEdge(\"grandparent\", \"parent\")\n", + " .addEdge(\"parent\", \"child\")\n", + " .addEdge(\"parent\", \"sibling\")\n", + " .addEdge(\"child\", \"fin\")\n", + " .addEdge(\"sibling\", \"fin\")\n", + " .addEdge(\"fin\", END);\n", + "\n", "const graphWithIds = builderWithIds.compile();\n", "\n", "const result2 = await graphWithIds.invoke({ name: \"test\" });\n", @@ -331,6 +332,7 @@ }, { "cell_type": "markdown", + "id": "5dad1a18", "metadata": {}, "source": [ "Duplicates are gone!" @@ -338,25 +340,32 @@ }, { "cell_type": "markdown", + "id": "b1b7f4e1", "metadata": {}, - "source": [] + "source": [ + "```\n", + "```" + ] } ], "metadata": { "kernelspec": { - "display_name": "Deno", + "display_name": "TypeScript", "language": "typescript", - "name": "deno" + "name": "tslab" }, "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, "file_extension": ".ts", - "mimetype": "text/x.typescript", + "mimetype": "text/typescript", "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.4.5" + "version": "3.7.2" } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/examples/how-tos/time-travel.ipynb b/examples/how-tos/time-travel.ipynb index bbb3d80f..83f7704f 100644 --- a/examples/how-tos/time-travel.ipynb +++ b/examples/how-tos/time-travel.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "9395cccb-a6d0-4d46-bad7-ed2d012af58a", + "id": "90616e9e", "metadata": {}, "source": [ "# Get State and Update State\n", @@ -42,7 +42,7 @@ "
<div class=\"admonition tip\">\n",
    "    <p class=\"admonition-title\">Note</p>\n",
    "    <p>\n",
    "        In this how-to, we will create our agent from scratch to be transparent (but verbose). You can accomplish similar functionality using the <code>createReactAgent(model, tools=tool, checkpointer=checkpointer)</code> (API doc) constructor. This may be more appropriate if you are used to LangChain's <code>AgentExecutor</code> class.\n",
    "    </p>\n",
    "</div>
\n", "\n", @@ -56,22 +56,32 @@ { "cell_type": "code", "execution_count": 1, - "id": "f05706ae-f5c9-45e8-8c0a-2215703ee993", - "metadata": {}, - "outputs": [], + "id": "9a7df1d0", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Time Travel: LangGraphJS\n" + ] + } + ], "source": [ - "// Deno.env.set(\"OPENAI_API_KEY\", \"sk_...\");\n", + "// process.env.OPENAI_API_KEY = \"sk_...\";\n", "\n", "// Optional, add tracing in LangSmith\n", - "// Deno.env.set(\"LANGCHAIN_API_KEY\", \"ls__...\");\n", - "Deno.env.set(\"LANGCHAIN_CALLBACKS_BACKGROUND\", \"true\");\n", - "Deno.env.set(\"LANGCHAIN_TRACING_V2\", \"true\");\n", - "Deno.env.set(\"LANGCHAIN_PROJECT\", \"Time Travel: LangGraphJS\");\n" + "// process.env.LANGCHAIN_API_KEY = \"ls__...\";\n", + "process.env.LANGCHAIN_CALLBACKS_BACKGROUND = \"true\";\n", + "process.env.LANGCHAIN_TRACING_V2 = \"true\";\n", + "process.env.LANGCHAIN_PROJECT = \"Time Travel: LangGraphJS\";" ] }, { "cell_type": "markdown", - "id": "6b47c4dc-cabe-4ee5-aaa8-9552f5d75ed2", + "id": "e79ba1c0", "metadata": {}, "source": [ "## Define the state\n", @@ -82,31 +92,31 @@ { "cell_type": "code", "execution_count": 2, - "id": "abea6e1f-c21c-4dfe-a4cd-89326e625c66", - "metadata": {}, + "id": "44968352", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ "import { BaseMessage } from \"@langchain/core/messages\";\n", + "import { StateGraphArgs } from \"@langchain/langgraph\";\n", "\n", "interface IState {\n", - " messages: {\n", - " value: (x: BaseMessage[], y: BaseMessage[]) => BaseMessage[];\n", - " default: () => BaseMessage[];\n", - " };\n", + " messages: BaseMessage[];\n", "}\n", "\n", "// This defines the agent state\n", - "const graphState: IState = {\n", + "const graphState: StateGraphArgs[\"channels\"] = {\n", " messages: {\n", " value: (x: BaseMessage[], y: BaseMessage[]) => x.concat(y),\n", " default: () => [],\n", " },\n", - "};\n" + "};" ] }, { "cell_type": "markdown", - "id": "408be71e-8d06-403a-8623-6e6c222c677a", + "id": "47c88187", "metadata": {}, "source": [ "## Set up the tools\n", @@ -121,20 +131,11 @@ { "cell_type": "code", "execution_count": 3, - "id": "2ae6c71e-10ce-4783-9dfc-49e5b750b269", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "\u001b[32m\"Cold, with a low of 13 ℃\"\u001b[39m" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], + "id": "b22edfc4", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [], "source": [ "import { DynamicStructuredTool } from \"@langchain/core/tools\";\n", "import { z } from \"zod\";\n", @@ -154,12 +155,12 @@ "\n", "await searchTool.invoke({ query: \"What's the weather like?\" });\n", "\n", - "const tools = [searchTool];\n" + "const tools = [searchTool];" ] }, { "cell_type": "markdown", - "id": "78eae3ed-b322-4afe-9111-d7dc204fdb77", + "id": "7c764430", "metadata": {}, "source": [ "We can now wrap these tools in a simple\n", @@ -170,19 +171,22 @@ }, { "cell_type": "code", - "execution_count": 4, - "id": "638fa6e9-cb76-4838-bc8b-02edf7da5ecc", - "metadata": {}, + "execution_count": 8, + "id": "0cc63f1f", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ "import { ToolNode } from \"@langchain/langgraph/prebuilt\";\n", + "import { BaseMessage } from \"@langchain/core/messages\";\n", "\n", - "const toolNode = new ToolNode(tools);\n" + "const toolNode = new ToolNode<{ messages: BaseMessage[] }>(tools);" ] }, { "cell_type": 
"markdown", - "id": "ea94b49b-fb11-463c-b34c-9d66b5327678", + "id": "cc409cd5", "metadata": {}, "source": [ "## Set up the model\n", @@ -206,20 +210,21 @@ }, { "cell_type": "code", - "execution_count": 23, - "id": "3a00435f-5e80-4a3e-a873-b308b1f781db", - "metadata": {}, + "execution_count": 9, + "id": "dae9ab9c", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ "import { ChatOpenAI } from \"@langchain/openai\";\n", "\n", - "// const model = new ChatAnthropic({ model: \"claude-3-haiku-20240307\" });\n", - "const model = new ChatOpenAI({ model: \"gpt-4o\" });\n" + "const model = new ChatOpenAI({ model: \"gpt-4o\" });" ] }, { "cell_type": "markdown", - "id": "3975c1ab-fcac-4448-a0cd-7969ae6f4e45", + "id": "b5cfd558", "metadata": {}, "source": [ "After we've done this, we should make sure the model knows that it has these\n", @@ -229,46 +234,19 @@ }, { "cell_type": "code", - "execution_count": 24, - "id": "d0d4a57f-e7d0-48d7-8626-e83185c1236d", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{\n", - " tools: [\n", - " {\n", - " type: \u001b[32m\"function\"\u001b[39m,\n", - " function: {\n", - " name: \u001b[32m\"search\"\u001b[39m,\n", - " description: \u001b[32m\"Use to surf the web, fetch current information, check the weather, and retrieve other information.\"\u001b[39m,\n", - " parameters: {\n", - " type: \u001b[32m\"object\"\u001b[39m,\n", - " properties: \u001b[36m[Object]\u001b[39m,\n", - " required: \u001b[36m[Array]\u001b[39m,\n", - " additionalProperties: \u001b[33mfalse\u001b[39m,\n", - " \u001b[32m\"$schema\"\u001b[39m: \u001b[32m\"http://json-schema.org/draft-07/schema#\"\u001b[39m\n", - " }\n", - " }\n", - " }\n", - " ]\n", - "}" - ] - }, - "execution_count": 24, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": 10, + "id": "ca438e74", + "metadata": { + "lines_to_next_cell": 2 + }, + "outputs": [], "source": [ - "const boundModel = model.bindTools(tools);\n", - "boundModel.kwargs;\n" + "const boundModel = model.bindTools(tools);" ] }, { "cell_type": "markdown", - "id": "1c0626d5-faae-4e24-bf1b-f01357c28627", + "id": "4a2b8a4f", "metadata": {}, "source": [ "## Define the graph\n", @@ -282,92 +260,21 @@ }, { "cell_type": "code", - "execution_count": 25, - "id": "812b6d4b-9db7-490b-adae-ad9d933da56c", + "execution_count": 11, + "id": "1a29ec2a", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "StateGraph {\n", - " nodes: {\n", - " agent: RunnableLambda {\n", - " lc_serializable: \u001b[33mfalse\u001b[39m,\n", - " lc_kwargs: { func: \u001b[36m[AsyncFunction: callModel]\u001b[39m },\n", - " lc_runnable: \u001b[33mtrue\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"runnables\"\u001b[39m ],\n", - " func: \u001b[36m[AsyncFunction: callModel]\u001b[39m\n", - " },\n", - " tools: ToolNode {\n", - " lc_serializable: \u001b[33mfalse\u001b[39m,\n", - " lc_kwargs: {},\n", - " lc_runnable: \u001b[33mtrue\u001b[39m,\n", - " name: \u001b[32m\"tools\"\u001b[39m,\n", - " lc_namespace: [ \u001b[32m\"langgraph\"\u001b[39m ],\n", - " func: \u001b[36m[Function: func]\u001b[39m,\n", - " tags: \u001b[90mundefined\u001b[39m,\n", - " config: { tags: [] },\n", - " trace: \u001b[33mtrue\u001b[39m,\n", - " recurse: \u001b[33mtrue\u001b[39m,\n", - " tools: [\n", - " DynamicStructuredTool {\n", - " lc_serializable: \u001b[33mfalse\u001b[39m,\n", - " lc_kwargs: \u001b[36m[Object]\u001b[39m,\n", - " lc_runnable: 
\u001b[33mtrue\u001b[39m,\n", - " name: \u001b[32m\"search\"\u001b[39m,\n", - " verbose: \u001b[33mfalse\u001b[39m,\n", - " callbacks: \u001b[90mundefined\u001b[39m,\n", - " tags: [],\n", - " metadata: {},\n", - " returnDirect: \u001b[33mfalse\u001b[39m,\n", - " description: \u001b[32m\"Use to surf the web, fetch current information, check the weather, and retrieve other information.\"\u001b[39m,\n", - " func: \u001b[36m[AsyncFunction: func]\u001b[39m,\n", - " schema: \u001b[36m[ZodObject]\u001b[39m\n", - " }\n", - " ]\n", - " }\n", - " },\n", - " edges: Set(2) { [ \u001b[32m\"__start__\"\u001b[39m, \u001b[32m\"agent\"\u001b[39m ], [ \u001b[32m\"tools\"\u001b[39m, \u001b[32m\"agent\"\u001b[39m ] },\n", - " branches: {\n", - " agent: {\n", - " routeMessage: Branch {\n", - " condition: \u001b[36m[Function: routeMessage]\u001b[39m,\n", - " ends: \u001b[90mundefined\u001b[39m,\n", - " then: \u001b[90mundefined\u001b[39m\n", - " }\n", - " }\n", - " },\n", - " entryPoint: \u001b[90mundefined\u001b[39m,\n", - " compiled: \u001b[33mtrue\u001b[39m,\n", - " supportMultipleEdges: \u001b[33mtrue\u001b[39m,\n", - " channels: {\n", - " messages: BinaryOperatorAggregate {\n", - " lc_graph_name: \u001b[32m\"BinaryOperatorAggregate\"\u001b[39m,\n", - " value: [],\n", - " operator: \u001b[36m[Function: value]\u001b[39m,\n", - " initialValueFactory: \u001b[36m[Function: default]\u001b[39m\n", - " }\n", - " },\n", - " waitingEdges: Set(0) {}\n", - "}" - ] - }, - "execution_count": 25, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ - "import { AIMessage, BaseMessage } from \"@langchain/core/messages\";\n", + "import { END, START, StateGraph } from \"@langchain/langgraph\";\n", + "import { AIMessage } from \"@langchain/core/messages\";\n", "import { RunnableConfig } from \"@langchain/core/runnables\";\n", - "import { END, MemorySaver, START, StateGraph } from \"@langchain/langgraph\";\n", + "import { MemorySaver } from \"@langchain/langgraph\";\n", "\n", - "const routeMessage = (state: { messages: Array }) => {\n", + "const routeMessage = (state: IState) => {\n", " const { messages } = state;\n", " const lastMessage = messages[messages.length - 1] as AIMessage;\n", " // If no tools are called, we can finish (respond to the user)\n", - " if (!lastMessage.tool_calls || lastMessage.tool_calls.length === 0) {\n", + " if (!lastMessage?.tool_calls?.length) {\n", " return END;\n", " }\n", " // Otherwise if there is, we continue and call the tools\n", @@ -375,7 +282,7 @@ "};\n", "\n", "const callModel = async (\n", - " state: { messages: Array },\n", + " state: IState,\n", " config: RunnableConfig,\n", ") => {\n", " const { messages } = state;\n", @@ -383,18 +290,14 @@ " return { messages: [response] };\n", "};\n", "\n", - "const workflow = new StateGraph({\n", + "const workflow = new StateGraph({\n", " channels: graphState,\n", - "});\n", - "\n", - "// Define the two nodes we will cycle between\n", - "workflow.addNode(\"agent\", callModel);\n", - "workflow.addNode(\"tools\", toolNode);\n", - "\n", - "// Set the entrypoint as `agent`\n", - "workflow.addEdge(START, \"agent\");\n", - "workflow.addConditionalEdges(\"agent\", routeMessage);\n", - "workflow.addEdge(\"tools\", \"agent\");\n", + "})\n", + " .addNode(\"agent\", callModel)\n", + " .addNode(\"tools\", toolNode)\n", + " .addEdge(START, \"agent\")\n", + " .addConditionalEdges(\"agent\", routeMessage, { finish: END, tools: \"tools\" })\n", + " .addEdge(\"tools\", \"agent\");\n", "\n", "// Here we only save in-memory\n", 
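    "// (MemorySaver is an in-memory checkpointer: checkpoints are keyed by the\n",
    "// thread_id passed via `configurable` and are lost when the process exits,\n",
    "// so use a persistent checkpointer implementation for real deployments.)\n",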
"let memory = new MemorySaver();\n", @@ -403,7 +306,7 @@ }, { "cell_type": "markdown", - "id": "4d2d8e1d", + "id": "a6dd42a3", "metadata": {}, "source": [ "## Interacting with the Agent\n", @@ -414,24 +317,31 @@ }, { "cell_type": "code", - "execution_count": 26, - "id": "24711ca1-3fd8-46ca-b2c1-affa8cd45267", + "execution_count": 12, + "id": "0749329a", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "[ \"user\", \"Hi I'm Jo.\" ]\n", + "[ \u001b[32m'user'\u001b[39m, \u001b[32m\"Hi I'm Jo.\"\u001b[39m ]\n", "-----\n", "\n" ] }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Skipping write for channel branch:agent:routeMessage:undefined which has no readers\n" + ] + }, { "name": "stdout", "output_type": "stream", "text": [ - "Hello Jo! How can I assist you today?\n", + "Hello, Jo! How can I assist you today?\n", "-----\n", "\n" ] @@ -460,7 +370,7 @@ }, { "cell_type": "markdown", - "id": "a11210c6", + "id": "221f323d", "metadata": {}, "source": [ "See LangSmith example run here\n", @@ -474,53 +384,43 @@ }, { "cell_type": "code", - "execution_count": 27, - "id": "41d017ef", - "metadata": {}, + "execution_count": 13, + "id": "6ff5468d", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [ { - "data": { - "text/plain": [ - "{\n", - " messages: [\n", - " [ \u001b[32m\"user\"\u001b[39m, \u001b[32m\"Hi I'm Jo.\"\u001b[39m ],\n", - " AIMessage {\n", - " lc_serializable: \u001b[33mtrue\u001b[39m,\n", - " lc_kwargs: {\n", - " content: \u001b[32m\"Hello Jo! How can I assist you today?\"\u001b[39m,\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {},\n", - " response_metadata: { tokenUsage: \u001b[36m[Object]\u001b[39m, finish_reason: \u001b[32m\"stop\"\u001b[39m }\n", - " },\n", - " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", - " content: \u001b[32m\"Hello Jo! How can I assist you today?\"\u001b[39m,\n", - " name: \u001b[90mundefined\u001b[39m,\n", - " additional_kwargs: {},\n", - " response_metadata: {\n", - " tokenUsage: { completionTokens: \u001b[33m11\u001b[39m, promptTokens: \u001b[33m68\u001b[39m, totalTokens: \u001b[33m79\u001b[39m },\n", - " finish_reason: \u001b[32m\"stop\"\u001b[39m\n", - " },\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - " }\n", - " ]\n", - "}" - ] - }, - "execution_count": 27, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " messages: [\n", + " [ \u001b[32m'user'\u001b[39m, \u001b[32m\"Hi I'm Jo.\"\u001b[39m ],\n", + " AIMessage {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: \u001b[36m[Object]\u001b[39m,\n", + " lc_namespace: \u001b[36m[Array]\u001b[39m,\n", + " content: \u001b[32m'Hello, Jo! How can I assist you today?'\u001b[39m,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " additional_kwargs: {},\n", + " response_metadata: \u001b[36m[Object]\u001b[39m,\n", + " tool_calls: [],\n", + " invalid_tool_calls: []\n", + " }\n", + " ]\n", + "}\n" + ] } ], "source": [ "let checkpoint = await graph.getState(config);\n", - "checkpoint.values;\n" + "checkpoint.values;" ] }, { "cell_type": "markdown", - "id": "4849e28b", + "id": "571077e2", "metadata": {}, "source": [ "The current state is the two messages we've seen above, 1. 
the HumanMessage we\n", @@ -532,28 +432,27 @@ }, { "cell_type": "code", - "execution_count": 28, - "id": "1b6df77b", - "metadata": {}, + "execution_count": 14, + "id": "22b25946", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [ { - "data": { - "text/plain": [ - "[]" - ] - }, - "execution_count": 28, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "[]\n" + ] } ], "source": [ - "checkpoint.next;\n" + "checkpoint.next;" ] }, { "cell_type": "markdown", - "id": "87346938", + "id": "889cd8ce", "metadata": {}, "source": [ "## Let's get it to execute a tool\n", @@ -564,30 +463,43 @@ }, { "cell_type": "code", - "execution_count": 29, - "id": "8537fd9d-083c-4096-91c5-44a2e66b18c6", + "execution_count": 15, + "id": "873b3438", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "[ \"user\", \"What's the weather like in SF currently?\" ]\n", + "[ \u001b[32m'user'\u001b[39m, \u001b[32m\"What's the weather like in SF currently?\"\u001b[39m ]\n", "-----\n", "\n", "[\n", " {\n", - " name: \"search\",\n", - " args: { query: \"current weather in San Francisco\" },\n", - " id: \"call_9nR1UWZgVwW3BtEeMSmjTkHp\"\n", + " name: \u001b[32m'search'\u001b[39m,\n", + " args: { query: \u001b[32m'current weather in San Francisco'\u001b[39m },\n", + " id: \u001b[32m'call_3dj210cRFWwO6ZXbKskiXqn6'\u001b[39m\n", " }\n", "]\n", "-----\n", "\n", "Cold, with a low of 13 ℃\n", "-----\n", - "\n", - "The current weather in San Francisco is cool, with a temperature of around 13°C (55°F). Do you need any other information?\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Skipping write for channel branch:agent:routeMessage:undefined which has no readers\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The current weather in San Francisco is 13°C and cold.\n", "-----\n", "\n" ] @@ -615,7 +527,7 @@ }, { "cell_type": "markdown", - "id": "6ffc172a", + "id": "6384c1e3", "metadata": {}, "source": [ "See the trace of the above execution here:\n", @@ -629,7 +541,7 @@ }, { "cell_type": "markdown", - "id": "d383edee-1a8c-4b66-b668-ee760918bede", + "id": "3a3fe0ce", "metadata": {}, "source": [ "### Pause before tools\n", @@ -642,22 +554,22 @@ }, { "cell_type": "code", - "execution_count": 30, - "id": "fdf5046f", + "execution_count": 16, + "id": "736be42e", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "[ \"user\", \"What's the weather like in SF currently?\" ]\n", + "[ \u001b[32m'user'\u001b[39m, \u001b[32m\"What's the weather like in SF currently?\"\u001b[39m ]\n", "-----\n", "\n", "[\n", " {\n", - " name: \"search\",\n", - " args: { query: \"current weather in San Francisco\" },\n", - " id: \"call_ONueXiVpSg1lC5MIFDVmDswq\"\n", + " name: \u001b[32m'search'\u001b[39m,\n", + " args: { query: \u001b[32m'current weather in San Francisco'\u001b[39m },\n", + " id: \u001b[32m'call_WRrsB6evR9HRlKvTpeKdTeMA'\u001b[39m\n", " }\n", "]\n", "-----\n", @@ -693,7 +605,7 @@ }, { "cell_type": "markdown", - "id": "c0172432", + "id": "bf27f2b4", "metadata": {}, "source": [ "## Get State\n", @@ -704,29 +616,28 @@ }, { "cell_type": "code", - "execution_count": 31, - "id": "41e2d0f3", - "metadata": {}, + "execution_count": 17, + "id": "0f434f69", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [ { - "data": { - "text/plain": [ - "[ \u001b[32m\"tools\"\u001b[39m ]" - ] - }, - "execution_count": 31, - "metadata": {}, - "output_type": 
"execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "[ \u001b[32m'tools'\u001b[39m ]\n" + ] } ], "source": [ "let snapshot = await graphWithInterrupt.getState(config);\n", - "snapshot.next;\n" + "snapshot.next;" ] }, { "cell_type": "markdown", - "id": "f1c50e6b", + "id": "1f78ad8f", "metadata": {}, "source": [ "## Resume\n", @@ -737,8 +648,8 @@ }, { "cell_type": "code", - "execution_count": 33, - "id": "7f43897a", + "execution_count": 18, + "id": "fd4d7eff", "metadata": {}, "outputs": [ { @@ -747,8 +658,21 @@ "text": [ "Cold, with a low of 13 ℃\n", "-----\n", - "\n", - "The current weather in San Francisco is cold, with a temperature of around 13℃.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Skipping write for channel branch:agent:routeMessage:undefined which has no readers\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The current weather in San Francisco is cold, with a low of 13°C (55°F).\n", "-----\n", "\n" ] @@ -775,7 +699,7 @@ }, { "cell_type": "markdown", - "id": "24c2e176", + "id": "2885d91d", "metadata": {}, "source": [ "## Check full history\n", @@ -785,376 +709,99 @@ }, { "cell_type": "code", - "execution_count": 34, - "id": "41cb0ae6", - "metadata": {}, + "execution_count": 24, + "id": "bc7acb70", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "{\n", - " values: {\n", - " messages: [\n", - " [ \"user\", \"What's the weather like in SF currently?\" ],\n", - " AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"\",\n", - " tool_calls: [Array],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: [Object],\n", - " response_metadata: [Object]\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"\",\n", - " name: undefined,\n", - " additional_kwargs: { tool_calls: [Array] },\n", - " response_metadata: { tokenUsage: [Object], finish_reason: \"tool_calls\" },\n", - " tool_calls: [ [Object] ],\n", - " invalid_tool_calls: []\n", - " },\n", - " ToolMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " name: \"search\",\n", - " content: \"Cold, with a low of 13 ℃\",\n", - " tool_call_id: \"call_ONueXiVpSg1lC5MIFDVmDswq\",\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"Cold, with a low of 13 ℃\",\n", - " name: \"search\",\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " tool_call_id: \"call_ONueXiVpSg1lC5MIFDVmDswq\"\n", - " },\n", - " AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"The current weather in San Francisco is cold, with a temperature of around 13℃.\",\n", - " tool_calls: [],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: {},\n", - " response_metadata: [Object]\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"The current weather in San Francisco is cold, with a temperature of around 13℃.\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: { tokenUsage: [Object], finish_reason: \"stop\" },\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - " }\n", - " ]\n", - " },\n", + " values: { messages: [ \u001b[36m[Array]\u001b[39m, \u001b[36m[AIMessage]\u001b[39m, \u001b[36m[ToolMessage]\u001b[39m, \u001b[36m[AIMessage]\u001b[39m ] },\n", " next: [],\n", - " metadata: {\n", - " 
source: \"loop\",\n", - " step: 3,\n", - " writes: {\n", - " messages: [\n", - " [ \"user\", \"What's the weather like in SF currently?\" ],\n", - " AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: \"\",\n", - " name: undefined,\n", - " additional_kwargs: [Object],\n", - " response_metadata: [Object],\n", - " tool_calls: [Array],\n", - " invalid_tool_calls: []\n", - " },\n", - " ToolMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: \"Cold, with a low of 13 ℃\",\n", - " name: \"search\",\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " tool_call_id: \"call_ONueXiVpSg1lC5MIFDVmDswq\"\n", - " },\n", - " AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: \"The current weather in San Francisco is cold, with a temperature of around 13℃.\",\n", - " name: undefined,\n", - " additional_kwargs: {},\n", - " response_metadata: [Object],\n", - " tool_calls: [],\n", - " invalid_tool_calls: []\n", - " }\n", - " ]\n", - " }\n", - " },\n", + " metadata: { source: \u001b[32m'loop'\u001b[39m, step: \u001b[33m3\u001b[39m, writes: { messages: \u001b[36m[Array]\u001b[39m } },\n", " config: {\n", " configurable: {\n", - " thread_id: \"conversation-num-1\",\n", - " checkpoint_id: \"1ef14937-f9e8-6d21-8003-9b8b07560ef4\"\n", + " thread_id: \u001b[32m'conversation-num-1'\u001b[39m,\n", + " checkpoint_id: \u001b[32m'1ef16645-dedc-6920-8003-05dd7bf5a619'\u001b[39m\n", " }\n", " },\n", - " parentConfig: undefined\n", + " parentConfig: \u001b[90mundefined\u001b[39m\n", "}\n", "--\n", "{\n", - " values: {\n", - " messages: [\n", - " [ \"user\", \"What's the weather like in SF currently?\" ],\n", - " AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"\",\n", - " tool_calls: [Array],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: [Object],\n", - " response_metadata: [Object]\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"\",\n", - " name: undefined,\n", - " additional_kwargs: { tool_calls: [Array] },\n", - " response_metadata: { tokenUsage: [Object], finish_reason: \"tool_calls\" },\n", - " tool_calls: [ [Object] ],\n", - " invalid_tool_calls: []\n", - " },\n", - " ToolMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " name: \"search\",\n", - " content: \"Cold, with a low of 13 ℃\",\n", - " tool_call_id: \"call_ONueXiVpSg1lC5MIFDVmDswq\",\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"Cold, with a low of 13 ℃\",\n", - " name: \"search\",\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " tool_call_id: \"call_ONueXiVpSg1lC5MIFDVmDswq\"\n", - " }\n", - " ]\n", - " },\n", - " next: [ \"agent\" ],\n", - " metadata: {\n", - " source: \"loop\",\n", - " step: 2,\n", - " writes: {\n", - " messages: [\n", - " [ \"user\", \"What's the weather like in SF currently?\" ],\n", - " AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: \"\",\n", - " name: undefined,\n", - " additional_kwargs: [Object],\n", - " response_metadata: [Object],\n", - " tool_calls: [Array],\n", - " invalid_tool_calls: []\n", - " },\n", - " ToolMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", 
- " content: \"Cold, with a low of 13 ℃\",\n", - " name: \"search\",\n", - " additional_kwargs: {},\n", - " response_metadata: {},\n", - " tool_call_id: \"call_ONueXiVpSg1lC5MIFDVmDswq\"\n", - " }\n", - " ]\n", - " }\n", - " },\n", + " values: { messages: [ \u001b[36m[Array]\u001b[39m, \u001b[36m[AIMessage]\u001b[39m, \u001b[36m[ToolMessage]\u001b[39m ] },\n", + " next: [ \u001b[32m'agent'\u001b[39m ],\n", + " metadata: { source: \u001b[32m'loop'\u001b[39m, step: \u001b[33m2\u001b[39m, writes: { messages: \u001b[36m[Array]\u001b[39m } },\n", " config: {\n", " configurable: {\n", - " thread_id: \"conversation-num-1\",\n", - " checkpoint_id: \"1ef14937-f509-6430-8002-60093a5ff01a\"\n", + " thread_id: \u001b[32m'conversation-num-1'\u001b[39m,\n", + " checkpoint_id: \u001b[32m'1ef16645-d8d3-6290-8002-acd679142065'\u001b[39m\n", " }\n", " },\n", - " parentConfig: undefined\n", + " parentConfig: \u001b[90mundefined\u001b[39m\n", "}\n", "--\n", "{\n", - " values: {\n", - " messages: [\n", - " [ \"user\", \"What's the weather like in SF currently?\" ],\n", - " AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"\",\n", - " tool_calls: [Array],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: [Object],\n", - " response_metadata: [Object]\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"\",\n", - " name: undefined,\n", - " additional_kwargs: { tool_calls: [Array] },\n", - " response_metadata: { tokenUsage: [Object], finish_reason: \"tool_calls\" },\n", - " tool_calls: [ [Object] ],\n", - " invalid_tool_calls: []\n", - " }\n", - " ]\n", - " },\n", - " next: [ \"agent\" ],\n", - " metadata: { source: \"loop\", step: 3 },\n", + " values: { messages: [ \u001b[36m[Array]\u001b[39m, \u001b[36m[AIMessage]\u001b[39m ] },\n", + " next: [ \u001b[32m'tools'\u001b[39m ],\n", + " metadata: { source: \u001b[32m'loop'\u001b[39m, step: \u001b[33m1\u001b[39m, writes: { messages: \u001b[36m[Array]\u001b[39m } },\n", " config: {\n", " configurable: {\n", - " thread_id: \"conversation-num-1\",\n", - " checkpoint_id: \"1ef14937-55f4-62a1-8003-ecabce7ce043\"\n", + " thread_id: \u001b[32m'conversation-num-1'\u001b[39m,\n", + " checkpoint_id: \u001b[32m'1ef16645-c45e-6580-8001-3508bcab0211'\u001b[39m\n", " }\n", " },\n", - " parentConfig: undefined\n", + " parentConfig: \u001b[90mundefined\u001b[39m\n", "}\n", "--\n", "{\n", - " values: {\n", - " messages: [\n", - " [ \"user\", \"What's the weather like in SF currently?\" ],\n", - " AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"\",\n", - " tool_calls: [Array],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: [Object],\n", - " response_metadata: [Object]\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"\",\n", - " name: undefined,\n", - " additional_kwargs: { tool_calls: [Array] },\n", - " response_metadata: { tokenUsage: [Object], finish_reason: \"tool_calls\" },\n", - " tool_calls: [ [Object] ],\n", - " invalid_tool_calls: []\n", - " }\n", - " ]\n", - " },\n", - " next: [ \"__start__\" ],\n", - " metadata: { source: \"input\", step: 2, writes: { __start__: {} } },\n", + " values: { messages: [ \u001b[36m[Array]\u001b[39m ] },\n", + " next: [ \u001b[32m'agent'\u001b[39m ],\n", + " metadata: { source: \u001b[32m'loop'\u001b[39m, step: \u001b[33m0\u001b[39m, writes: { messages: \u001b[36m[Array]\u001b[39m } },\n", " config: {\n", " configurable: {\n", - " thread_id: \"conversation-num-1\",\n", - " 
checkpoint_id: \"1ef14937-55f4-62a0-8002-e1c464fcfb61\"\n", + " thread_id: \u001b[32m'conversation-num-1'\u001b[39m,\n", + " checkpoint_id: \u001b[32m'1ef16645-be57-6602-8000-da3eed993c02'\u001b[39m\n", " }\n", " },\n", - " parentConfig: undefined\n", - "}\n", - "--\n", - "{\n", - " values: {\n", - " messages: [\n", - " [ \"user\", \"What's the weather like in SF currently?\" ],\n", - " AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"\",\n", - " tool_calls: [Array],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: [Object],\n", - " response_metadata: [Object]\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"\",\n", - " name: undefined,\n", - " additional_kwargs: { tool_calls: [Array] },\n", - " response_metadata: { tokenUsage: [Object], finish_reason: \"tool_calls\" },\n", - " tool_calls: [ [Object] ],\n", - " invalid_tool_calls: []\n", - " }\n", - " ]\n", - " },\n", - " next: [ \"tools\" ],\n", - " metadata: {\n", - " source: \"loop\",\n", - " step: 1,\n", - " writes: {\n", - " messages: [\n", - " [ \"user\", \"What's the weather like in SF currently?\" ],\n", - " AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: [Object],\n", - " lc_namespace: [Array],\n", - " content: \"\",\n", - " name: undefined,\n", - " additional_kwargs: [Object],\n", - " response_metadata: [Object],\n", - " tool_calls: [Array],\n", - " invalid_tool_calls: []\n", - " }\n", - " ]\n", - " }\n", - " },\n", - " config: {\n", - " configurable: {\n", - " thread_id: \"conversation-num-1\",\n", - " checkpoint_id: \"1ef14937-5223-69a0-8001-97fe57ab4a3f\"\n", - " }\n", - " },\n", - " parentConfig: undefined\n", - "}\n", - "--\n", - "{\n", - " values: {\n", - " messages: [ [ \"user\", \"What's the weather like in SF currently?\" ] ]\n", - " },\n", - " next: [ \"agent\" ],\n", - " metadata: {\n", - " source: \"loop\",\n", - " step: 0,\n", - " writes: {\n", - " messages: [ [ \"user\", \"What's the weather like in SF currently?\" ] ]\n", - " }\n", - " },\n", - " config: {\n", - " configurable: {\n", - " thread_id: \"conversation-num-1\",\n", - " checkpoint_id: \"1ef14937-48f6-6f82-8000-d00ee5c84127\"\n", - " }\n", - " },\n", - " parentConfig: undefined\n", + " parentConfig: \u001b[90mundefined\u001b[39m\n", "}\n", "--\n", "{\n", " values: { messages: [] },\n", - " next: [ \"__start__\" ],\n", - " metadata: {\n", - " source: \"input\",\n", - " step: -1,\n", - " writes: { __start__: { messages: [ [Array] ] } }\n", - " },\n", + " next: [ \u001b[32m'__start__'\u001b[39m ],\n", + " metadata: { source: \u001b[32m'input'\u001b[39m, step: \u001b[33m-1\u001b[39m, writes: { __start__: \u001b[36m[Object]\u001b[39m } },\n", " config: {\n", " configurable: {\n", - " thread_id: \"conversation-num-1\",\n", - " checkpoint_id: \"1ef14937-48f6-6f81-unde-finedff8dcca677e7a3\"\n", + " thread_id: \u001b[32m'conversation-num-1'\u001b[39m,\n", + " checkpoint_id: \u001b[32m'1ef16645-be57-6601-unde-finedff5c9266290795'\u001b[39m\n", " }\n", " },\n", - " parentConfig: undefined\n", + " parentConfig: \u001b[90mundefined\u001b[39m\n", "}\n", "--\n" ] } ], "source": [ - "let toReplay = null;\n", - "for await (const state of graphWithInterrupt.getStateHistory(config)) {\n", + "let toReplay;\n", + "const states = await graphWithInterrupt.getStateHistory(config);\n", + "for await (const state of states) {\n", " console.log(state);\n", " console.log(\"--\");\n", " if (state.values?.messages?.length === 2) {\n", " toReplay = state;\n", " }\n", - "}\n" + "}" ] }, { 
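As the trace above shows, `getStateHistory` yields snapshots newest-first (step 3, 2, 1, 0, -1), so the loop ends up holding the step-1 checkpoint, the one whose `messages` array has exactly two entries and whose `next` is `[ 'tools' ]`. A checkpoint can also be pinned down by its metadata instead of by message count; the following is a minimal sketch, not the notebook's own code, assuming the same `graphWithInterrupt` and `config` bindings as the cell above:

```typescript
// Sketch: select a checkpoint by its metadata rather than by message count.
// Snapshots arrive newest-first, so `break` keeps the most recent match.
let toReplayByStep;
const history = await graphWithInterrupt.getStateHistory(config);
for await (const snapshot of history) {
  if (snapshot.metadata?.step === 1) {
    toReplayByStep = snapshot;
    break;
  }
}
// `toReplayByStep?.config.configurable.checkpoint_id` now identifies the
// exact checkpoint to replay or branch from.
```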
"cell_type": "markdown", - "id": "d552de41", + "id": "342f0154", "metadata": {}, "source": [ "## Replay a past state\n", @@ -1164,8 +811,8 @@ }, { "cell_type": "code", - "execution_count": 35, - "id": "74e73b60", + "execution_count": 25, + "id": "c1cefbfa", "metadata": {}, "outputs": [ { @@ -1174,8 +821,21 @@ "text": [ "Cold, with a low of 13 ℃\n", "-----\n", - "\n", - "The current weather in San Francisco is cold, with a low of 13 ℃.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Skipping write for channel branch:agent:routeMessage:undefined which has no readers\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The current weather in San Francisco is cold, with a low of 13°C.\n", "-----\n", "\n" ] @@ -1202,7 +862,7 @@ }, { "cell_type": "markdown", - "id": "cc0cef56", + "id": "e870c084", "metadata": {}, "source": [ "## Branch off a past state\n", @@ -1219,8 +879,8 @@ }, { "cell_type": "code", - "execution_count": 66, - "id": "9a1c7ab8", + "execution_count": 26, + "id": "d7656840-3a4a-4a80-af74-214b35cfbadd", "metadata": {}, "outputs": [ { @@ -1229,51 +889,31 @@ "text": [ "{\n", " messages: [\n", - " [ \"user\", \"What's the weather like in SF currently?\" ],\n", + " [ \u001b[32m'user'\u001b[39m, \u001b[32m\"What's the weather like in SF currently?\"\u001b[39m ],\n", " AIMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"\",\n", - " tool_calls: [ [Object] ],\n", - " invalid_tool_calls: [],\n", - " additional_kwargs: { tool_calls: [Array] },\n", - " response_metadata: { tokenUsage: [Object], finish_reason: \"tool_calls\" }\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"\",\n", - " name: undefined,\n", - " additional_kwargs: { tool_calls: [ [Object] ] },\n", - " response_metadata: {\n", - " tokenUsage: { completionTokens: 17, promptTokens: 72, totalTokens: 89 },\n", - " finish_reason: \"tool_calls\"\n", - " },\n", - " tool_calls: [\n", - " {\n", - " name: \"search\",\n", - " args: [Object],\n", - " id: \"call_ONueXiVpSg1lC5MIFDVmDswq\"\n", - " }\n", - " ],\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: \u001b[36m[Object]\u001b[39m,\n", + " lc_namespace: \u001b[36m[Array]\u001b[39m,\n", + " content: \u001b[32m''\u001b[39m,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " additional_kwargs: \u001b[36m[Object]\u001b[39m,\n", + " response_metadata: \u001b[36m[Object]\u001b[39m,\n", + " tool_calls: \u001b[36m[Array]\u001b[39m,\n", " invalid_tool_calls: []\n", " },\n", " ToolMessage {\n", - " lc_serializable: true,\n", - " lc_kwargs: {\n", - " content: \"It's sunny out, with a high of 38 ℃.\",\n", - " tool_call_id: \"call_ONueXiVpSg1lC5MIFDVmDswq\",\n", - " additional_kwargs: {},\n", - " response_metadata: {}\n", - " },\n", - " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", - " content: \"It's sunny out, with a high of 38 ℃.\",\n", - " name: undefined,\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: \u001b[36m[Object]\u001b[39m,\n", + " lc_namespace: \u001b[36m[Array]\u001b[39m,\n", + " content: \u001b[32m\"It's sunny out, with a high of 38 ℃.\"\u001b[39m,\n", + " name: \u001b[90mundefined\u001b[39m,\n", " additional_kwargs: {},\n", " response_metadata: {},\n", - " tool_call_id: \"call_ONueXiVpSg1lC5MIFDVmDswq\"\n", + " tool_call_id: \u001b[32m'call_WRrsB6evR9HRlKvTpeKdTeMA'\u001b[39m\n", " }\n", " ]\n", "}\n", - "[ \"agent\" ]\n" + "[ \u001b[32m'agent'\u001b[39m ]\n" ] } ], @@ -1303,7 +943,7 @@ }, { 
"cell_type": "markdown", - "id": "fd97a823", + "id": "4689abd9-1008-4d8b-902c-e956a5913e12", "metadata": {}, "source": [ "#### Now you can run from this branch\n", @@ -1314,15 +954,22 @@ }, { "cell_type": "code", - "execution_count": 67, - "id": "38b185d3", + "execution_count": 27, + "id": "bb95930f-07e5-4e32-8e38-2170d36ab1a0", "metadata": {}, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Skipping write for channel branch:agent:routeMessage:undefined which has no readers\n" + ] + }, { "name": "stdout", "output_type": "stream", "text": [ - "The current weather in San Francisco is sunny with a high of 38°C (100°F).\n", + "The current weather in San Francisco is sunny, with a high of 38°C.\n", "-----\n", "\n" ] @@ -1350,25 +997,31 @@ { "cell_type": "code", "execution_count": null, - "id": "bec22884", + "id": "67565ff9-8d4a-4960-952c-ac1eac5ca97c", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { + "jupytext": { + "encoding": "# -*- coding: utf-8 -*-" + }, "kernelspec": { - "display_name": "Deno", + "display_name": "TypeScript", "language": "typescript", - "name": "deno" + "name": "tslab" }, "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, "file_extension": ".ts", - "mimetype": "text/x.typescript", + "mimetype": "text/typescript", "name": "typescript", - "nb_converter": "script", - "pygments_lexer": "typescript", - "version": "5.4.5" + "version": "3.7.2" } }, "nbformat": 4, diff --git a/langgraph/package.json b/langgraph/package.json index 817a61e0..73d14457 100644 --- a/langgraph/package.json +++ b/langgraph/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/langgraph", - "version": "0.0.17", + "version": "0.0.18", "description": "LangGraph", "type": "module", "engines": { @@ -42,6 +42,7 @@ }, "devDependencies": { "@jest/globals": "^29.5.0", + "@langchain/anthropic": "^0.1.21", "@langchain/community": "^0.0.43", "@langchain/openai": "latest", "@langchain/scripts": "^0.0.13", diff --git a/package.json b/package.json index 1fa933ed..f94ce992 100644 --- a/package.json +++ b/package.json @@ -32,10 +32,12 @@ "license": "MIT", "resolutions": { "@langchain/core": "0.1.63", + "@langchain/anthropic": "^0.1.21", "zod": "3.22.4" }, "devDependencies": { "@jest/globals": "^29.5.0", + "@langchain/anthropic": "^0.1.21", "@langchain/core": "0.1.63", "@langchain/openai": "latest", "@swc/core": "^1.3.90", diff --git a/yarn.lock b/yarn.lock index 8dd6c97a..2e59e2b9 100644 --- a/yarn.lock +++ b/yarn.lock @@ -22,6 +22,22 @@ __metadata: languageName: node linkType: hard +"@anthropic-ai/sdk@npm:^0.21.0": + version: 0.21.0 + resolution: "@anthropic-ai/sdk@npm:0.21.0" + dependencies: + "@types/node": ^18.11.18 + "@types/node-fetch": ^2.6.4 + abort-controller: ^3.0.0 + agentkeepalive: ^4.2.1 + form-data-encoder: 1.7.2 + formdata-node: ^4.3.2 + node-fetch: ^2.6.7 + web-streams-polyfill: ^3.2.1 + checksum: fbed720938487495f1d28822fa6eb3871cf7e7be325c299b69efa78e72e1e0b66d9f564003ae5d7a1e96c7555cc69c817be4b901d1847ae002f782546a4c987d + languageName: node + linkType: hard + "@anthropic-ai/sdk@npm:^0.9.1": version: 0.9.1 resolution: "@anthropic-ai/sdk@npm:0.9.1" @@ -983,6 +999,19 @@ __metadata: languageName: node linkType: hard +"@langchain/anthropic@npm:^0.1.21": + version: 0.1.21 + resolution: "@langchain/anthropic@npm:0.1.21" + dependencies: + "@anthropic-ai/sdk": ^0.21.0 + "@langchain/core": ">0.1.56 <0.3.0" + fast-xml-parser: ^4.3.5 + zod: ^3.22.4 + zod-to-json-schema: ^3.22.4 + checksum: 
986b6c771d853653b9c538a995b4bd6c0090ce850d3958a177739ee5d9bb04a2d4cd20c4be5e77b8182943614824aebc66677b9fec4f638174557c03ac37eab2 + languageName: node + linkType: hard + "@langchain/community@npm:^0.0.43": version: 0.0.43 resolution: "@langchain/community@npm:0.0.43" @@ -1586,6 +1615,7 @@ __metadata: resolution: "@langchain/langgraph@workspace:langgraph" dependencies: "@jest/globals": ^29.5.0 + "@langchain/anthropic": ^0.1.21 "@langchain/community": ^0.0.43 "@langchain/core": ^0.1.61 "@langchain/openai": latest @@ -4591,6 +4621,17 @@ __metadata: languageName: node linkType: hard +"fast-xml-parser@npm:^4.3.5": + version: 4.4.0 + resolution: "fast-xml-parser@npm:4.4.0" + dependencies: + strnum: ^1.0.5 + bin: + fxparser: src/cli/cli.js + checksum: ad33a4b5165a0ffcb6e17ae78825bd4619a8298844a8a8408f2ea141a0d2d9439d18865dc5254162f09fe54d510ff18e5d5c0a190869cab21fc745ee66be816b + languageName: node + linkType: hard + "fastq@npm:^1.6.0": version: 1.17.1 resolution: "fastq@npm:1.17.1" @@ -6707,6 +6748,7 @@ __metadata: resolution: "langgraph@workspace:." dependencies: "@jest/globals": ^29.5.0 + "@langchain/anthropic": ^0.1.21 "@langchain/core": 0.1.63 "@langchain/openai": latest "@swc/core": ^1.3.90 @@ -8923,6 +8965,13 @@ __metadata: languageName: node linkType: hard +"strnum@npm:^1.0.5": + version: 1.0.5 + resolution: "strnum@npm:1.0.5" + checksum: 651b2031db5da1bf4a77fdd2f116a8ac8055157c5420f5569f64879133825915ad461513e7202a16d7fec63c54fd822410d0962f8ca12385c4334891b9ae6dd2 + languageName: node + linkType: hard + "supports-color@npm:^5.3.0": version: 5.5.0 resolution: "supports-color@npm:5.5.0"