From 5fbdcd14414b4001b811eb309b4f67ab4164c7dd Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Fri, 14 Jun 2024 11:09:30 -0700 Subject: [PATCH] Remove deprecated method in examples (#215) --- examples/agent_executor/base.ipynb | 4 +- .../base.ipynb | 4 +- .../chatbots/customer_support_mistral.ipynb | 3 +- examples/rag/langgraph_crag_mistral.ipynb | 4 +- langgraph/README.md | 39 ++++++------------- langgraph/src/tests/chatbot.int.test.ts | 2 +- langgraph/src/tests/tracing.int.test.ts | 10 ++--- 7 files changed, 25 insertions(+), 41 deletions(-) diff --git a/examples/agent_executor/base.ipynb b/examples/agent_executor/base.ipynb index 27fae7f6..220ccc53 100644 --- a/examples/agent_executor/base.ipynb +++ b/examples/agent_executor/base.ipynb @@ -254,7 +254,7 @@ "outputs": [], "source": [ "import { RunnableLambda } from \"@langchain/core/runnables\";\n", - "import { END, StateGraph } from \"@langchain/langgraph\";\n", + "import { START, END, StateGraph } from \"@langchain/langgraph\";\n", "\n", "// Define a new graph\n", "const workflow = new StateGraph({\n", @@ -267,7 +267,7 @@ "\n", "// Set the entrypoint as `agent`\n", "// This means that this node is the first one called\n", - "workflow.setEntryPoint(\"agent\");\n", + "workflow.addEdge(START, \"agent\");\n", "\n", "// We now add a conditional edge\n", "workflow.addConditionalEdges(\n", diff --git a/examples/chat_agent_executor_with_function_calling/base.ipynb b/examples/chat_agent_executor_with_function_calling/base.ipynb index 7f05d747..164ff306 100644 --- a/examples/chat_agent_executor_with_function_calling/base.ipynb +++ b/examples/chat_agent_executor_with_function_calling/base.ipynb @@ -353,7 +353,7 @@ "metadata": {}, "outputs": [], "source": [ - "import { END, StateGraph } from \"@langchain/langgraph\";\n", + "import { START, END, StateGraph } from \"@langchain/langgraph\";\n", "\n", "// Define a new graph\n", "const workflow = new StateGraph({\n", @@ -366,7 +366,7 @@ "\n", "// Set the entrypoint as `agent`\n", "// This means that this node is the first one called\n", - "workflow.setEntryPoint(\"agent\");\n", + "workflow.addEdge(START, \"agent\");\n", "\n", "// We now add a conditional edge\n", "workflow.addConditionalEdges(\n", diff --git a/examples/chatbots/customer_support_mistral.ipynb b/examples/chatbots/customer_support_mistral.ipynb index 6d2af097..5e86f1ec 100644 --- a/examples/chatbots/customer_support_mistral.ipynb +++ b/examples/chatbots/customer_support_mistral.ipynb @@ -149,6 +149,7 @@ "source": [ "import { MessagesPlaceholder } from \"@langchain/core/prompts\";\n", "import type { BaseMessage } from \"@langchain/core/messages\";\n", + "import { START } from \"@langchain/langgraph\";\n", "\n", "graph.addNode(\"initial_support\", async (state: BaseMessage[]) => {\n", " const SYSTEM_TEMPLATE =\n", @@ -167,7 +168,7 @@ " return prompt.pipe(model).invoke({ messages: state });\n", "});\n", "\n", - "graph.setEntryPoint(\"initial_support\");" + "graph.addEdge(START, \"initial_support\");" ] }, { diff --git a/examples/rag/langgraph_crag_mistral.ipynb b/examples/rag/langgraph_crag_mistral.ipynb index 198cc089..f34d9c99 100644 --- a/examples/rag/langgraph_crag_mistral.ipynb +++ b/examples/rag/langgraph_crag_mistral.ipynb @@ -594,7 +594,7 @@ "metadata": {}, "outputs": [], "source": [ - "import { END, StateGraph } from \"@langchain/langgraph\";\n", + "import { START, END, StateGraph } from \"@langchain/langgraph\";\n", "\n", "const workflow = new StateGraph({\n", " channels: graphState,\n", @@ -608,7 +608,7 @@ 
"workflow.addNode(\"webSearch\", webSearch);\n", "\n", "// Build graph\n", - "workflow.setEntryPoint(\"retrieve\");\n", + "workflow.addEdge(START, \"retrieve\");\n", "workflow.addEdge(\"retrieve\", \"gradeDocuments\");\n", "workflow.addConditionalEdges(\n", " \"gradeDocuments\",\n", diff --git a/langgraph/README.md b/langgraph/README.md index acd0b2cc..ce19dc06 100644 --- a/langgraph/README.md +++ b/langgraph/README.md @@ -46,7 +46,7 @@ And now we're ready! The graph below contains a single node called `"oracle"` th ```ts import { ChatOpenAI } from "@langchain/openai"; import { HumanMessage, BaseMessage, } from "@langchain/core/messages"; -import { END, MessageGraph } from "@langchain/langgraph"; +import { START, END, MessageGraph } from "@langchain/langgraph"; const model = new ChatOpenAI({ temperature: 0 }); @@ -58,7 +58,7 @@ graph.addNode("oracle", async (state: BaseMessage[]) => { graph.addEdge("oracle", END); -graph.setEntryPoint("oracle"); +graph.addEdge(START, "oracle"); const runnable = graph.compile(); ``` @@ -90,7 +90,7 @@ So what did we do here? Let's break it down step by step: 1. First, we initialize our model and a `MessageGraph`. 2. Next, we add a single node to the graph, called `"oracle"`, which simply calls the model with the given input. 3. We add an edge from this `"oracle"` node to the special value `END`. This means that execution will end after current node. -4. We set `"oracle"` as the entrypoint to the graph. +4. We set `"oracle"` as the entrypoint to the graph by adding an edge from the special `START` value to it. 5. We compile the graph, ensuring that no more modifications to it can be made. Then, when we execute the graph: @@ -185,7 +185,7 @@ graph.addNode("calculator", async (state: BaseMessage[]) => { graph.addEdge("calculator", END); -graph.setEntryPoint("oracle"); +graph.addEdge(START, "oracle"); ``` Now let's think - what do we want to have happen? @@ -477,7 +477,7 @@ const callTool = async ( We can now put it all together and define the graph! ```typescript -import { StateGraph, END } from "@langchain/langgraph"; +import { StateGraph, START, END } from "@langchain/langgraph"; import { RunnableLambda } from "@langchain/core/runnables"; // Define a new graph @@ -491,7 +491,7 @@ workflow.addNode("action", callTool); // Set the entrypoint as `agent` // This means that this node is the first one called -workflow.setEntryPoint("agent"); +workflow.addEdge(START, "agent"); // We now add a conditional edge workflow.addConditionalEdges( @@ -723,31 +723,14 @@ This takes three arguments: - `condition`: A function to call to decide what to do next. The input will be the output of the start node. It should return a string that is present in `conditionalEdgeMapping` and represents the edge to take. - `conditionalEdgeMapping`: A mapping of string to string. The keys should be strings that may be returned by `condition`. The values should be the downstream node to call if that condition is returned. -### `.setEntryPoint` +### `START` ```typescript -setEntryPoint(key: string): void +import { START } from "@langchain/langgraph"; ``` -The entrypoint to the graph. -This is the node that is first called. -It only takes one argument: - -- `key`: The name of the node that should be called first. - -### `.setFinishPoint` - -```typescript -setFinishPoint(key: string): void -``` - -This is the exit point of the graph. -When this node is called, the results will be the final result from the graph. 
-It only has one argument: - -- `key`: The name of the node that, when called, will return the results of calling it as the final output - -Note: This does not need to be called if at any point you previously created an edge (conditional or normal) to `END` +This is a special node representing the start of the graph. +This means that anything with an edge from this node will be the entrypoint of the graph. ### `END` @@ -827,7 +810,7 @@ workflow.addNode("agent", agent); workflow.addNode("tools", executeTools); // We now set the entry point to be this first agent -workflow.setEntryPoint("firstAgent"); +workflow.addEdge(START, "firstAgent"); // We define the same edges as before workflow.addConditionalEdges("agent", shouldContinue, { diff --git a/langgraph/src/tests/chatbot.int.test.ts b/langgraph/src/tests/chatbot.int.test.ts index 66e98b53..ebc75a0d 100644 --- a/langgraph/src/tests/chatbot.int.test.ts +++ b/langgraph/src/tests/chatbot.int.test.ts @@ -21,7 +21,7 @@ describe("Chatbot", () => { const graph = new MessageGraph() .addNode("oracle", async (state: BaseMessage[]) => model.invoke(state)) .addEdge("oracle", END) - .setEntryPoint("oracle") + .addEdge(START, "oracle") .compile(); const res = await graph.invoke(new HumanMessage("What is 1 + 1?")); diff --git a/langgraph/src/tests/tracing.int.test.ts b/langgraph/src/tests/tracing.int.test.ts index 3185d6c9..77dbfcef 100644 --- a/langgraph/src/tests/tracing.int.test.ts +++ b/langgraph/src/tests/tracing.int.test.ts @@ -27,7 +27,7 @@ import { z } from "zod"; import { ToolExecutor } from "../prebuilt/tool_executor.js"; import { createAgentExecutor } from "../prebuilt/agent_executor.js"; // Import from main `@langchain/langgraph` endpoint to turn on automatic config passing -import { StateGraph, END } from "../index.js"; +import { StateGraph, END, START } from "../index.js"; test.skip("Can invoke with tracing", async () => { const tools = [new TavilySearchResults({ maxResults: 1 })]; @@ -103,7 +103,7 @@ test.skip("Can invoke with tracing", async () => { .addNode("action", new RunnableLambda({ func: executeTools })) // Set the entrypoint as `agent` // This means that this node is the first one called - .setEntryPoint("agent") + .addEdge(START, "agent") // We now add a conditional edge .addConditionalEdges( // First, we define the start node. We use `agent`. @@ -234,7 +234,7 @@ test.skip("Can nest an agent executor", async () => { // Or end work if done FINISH: END, }) - .setEntryPoint("supervisor"); + .addEdge(START, "supervisor"); const graph = workflow.compile(); @@ -352,7 +352,7 @@ test.skip("Can nest a graph within a graph", async () => { researcher: "researcher", FINISH: END, }) - .setEntryPoint("supervisor"); + .addEdge(START, "supervisor"); const graph = workflow.compile(); @@ -525,7 +525,7 @@ Only add steps to the plan that still NEED to be done. Do not return previously .addNode("agent", executeStep) // Add a replan node .addNode("replan", replanStep) - .setEntryPoint("planner") + .addEdge(START, "planner") // From plan we go to agent .addEdge("planner", "agent") // From agent, we replan
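The change this patch applies is mechanical: wherever a graph previously declared its entrypoint with the deprecated `setEntryPoint("someNode")`, it now adds an ordinary edge from the special `START` node, mirroring how termination is already expressed with an edge to `END`. Below is a minimal, self-contained sketch of the resulting style, using only the API calls that appear in the patch itself; the `"echo"` node name and its body are hypothetical stand-ins for a real model call.

```typescript
import { START, END, MessageGraph } from "@langchain/langgraph";
import { AIMessage, BaseMessage, HumanMessage } from "@langchain/core/messages";

// Declare the entrypoint with an edge from the special START node
// instead of the deprecated setEntryPoint() call.
const graph = new MessageGraph()
  .addNode("echo", async (state: BaseMessage[]) => {
    // Hypothetical node: echoes the latest message back rather than calling a model.
    return new AIMessage(`You said: ${state[state.length - 1].content}`);
  })
  .addEdge(START, "echo") // previously: graph.setEntryPoint("echo")
  .addEdge("echo", END)
  .compile();

// Invoke from any async context.
const result = await graph.invoke(new HumanMessage("Hello"));
console.log(result);
```

Expressing the entrypoint as an edge from `START` keeps the graph definition uniform: entry, internal transitions, and termination via `END` are all declared with `addEdge` or `addConditionalEdges`, which is the pattern the README and examples now document throughout.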