Create the agent state and define tools for the agent to use.
from typing import Annotated, Sequence

from langchain_core.messages import BaseMessage
from langchain_core.tools import tool
from langgraph.graph import StateGraph, START, END, add_messages
from typing_extensions import TypedDict


# Define state
class AgentState(TypedDict):
    # Conversation history; the add_messages reducer appends new messages
    # to the list instead of overwriting it on each state update.
    messages: Annotated[Sequence[BaseMessage], add_messages]


# Define tools
@tool
def search_web(query: str) -> str:
    """Search the web for current information.

    Args:
        query: The search query

    Returns:
        Search results as a string
    """
    # Imported lazily so the module still loads when langchain_community
    # is not installed and this tool is never invoked.
    from langchain_community.tools.tavily_search import TavilySearchResults

    search = TavilySearchResults(max_results=2)
    results = search.invoke(query)
    return str(results)


@tool
def calculate(expression: str) -> str:
    """Calculate a mathematical expression.

    Args:
        expression: An arithmetic expression to evaluate (e.g., "2 + 2" or "10 * 5")

    Returns:
        The result of the calculation, or an error message
    """
    # The LLM controls this input, so unrestricted eval() would allow
    # arbitrary code execution. Whitelist arithmetic characters first.
    allowed_chars = set("0123456789+-*/(). ")
    if not all(c in allowed_chars for c in expression):
        return "Error: Invalid characters in expression"
    try:
        # Empty __builtins__ as defense in depth: no names are reachable.
        result = eval(expression, {"__builtins__": {}}, {})
        return str(result)
    except Exception as e:
        return f"Error: {str(e)}"


@tool
def get_weather(location: str) -> str:
    """Get current weather for a location.

    Args:
        location: City name or location

    Returns:
        Weather information
    """
    # Mock implementation - replace with real API
    return f"The weather in {location} is sunny, 72°F"


tools = [search_web, calculate, get_weather]
2
Create the agent node
Build the agent that decides which tools to call.
from langchain_openai import ChatOpenAI

# Bind the tool schemas to the model so it can emit tool calls.
model = ChatOpenAI(model="gpt-4", temperature=0)
model_with_tools = model.bind_tools(tools)


def agent_node(state: AgentState) -> dict:
    """LLM node: read the conversation so far and produce the next message.

    The response is either a final answer or an AIMessage carrying tool
    calls; add_messages appends it to the state's message history.
    """
    response = model_with_tools.invoke(state["messages"])
    return {"messages": [response]}
The agent:
Receives conversation history
Decides if tools are needed
Returns either a tool call or final answer
3
Create the tool execution node
Build a node that executes tool calls.
from langgraph.prebuilt import ToolNode# Create tool execution nodetool_node = ToolNode(tools)
The ToolNode:
Automatically executes tool calls
Handles multiple tools
Returns results as messages
4
Add routing logic
Create a function to decide whether to call tools or finish.
def should_continue(state: "AgentState") -> str:
    """Determine whether to call tools or end.

    Args:
        state: Current graph state; the last message is the agent's latest output.

    Returns:
        "tools" if the last message requests tool calls, "end" otherwise.
    """
    messages = state["messages"]
    last_message = messages[-1]
    # If the LLM makes a tool call, route to tools. getattr guards against
    # message types that have no tool_calls attribute at all, which would
    # otherwise raise AttributeError here.
    if getattr(last_message, "tool_calls", None):
        return "tools"
    # Otherwise, end the conversation
    return "end"
from langchain_core.messages import HumanMessage# Test calculationresult = app.invoke({ "messages": [HumanMessage(content="What is 25 * 17?")]})print(result["messages"][-1].content)# "25 * 17 equals 425."# Test web searchresult = app.invoke({ "messages": [HumanMessage(content="What are the latest news about AI?")]})print(result["messages"][-1].content)# "Here are the latest AI news: [search results]..."# Test weatherresult = app.invoke({ "messages": [HumanMessage(content="What's the weather in San Francisco?")]})print(result["messages"][-1].content)# "The weather in San Francisco is sunny, 72°F."# Test multiple toolsresult = app.invoke({ "messages": [HumanMessage( content="Search for the population of Tokyo and calculate 10% of it" )]})print(result["messages"][-1].content)# Agent will use search_web, then calculate, then provide answer
# Math calculation
>>> "What is 25 * 17?"
"25 * 17 equals 425."

# Weather query
>>> "What's the weather in Tokyo?"
"The weather in Tokyo is sunny, 72°F."

# Complex multi-step
>>> "Calculate 100 * 50, then tell me the weather"
"100 * 50 equals 5000. However, I need a specific location to check the weather."
@tool
def safe_calculate(expression: str) -> str:
    """Calculate an arithmetic expression with input validation.

    Args:
        expression: Arithmetic expression using only digits, + - * / ( ) . and spaces.

    Returns:
        The result as a string, or an error message if validation or
        evaluation fails.
    """
    # Validate input: only arithmetic characters may reach eval().
    allowed_chars = set("0123456789+-*/(). ")
    if not all(c in allowed_chars for c in expression):
        return "Error: Invalid characters in expression"
    try:
        # Empty __builtins__ as defense in depth — even if validation is
        # bypassed or loosened later, no names are reachable from eval.
        result = eval(expression, {"__builtins__": {}}, {})
        return str(result)
    except Exception as e:
        return f"Calculation error: {str(e)}"
Add streaming
# Stream agent executionfor chunk in app.stream({ "messages": [HumanMessage(content="What is 10 + 20?")]}): for node_name, node_output in chunk.items(): print(f"--- {node_name} ---") print(node_output)
Add custom tools with APIs
import requests


@tool
def get_stock_price(symbol: str) -> str:
    """Get current stock price.

    Args:
        symbol: Stock ticker symbol (e.g., 'AAPL')

    Returns:
        The latest price as a string, or an error message on failure.
    """
    # Replace with real API
    try:
        response = requests.get(
            f"https://api.example.com/stock/{symbol}",
            timeout=10,  # never let the agent hang on a slow endpoint
        )
        # Surface HTTP errors here instead of a confusing KeyError below.
        response.raise_for_status()
        # Coerce to str so the return value matches the declared contract.
        return str(response.json()["price"])
    except (requests.RequestException, KeyError, ValueError) as e:
        return f"Error fetching price for {symbol}: {e}"