LangGraph is a library for building stateful, multi-actor applications with LLMs, used to create agent and multi-agent workflows. Compared to other LLM frameworks, it offers these core benefits: cycles, controllability, and persistence. LangGraph allows you to define flows that involve cycles, essential for most agentic architectures, differentiating it from DAG-based solutions. This tutorial focuses on showcasing persisting state of LangGraph with Couchbase.
When creating LangGraph agents, you can also set them up so that they persist their state. This allows you to do things like interact with an agent multiple times and have it remember previous interactions.
This reference implementation shows how to use Couchbase as the backend for persisting checkpoint state. Make sure that you have Couchbase running on port 8091
before going through this guide, or use Couchbase Capella by changing the connection string.
NOTE: this is just a reference implementation. You can implement your own checkpointer using a different database or modify this one as long as it conforms to the BaseCheckpointSaver
interface.
Requires Couchbase Python SDK and langgraph package
%%capture --no-stderr
%pip install -U couchbase langgraph
This particular example uses OpenAI's GPT-4o-mini as the model
import getpass
import os
def _set_env(var: str) -> None:
    """Ensure *var* exists in os.environ, prompting for it if absent."""
    if os.environ.get(var):
        return
    os.environ[var] = getpass.getpass(f"{var}: ")

_set_env("OPENAI_API_KEY")
Below is an implementation of CouchbaseSaver (for synchronous use of graph, i.e. .invoke()
, .stream()
). CouchbaseSaver implements four methods that are required for any checkpointer:
.put
- Store a checkpoint with its configuration and metadata..put_writes
- Store intermediate writes linked to a checkpoint (i.e. pending writes)..get_tuple
- Fetch a checkpoint tuple for a given configuration (thread_id
and checkpoint_id
)..list
- List checkpoints that match a given configuration and filter criteria.from contextlib import contextmanager
from datetime import timedelta
from typing import Any, Dict, Iterator, Optional, Sequence, Tuple
from langchain_core.runnables import RunnableConfig
from couchbase.cluster import Cluster
from couchbase.bucket import Bucket
from couchbase.auth import PasswordAuthenticator
from couchbase.options import ClusterOptions, QueryOptions, UpsertOptions
from langgraph.checkpoint.base import (
BaseCheckpointSaver,
ChannelVersions,
Checkpoint,
CheckpointMetadata,
CheckpointTuple,
get_checkpoint_id,
)
class CouchbaseSaver(BaseCheckpointSaver):
    """A checkpoint saver that stores checkpoints in a Couchbase database.

    Checkpoints live in the ``checkpoints`` collection and intermediate
    (pending) writes in the ``checkpoint_writes`` collection of the
    configured bucket and scope.
    """

    cluster: Cluster
    bucket: Bucket

    def __init__(
        self,
        cluster: Cluster,
        bucket_name: str,
        scope_name: str = "langgraph",
    ) -> None:
        """Initialize the saver.

        Args:
            cluster (Cluster): An already-connected Couchbase cluster.
            bucket_name (str): Bucket holding the checkpoint collections.
            scope_name (str): Scope holding the ``checkpoints`` and
                ``checkpoint_writes`` collections. Defaults to ``"langgraph"``.
        """
        super().__init__()
        self.cluster = cluster
        self.bucket = self.cluster.bucket(bucket_name)
        # Keep the names on the instance. The previous implementation set
        # them on the class object from from_conn_info, which made directly
        # constructed instances depend on hidden class-level state.
        self.bucket_name = bucket_name
        self.scope_name = scope_name

    @classmethod
    @contextmanager
    def from_conn_info(
        cls,
        *,
        cb_conn_str: str,
        cb_username: str,
        cb_password: str,
        bucket_name: str,
        scope_name: str,
    ) -> Iterator["CouchbaseSaver"]:
        """Connect to a cluster and yield a saver bound to it.

        The cluster connection is closed when the context exits.

        Args:
            cb_conn_str (str): Couchbase connection string.
            cb_username (str): Username used for authentication.
            cb_password (str): Password used for authentication.
            bucket_name (str): Bucket holding the checkpoint collections.
            scope_name (str): Scope holding the checkpoint collections.

        Yields:
            CouchbaseSaver: A ready-to-use saver.
        """
        cluster = None
        try:
            # Connect to Couchbase Cluster
            auth = PasswordAuthenticator(cb_username, cb_password)
            options = ClusterOptions(auth)
            cluster = Cluster(cb_conn_str, options)
            cluster.wait_until_ready(timedelta(seconds=5))
            yield cls(cluster, bucket_name, scope_name)
        finally:
            if cluster:
                cluster.close()

    def get_tuple(self, config: RunnableConfig) -> Optional[CheckpointTuple]:
        """Get a checkpoint tuple from the database.

        If the config contains a "checkpoint_id" key, the checkpoint with
        the matching thread ID and checkpoint ID is retrieved. Otherwise,
        the latest checkpoint for the given thread ID is retrieved.

        Args:
            config (RunnableConfig): The config to use for retrieving the checkpoint.

        Returns:
            Optional[CheckpointTuple]: The retrieved checkpoint tuple, or
                None if no matching checkpoint was found.
        """
        thread_id = config["configurable"]["thread_id"]
        checkpoint_ns = config["configurable"].get("checkpoint_ns", "")
        checkpoint_id = get_checkpoint_id(config)
        # Bucket/scope identifiers cannot be bound as query parameters, so
        # they are interpolated; they come from trusted configuration, while
        # all user-supplied values go through positional parameters.
        if checkpoint_id:
            query = f'SELECT * FROM {self.bucket_name}.{self.scope_name}.`checkpoints` WHERE thread_id = $1 AND checkpoint_ns = $2 AND checkpoint_id = $3 ORDER BY checkpoint_id DESC LIMIT 1'
            query_params = [thread_id, checkpoint_ns, checkpoint_id]
        else:
            query = f'SELECT * FROM {self.bucket_name}.{self.scope_name}.`checkpoints` WHERE thread_id = $1 AND checkpoint_ns = $2 ORDER BY checkpoint_id DESC LIMIT 1'
            query_params = [thread_id, checkpoint_ns]
        result = self.cluster.query(query, QueryOptions(positional_parameters=query_params))
        for row in result:
            doc = row["checkpoints"]
            config_values = {
                "thread_id": thread_id,
                "checkpoint_ns": checkpoint_ns,
                "checkpoint_id": doc["checkpoint_id"],
            }
            # Checkpoints are stored as decoded strings (see put), so they
            # are re-encoded before deserialization.
            checkpoint = self.serde.loads_typed((doc["type"], doc["checkpoint"].encode()))
            serialized_writes_query = f'SELECT * FROM {self.bucket_name}.{self.scope_name}.`checkpoint_writes` WHERE thread_id = $1 AND checkpoint_ns = $2 AND checkpoint_id = $3'
            serialized_writes_params = [thread_id, checkpoint_ns, doc["checkpoint_id"] or ""]
            serialized_writes_result = self.cluster.query(
                serialized_writes_query,
                QueryOptions(positional_parameters=serialized_writes_params),
            )
            pending_writes = []
            for write_doc in serialized_writes_result:
                checkpoint_writes = write_doc.get("checkpoint_writes", {})
                if "task_id" not in checkpoint_writes:
                    # Malformed write documents are reported and skipped
                    # rather than aborting the whole lookup.
                    print("Error: 'task_id' is not present in checkpoint_writes")
                else:
                    pending_writes.append(
                        (
                            checkpoint_writes["task_id"],
                            checkpoint_writes["channel"],
                            self.serde.loads_typed((checkpoint_writes["type"], checkpoint_writes["value"])),
                        )
                    )
            # Return on the first (and only, due to LIMIT 1) row.
            return CheckpointTuple(
                {"configurable": config_values},
                checkpoint,
                self.serde.loads(doc["metadata"].encode()),
                (
                    {
                        "configurable": {
                            "thread_id": thread_id,
                            "checkpoint_ns": checkpoint_ns,
                            "checkpoint_id": doc["parent_checkpoint_id"],
                        }
                    }
                    if doc.get("parent_checkpoint_id")
                    else None
                ),
                pending_writes,
            )

    def list(
        self,
        config: Optional[RunnableConfig],
        *,
        filter: Optional[Dict[str, Any]] = None,
        before: Optional[RunnableConfig] = None,
        limit: Optional[int] = None,
    ) -> Iterator[CheckpointTuple]:
        """List checkpoints from the database.

        Checkpoints are ordered by checkpoint ID in descending order
        (newest first).

        Args:
            config (Optional[RunnableConfig]): The config to use for listing the checkpoints.
            filter (Optional[Dict[str, Any]]): Additional filtering criteria for metadata. Defaults to None.
            before (Optional[RunnableConfig]): If provided, only checkpoints before the specified checkpoint ID are returned. Defaults to None.
            limit (Optional[int]): The maximum number of checkpoints to return. Defaults to None.

        Yields:
            Iterator[CheckpointTuple]: An iterator of checkpoint tuples.
        """
        query = f"SELECT * FROM {self.bucket_name}.{self.scope_name}.`checkpoints` WHERE 1=1"
        query_params = []
        if config is not None:
            query += " AND thread_id = $1 AND checkpoint_ns = $2"
            query_params.extend([config["configurable"]["thread_id"], config["configurable"].get("checkpoint_ns", "")])
        if filter:
            # NOTE(review): metadata is persisted as a serialized JSON string
            # (see put), so this path-based filter presumably only matches if
            # metadata is stored as an object -- verify before relying on it.
            for key, value in filter.items():
                query += f" AND metadata.{key} = ${len(query_params) + 1}"
                query_params.append(value)
        if before is not None:
            query += f" AND checkpoint_id < ${len(query_params) + 1}"
            query_params.append(before["configurable"]["checkpoint_id"])
        query += " ORDER BY checkpoint_id DESC"
        if limit is not None:
            query += f" LIMIT {limit}"
        result = self.cluster.query(query, QueryOptions(positional_parameters=query_params))
        for row in result:
            doc = row["checkpoints"]
            # Re-encode the stored string before deserializing, matching
            # get_tuple (the original passed the raw str here).
            checkpoint = self.serde.loads_typed((doc["type"], doc["checkpoint"].encode()))
            yield CheckpointTuple(
                {
                    "configurable": {
                        "thread_id": doc["thread_id"],
                        "checkpoint_ns": doc["checkpoint_ns"],
                        "checkpoint_id": doc["checkpoint_id"],
                    }
                },
                checkpoint,
                self.serde.loads(doc["metadata"].encode()),
                (
                    {
                        "configurable": {
                            "thread_id": doc["thread_id"],
                            "checkpoint_ns": doc["checkpoint_ns"],
                            "checkpoint_id": doc["parent_checkpoint_id"],
                        }
                    }
                    if doc.get("parent_checkpoint_id")
                    else None
                ),
            )

    def put(
        self,
        config: RunnableConfig,
        checkpoint: Checkpoint,
        metadata: CheckpointMetadata,
        new_versions: ChannelVersions,
    ) -> RunnableConfig:
        """Save a checkpoint to the database.

        The checkpoint is associated with the provided config and its parent
        config (if any).

        Args:
            config (RunnableConfig): The config to associate with the checkpoint.
            checkpoint (Checkpoint): The checkpoint to save.
            metadata (CheckpointMetadata): Additional metadata to save with the checkpoint.
            new_versions (ChannelVersions): New channel versions as of this write.

        Returns:
            RunnableConfig: Updated configuration after storing the checkpoint.
        """
        thread_id = config["configurable"]["thread_id"]
        checkpoint_ns = config["configurable"]["checkpoint_ns"]
        checkpoint_id = checkpoint["id"]
        # Serialized payloads are decoded to strings so the document stays a
        # plain JSON object.
        type_, serialized_checkpoint = self.serde.dumps_typed(checkpoint)
        if serialized_checkpoint:
            serialized_checkpoint = serialized_checkpoint.decode()
        serialized_metadata = self.serde.dumps(metadata)
        if serialized_metadata:
            serialized_metadata = serialized_metadata.decode()
        doc = {
            "parent_checkpoint_id": config["configurable"].get("checkpoint_id"),
            "type": type_,
            "checkpoint": serialized_checkpoint,
            "metadata": serialized_metadata,
            "thread_id": thread_id,
            "checkpoint_ns": checkpoint_ns,
            "checkpoint_id": checkpoint_id,
        }
        upsert_key = f"{thread_id}::{checkpoint_ns}::{checkpoint_id}"
        collection = self.bucket.scope(self.scope_name).collection("checkpoints")
        collection.upsert(upsert_key, doc, UpsertOptions(timeout=timedelta(seconds=5)))
        return {
            "configurable": {
                "thread_id": thread_id,
                "checkpoint_ns": checkpoint_ns,
                "checkpoint_id": checkpoint_id,
            }
        }

    def put_writes(
        self,
        config: RunnableConfig,
        writes: Sequence[Tuple[str, Any]],
        task_id: str,
    ) -> None:
        """Store intermediate writes linked to a checkpoint.

        Args:
            config (RunnableConfig): Configuration of the related checkpoint.
            writes (Sequence[Tuple[str, Any]]): List of writes to store, each as (channel, value) pair.
            task_id (str): Identifier for the task creating the writes.
        """
        thread_id = config["configurable"]["thread_id"]
        checkpoint_ns = config["configurable"]["checkpoint_ns"]
        checkpoint_id = config["configurable"]["checkpoint_id"]
        collection = self.bucket.scope(self.scope_name).collection("checkpoint_writes")
        for idx, (channel, value) in enumerate(writes):
            # One document per write, keyed so re-runs of the same task/index
            # overwrite rather than duplicate.
            upsert_key = f"{thread_id}::{checkpoint_ns}::{checkpoint_id}::{task_id}::{idx}"
            type_, serialized_value = self.serde.dumps_typed(value)
            if serialized_value:
                # NOTE(review): swapping single for double quotes can corrupt
                # values that legitimately contain apostrophes; kept for
                # compatibility with existing stored data.
                serialized_value = serialized_value.decode().replace("'", '"')
            doc = {
                "thread_id": thread_id,
                "checkpoint_ns": checkpoint_ns,
                "checkpoint_id": checkpoint_id,
                "task_id": task_id,
                "idx": idx,
                "channel": channel,
                "type": type_,
                "value": serialized_value,
            }
            collection.upsert(upsert_key, doc, UpsertOptions(timeout=timedelta(seconds=5)))
Below is a reference implementation of AsyncCouchbaseSaver (for asynchronous use of graph, i.e. .ainvoke()
, .astream()
). AsyncCouchbaseSaver implements four methods that are required for any async checkpointer:
.aput
- Store a checkpoint with its configuration and metadata..aput_writes
- Store intermediate writes linked to a checkpoint (i.e. pending writes)..aget_tuple
- Fetch a checkpoint tuple for a given configuration (thread_id
and checkpoint_id
)..alist
- List checkpoints that match a given configuration and filter criteria.from contextlib import asynccontextmanager
from datetime import timedelta
from typing import Any, AsyncIterator, Dict, Optional, Sequence, Tuple
from langchain_core.runnables import RunnableConfig
from acouchbase.cluster import Cluster as ACluster
from acouchbase.bucket import Bucket as ABucket
from couchbase.auth import PasswordAuthenticator
from couchbase.options import ClusterOptions, QueryOptions, UpsertOptions
from langgraph.checkpoint.base import (
BaseCheckpointSaver,
ChannelVersions,
Checkpoint,
CheckpointMetadata,
CheckpointTuple,
get_checkpoint_id,
)
class AsyncCouchbaseSaver(BaseCheckpointSaver):
    """A checkpoint saver that stores checkpoints in a Couchbase database.

    Asynchronous counterpart of CouchbaseSaver: checkpoints live in the
    ``checkpoints`` collection and pending writes in ``checkpoint_writes``.
    """

    cluster: ACluster
    bucket: ABucket

    def __init__(
        self,
        cluster: ACluster,
    ) -> None:
        """Initialize the saver with a connected async cluster.

        The bucket and the bucket/scope names are attached by
        ``from_conn_info``, because opening an async bucket requires
        awaiting ``on_connect``.
        """
        super().__init__()
        self.cluster = cluster

    @classmethod
    @asynccontextmanager
    async def from_conn_info(
        cls,
        *,
        cb_conn_str: str,
        cb_username: str,
        cb_password: str,
        bucket_name: str,
        scope_name: str,
    ) -> AsyncIterator["AsyncCouchbaseSaver"]:
        """Connect to a cluster and yield a saver bound to it.

        The cluster connection is closed when the context exits.

        Args:
            cb_conn_str (str): Couchbase connection string.
            cb_username (str): Username used for authentication.
            cb_password (str): Password used for authentication.
            bucket_name (str): Bucket holding the checkpoint collections.
            scope_name (str): Scope holding the checkpoint collections.

        Yields:
            AsyncCouchbaseSaver: A ready-to-use saver.
        """
        cluster = None
        try:
            auth = PasswordAuthenticator(cb_username, cb_password)
            options = ClusterOptions(auth)
            cluster = await ACluster.connect(cb_conn_str, options)
            saver = cls(cluster)
            # Attach state to the instance. The previous implementation set
            # these on the class object, which made multiple concurrent
            # savers step on each other's configuration.
            saver.bucket = cluster.bucket(bucket_name)
            await saver.bucket.on_connect()
            saver.bucket_name = bucket_name
            saver.scope_name = scope_name
            yield saver
        finally:
            if cluster:
                await cluster.close()

    async def aget_tuple(self, config: RunnableConfig) -> Optional[CheckpointTuple]:
        """Get a checkpoint tuple from the database asynchronously.

        If the config contains a "checkpoint_id" key, the checkpoint with
        the matching thread ID and checkpoint ID is retrieved. Otherwise,
        the latest checkpoint for the given thread ID is retrieved.

        Args:
            config (RunnableConfig): The config to use for retrieving the checkpoint.

        Returns:
            Optional[CheckpointTuple]: The retrieved checkpoint tuple, or
                None if no matching checkpoint was found.
        """
        thread_id = config["configurable"]["thread_id"]
        checkpoint_ns = config["configurable"].get("checkpoint_ns", "")
        checkpoint_id = get_checkpoint_id(config)
        # Bucket/scope identifiers cannot be bound as query parameters, so
        # they are interpolated; all user-supplied values go through
        # positional parameters.
        if checkpoint_id:
            query = f'SELECT * FROM {self.bucket_name}.{self.scope_name}.`checkpoints` WHERE thread_id = $1 AND checkpoint_ns = $2 AND checkpoint_id = $3 ORDER BY checkpoint_id DESC LIMIT 1'
            query_params = [thread_id, checkpoint_ns, checkpoint_id]
        else:
            query = f'SELECT * FROM {self.bucket_name}.{self.scope_name}.`checkpoints` WHERE thread_id = $1 AND checkpoint_ns = $2 ORDER BY checkpoint_id DESC LIMIT 1'
            query_params = [thread_id, checkpoint_ns]
        result = self.cluster.query(query, QueryOptions(positional_parameters=query_params))
        async for row in result:
            doc = row["checkpoints"]
            config_values = {
                "thread_id": thread_id,
                "checkpoint_ns": checkpoint_ns,
                "checkpoint_id": doc["checkpoint_id"],
            }
            # Checkpoints are stored as decoded strings (see aput), so they
            # are re-encoded before deserialization.
            checkpoint = self.serde.loads_typed((doc["type"], doc["checkpoint"].encode()))
            serialized_writes_query = f'SELECT * FROM {self.bucket_name}.{self.scope_name}.`checkpoint_writes` WHERE thread_id = $1 AND checkpoint_ns = $2 AND checkpoint_id = $3'
            serialized_writes_params = [thread_id, checkpoint_ns, doc["checkpoint_id"] or ""]
            serialized_writes_result = self.cluster.query(
                serialized_writes_query,
                QueryOptions(positional_parameters=serialized_writes_params),
            )
            pending_writes = []
            async for write_doc in serialized_writes_result:
                checkpoint_writes = write_doc.get("checkpoint_writes", {})
                if "task_id" not in checkpoint_writes:
                    # Malformed write documents are reported and skipped
                    # rather than aborting the whole lookup.
                    print("Error: 'task_id' is not present in checkpoint_writes")
                else:
                    pending_writes.append(
                        (
                            checkpoint_writes["task_id"],
                            checkpoint_writes["channel"],
                            self.serde.loads_typed((checkpoint_writes["type"], checkpoint_writes["value"])),
                        )
                    )
            # Return on the first (and only, due to LIMIT 1) row.
            return CheckpointTuple(
                {"configurable": config_values},
                checkpoint,
                self.serde.loads(doc["metadata"].encode()),
                (
                    {
                        "configurable": {
                            "thread_id": thread_id,
                            "checkpoint_ns": checkpoint_ns,
                            "checkpoint_id": doc["parent_checkpoint_id"],
                        }
                    }
                    if doc.get("parent_checkpoint_id")
                    else None
                ),
                pending_writes,
            )

    async def alist(
        self,
        config: Optional[RunnableConfig],
        *,
        filter: Optional[Dict[str, Any]] = None,
        before: Optional[RunnableConfig] = None,
        limit: Optional[int] = None,
    ) -> AsyncIterator[CheckpointTuple]:
        """List checkpoints from the database asynchronously.

        Checkpoints are ordered by checkpoint ID in descending order
        (newest first).

        Args:
            config (Optional[RunnableConfig]): The config to use for listing the checkpoints.
            filter (Optional[Dict[str, Any]]): Additional filtering criteria for metadata. Defaults to None.
            before (Optional[RunnableConfig]): If provided, only checkpoints before the specified checkpoint ID are returned. Defaults to None.
            limit (Optional[int]): The maximum number of checkpoints to return. Defaults to None.

        Yields:
            AsyncIterator[CheckpointTuple]: An asynchronous iterator of checkpoint tuples.
        """
        query = f"SELECT * FROM {self.bucket_name}.{self.scope_name}.`checkpoints` WHERE 1=1"
        query_params = []
        if config is not None:
            query += " AND thread_id = $1 AND checkpoint_ns = $2"
            query_params.extend([config["configurable"]["thread_id"], config["configurable"].get("checkpoint_ns", "")])
        if filter:
            # NOTE(review): metadata is persisted as a serialized JSON string
            # (see aput), so this path-based filter presumably only matches
            # if metadata is stored as an object -- verify before relying on it.
            for key, value in filter.items():
                query += f" AND metadata.{key} = ${len(query_params) + 1}"
                query_params.append(value)
        if before is not None:
            query += f" AND checkpoint_id < ${len(query_params) + 1}"
            query_params.append(before["configurable"]["checkpoint_id"])
        query += " ORDER BY checkpoint_id DESC"
        if limit is not None:
            query += f" LIMIT {limit}"
        result = self.cluster.query(query, QueryOptions(positional_parameters=query_params))
        async for row in result:
            doc = row["checkpoints"]
            # Re-encode the stored string before deserializing, matching
            # aget_tuple (the original passed the raw str here).
            checkpoint = self.serde.loads_typed((doc["type"], doc["checkpoint"].encode()))
            yield CheckpointTuple(
                {
                    "configurable": {
                        "thread_id": doc["thread_id"],
                        "checkpoint_ns": doc["checkpoint_ns"],
                        "checkpoint_id": doc["checkpoint_id"],
                    }
                },
                checkpoint,
                self.serde.loads(doc["metadata"].encode()),
                (
                    {
                        "configurable": {
                            "thread_id": doc["thread_id"],
                            "checkpoint_ns": doc["checkpoint_ns"],
                            "checkpoint_id": doc["parent_checkpoint_id"],
                        }
                    }
                    if doc.get("parent_checkpoint_id")
                    else None
                ),
            )

    async def aput(
        self,
        config: RunnableConfig,
        checkpoint: Checkpoint,
        metadata: CheckpointMetadata,
        new_versions: ChannelVersions,
    ) -> RunnableConfig:
        """Save a checkpoint to the database asynchronously.

        The checkpoint is associated with the provided config and its parent
        config (if any).

        Args:
            config (RunnableConfig): The config to associate with the checkpoint.
            checkpoint (Checkpoint): The checkpoint to save.
            metadata (CheckpointMetadata): Additional metadata to save with the checkpoint.
            new_versions (ChannelVersions): New channel versions as of this write.

        Returns:
            RunnableConfig: Updated configuration after storing the checkpoint.
        """
        thread_id = config["configurable"]["thread_id"]
        checkpoint_ns = config["configurable"]["checkpoint_ns"]
        checkpoint_id = checkpoint["id"]
        # Serialized payloads are decoded to strings so the document stays a
        # plain JSON object.
        type_, serialized_checkpoint = self.serde.dumps_typed(checkpoint)
        if serialized_checkpoint:
            serialized_checkpoint = serialized_checkpoint.decode()
        serialized_metadata = self.serde.dumps(metadata)
        if serialized_metadata:
            serialized_metadata = serialized_metadata.decode()
        doc = {
            "parent_checkpoint_id": config["configurable"].get("checkpoint_id"),
            "type": type_,
            "checkpoint": serialized_checkpoint,
            "metadata": serialized_metadata,
            "thread_id": thread_id,
            "checkpoint_ns": checkpoint_ns,
            "checkpoint_id": checkpoint_id,
        }
        upsert_key = f"{thread_id}::{checkpoint_ns}::{checkpoint_id}"
        collection = self.bucket.scope(self.scope_name).collection("checkpoints")
        await collection.upsert(upsert_key, doc, UpsertOptions(timeout=timedelta(seconds=5)))
        return {
            "configurable": {
                "thread_id": thread_id,
                "checkpoint_ns": checkpoint_ns,
                "checkpoint_id": checkpoint_id,
            }
        }

    async def aput_writes(
        self,
        config: RunnableConfig,
        writes: Sequence[Tuple[str, Any]],
        task_id: str,
    ) -> None:
        """Store intermediate writes linked to a checkpoint asynchronously.

        Args:
            config (RunnableConfig): Configuration of the related checkpoint.
            writes (Sequence[Tuple[str, Any]]): List of writes to store, each as (channel, value) pair.
            task_id (str): Identifier for the task creating the writes.
        """
        thread_id = config["configurable"]["thread_id"]
        checkpoint_ns = config["configurable"]["checkpoint_ns"]
        checkpoint_id = config["configurable"]["checkpoint_id"]
        collection = self.bucket.scope(self.scope_name).collection("checkpoint_writes")
        for idx, (channel, value) in enumerate(writes):
            # One document per write, keyed so re-runs of the same task/index
            # overwrite rather than duplicate.
            upsert_key = f"{thread_id}::{checkpoint_ns}::{checkpoint_id}::{task_id}::{idx}"
            type_, serialized_value = self.serde.dumps_typed(value)
            if serialized_value:
                # NOTE(review): swapping single for double quotes can corrupt
                # values that legitimately contain apostrophes; kept for
                # compatibility with existing stored data.
                serialized_value = serialized_value.decode().replace("'", '"')
            doc = {
                "thread_id": thread_id,
                "checkpoint_ns": checkpoint_ns,
                "checkpoint_id": checkpoint_id,
                "task_id": task_id,
                "idx": idx,
                "channel": channel,
                "type": type_,
                "value": serialized_value,
            }
            await collection.upsert(upsert_key, doc, UpsertOptions(timeout=timedelta(seconds=5)))
We are using a tool get_weather
which gives weather information based on the city. We are also setting up the ChatGPT model here
from typing import Literal
from langchain_core.runnables import ConfigurableField
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import create_react_agent
@tool
def get_weather(city: Literal["nyc", "sf"]):
    """Use this to get weather information."""
    # Canned forecasts keyed by the two supported cities.
    forecasts = {
        "nyc": "It might be cloudy in nyc",
        "sf": "It's always sunny in sf",
    }
    try:
        return forecasts[city]
    except KeyError:
        # Match the original's exception type and suppress the KeyError context.
        raise AssertionError("Unknown city") from None

tools = [get_weather]
model = ChatOpenAI(model_name="gpt-4o-mini", temperature=0)
Here we will create a Couchbase connection. We are using local setup with bucket test
, langgraph
scope. We will also require checkpoints
and checkpoint_writes
as collections inside.
Then a ReAct Agent is created with GPT Model, weather tool and Couchbase checkpointer.
LangGraph's graph is invoked with message for GPT, storing all the state in Couchbase. We use get, get_tuple and list methods to fetch the states again
# Open a connection to the local Couchbase cluster and use it as the
# checkpointer backing the ReAct agent.
connection_settings = {
    "cb_conn_str": "couchbase://localhost",
    "cb_username": "Administrator",
    "cb_password": "password",
    "bucket_name": "test",
    "scope_name": "langgraph",
}
with CouchbaseSaver.from_conn_info(**connection_settings) as checkpointer:
    graph = create_react_agent(model, tools=tools, checkpointer=checkpointer)
    config = {"configurable": {"thread_id": "1"}}
    res = graph.invoke({"messages": [("human", "what's the weather in sf")]}, config)
    # Read the persisted state back in three different ways.
    latest_checkpoint = checkpointer.get(config)
    latest_checkpoint_tuple = checkpointer.get_tuple(config)
    checkpoint_tuples = list(checkpointer.list(config))
latest_checkpoint
{'v': 1,
'ts': '2024-09-02T15:36:56.312466+00:00',
'id': '1ef69412-f89c-6a04-8002-1edbcb45b47f',
'channel_values': {'messages': [HumanMessage(content="what's the weather in sf", id='b1e35f4e-28b0-4e98-aedb-9ae848e3d8a9'),
AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_WDTjM34Km4HcTubnpR3CzchQ', 'function': {'arguments': '{"city":"sf"}', 'name': 'get_weather'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 57, 'total_tokens': 71}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_f33667828e', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-0bc63960-ba12-46cf-b5dc-137f29a1238b-0', tool_calls=[{'name': 'get_weather', 'args': {'city': 'sf'}, 'id': 'call_WDTjM34Km4HcTubnpR3CzchQ', 'type': 'tool_call'}], usage_metadata={'input_tokens': 57, 'output_tokens': 14, 'total_tokens': 71}),
ToolMessage(content="It's always sunny in sf", name='get_weather', id='7032ea51-9e50-4564-bcae-2645ecd0b55f', tool_call_id='call_WDTjM34Km4HcTubnpR3CzchQ')],
'tools': 'tools'},
'channel_versions': {'__start__': 2,
'messages': 4,
'start:agent': 3,
'agent': 4,
'branch:agent:should_continue:tools': 4,
'tools': 4},
'versions_seen': {'__input__': {},
'__start__': {'__start__': 1},
'agent': {'start:agent': 2},
'tools': {'branch:agent:should_continue:tools': 3}},
'pending_sends': []}
latest_checkpoint_tuple
CheckpointTuple(config={'configurable': {'thread_id': '1', 'checkpoint_ns': '', 'checkpoint_id': '1ef69413-0357-6674-8003-77dd94f370df'}}, checkpoint={'v': 1, 'ts': '2024-09-02T15:36:57.437545+00:00', 'id': '1ef69413-0357-6674-8003-77dd94f370df', 'channel_values': {'messages': [HumanMessage(content="what's the weather in sf", id='b1e35f4e-28b0-4e98-aedb-9ae848e3d8a9'), AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_WDTjM34Km4HcTubnpR3CzchQ', 'function': {'arguments': '{"city":"sf"}', 'name': 'get_weather'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 57, 'total_tokens': 71}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_f33667828e', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-0bc63960-ba12-46cf-b5dc-137f29a1238b-0', tool_calls=[{'name': 'get_weather', 'args': {'city': 'sf'}, 'id': 'call_WDTjM34Km4HcTubnpR3CzchQ', 'type': 'tool_call'}], usage_metadata={'input_tokens': 57, 'output_tokens': 14, 'total_tokens': 71}), ToolMessage(content="It's always sunny in sf", name='get_weather', id='7032ea51-9e50-4564-bcae-2645ecd0b55f', tool_call_id='call_WDTjM34Km4HcTubnpR3CzchQ'), AIMessage(content='The weather in San Francisco is always sunny!', response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 84, 'total_tokens': 94}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_f33667828e', 'finish_reason': 'stop', 'logprobs': None}, id='run-a462cd6b-c35b-4443-8611-d5fa9b79f3a8-0', usage_metadata={'input_tokens': 84, 'output_tokens': 10, 'total_tokens': 94})], 'agent': 'agent'}, 'channel_versions': {'__start__': 2, 'messages': 5, 'start:agent': 3, 'agent': 5, 'branch:agent:should_continue:tools': 4, 'tools': 5}, 'versions_seen': {'__input__': {}, '__start__': {'__start__': 1}, 'agent': {'start:agent': 2, 'tools': 4}, 'tools': {'branch:agent:should_continue:tools': 3}}, 'pending_sends': []}, metadata={'source': 'loop', 'writes': 
{'agent': {'messages': [AIMessage(content='The weather in San Francisco is always sunny!', response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 84, 'total_tokens': 94}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_f33667828e', 'finish_reason': 'stop', 'logprobs': None}, id='run-a462cd6b-c35b-4443-8611-d5fa9b79f3a8-0', usage_metadata={'input_tokens': 84, 'output_tokens': 10, 'total_tokens': 94})]}}, 'step': 3, 'parents': {}}, parent_config={'configurable': {'thread_id': '1', 'checkpoint_ns': '', 'checkpoint_id': '1ef69412-f89c-6a04-8002-1edbcb45b47f'}}, pending_writes=[])
checkpoint_tuples
[CheckpointTuple(config={'configurable': {'thread_id': '1', 'checkpoint_ns': '', 'checkpoint_id': '1ef69413-0357-6674-8003-77dd94f370df'}}, checkpoint={'v': 1, 'ts': '2024-09-02T15:36:57.437545+00:00', 'id': '1ef69413-0357-6674-8003-77dd94f370df', 'channel_values': {'messages': [HumanMessage(content="what's the weather in sf", id='b1e35f4e-28b0-4e98-aedb-9ae848e3d8a9'), AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_WDTjM34Km4HcTubnpR3CzchQ', 'function': {'arguments': '{"city":"sf"}', 'name': 'get_weather'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 57, 'total_tokens': 71}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_f33667828e', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-0bc63960-ba12-46cf-b5dc-137f29a1238b-0', tool_calls=[{'name': 'get_weather', 'args': {'city': 'sf'}, 'id': 'call_WDTjM34Km4HcTubnpR3CzchQ', 'type': 'tool_call'}], usage_metadata={'input_tokens': 57, 'output_tokens': 14, 'total_tokens': 71}), ToolMessage(content="It's always sunny in sf", name='get_weather', id='7032ea51-9e50-4564-bcae-2645ecd0b55f', tool_call_id='call_WDTjM34Km4HcTubnpR3CzchQ'), AIMessage(content='The weather in San Francisco is always sunny!', response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 84, 'total_tokens': 94}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_f33667828e', 'finish_reason': 'stop', 'logprobs': None}, id='run-a462cd6b-c35b-4443-8611-d5fa9b79f3a8-0', usage_metadata={'input_tokens': 84, 'output_tokens': 10, 'total_tokens': 94})], 'agent': 'agent'}, 'channel_versions': {'__start__': 2, 'messages': 5, 'start:agent': 3, 'agent': 5, 'branch:agent:should_continue:tools': 4, 'tools': 5}, 'versions_seen': {'__input__': {}, '__start__': {'__start__': 1}, 'agent': {'start:agent': 2, 'tools': 4}, 'tools': {'branch:agent:should_continue:tools': 3}}, 'pending_sends': []}, metadata={'source': 'loop', 'writes': 
{'agent': {'messages': [AIMessage(content='The weather in San Francisco is always sunny!', response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 84, 'total_tokens': 94}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_f33667828e', 'finish_reason': 'stop', 'logprobs': None}, id='run-a462cd6b-c35b-4443-8611-d5fa9b79f3a8-0', usage_metadata={'input_tokens': 84, 'output_tokens': 10, 'total_tokens': 94})]}}, 'step': 3, 'parents': {}}, parent_config={'configurable': {'thread_id': '1', 'checkpoint_ns': '', 'checkpoint_id': '1ef69412-f89c-6a04-8002-1edbcb45b47f'}}, pending_writes=None),
CheckpointTuple(config={'configurable': {'thread_id': '1', 'checkpoint_ns': '', 'checkpoint_id': '1ef69412-f89c-6a04-8002-1edbcb45b47f'}}, checkpoint={'v': 1, 'ts': '2024-09-02T15:36:56.312466+00:00', 'id': '1ef69412-f89c-6a04-8002-1edbcb45b47f', 'channel_values': {'messages': [HumanMessage(content="what's the weather in sf", id='b1e35f4e-28b0-4e98-aedb-9ae848e3d8a9'), AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_WDTjM34Km4HcTubnpR3CzchQ', 'function': {'arguments': '{"city":"sf"}', 'name': 'get_weather'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 57, 'total_tokens': 71}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_f33667828e', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-0bc63960-ba12-46cf-b5dc-137f29a1238b-0', tool_calls=[{'name': 'get_weather', 'args': {'city': 'sf'}, 'id': 'call_WDTjM34Km4HcTubnpR3CzchQ', 'type': 'tool_call'}], usage_metadata={'input_tokens': 57, 'output_tokens': 14, 'total_tokens': 71}), ToolMessage(content="It's always sunny in sf", name='get_weather', id='7032ea51-9e50-4564-bcae-2645ecd0b55f', tool_call_id='call_WDTjM34Km4HcTubnpR3CzchQ')], 'tools': 'tools'}, 'channel_versions': {'__start__': 2, 'messages': 4, 'start:agent': 3, 'agent': 4, 'branch:agent:should_continue:tools': 4, 'tools': 4}, 'versions_seen': {'__input__': {}, '__start__': {'__start__': 1}, 'agent': {'start:agent': 2}, 'tools': {'branch:agent:should_continue:tools': 3}}, 'pending_sends': []}, metadata={'source': 'loop', 'writes': {'tools': {'messages': [ToolMessage(content="It's always sunny in sf", name='get_weather', id='7032ea51-9e50-4564-bcae-2645ecd0b55f', tool_call_id='call_WDTjM34Km4HcTubnpR3CzchQ')]}}, 'step': 2, 'parents': {}}, parent_config={'configurable': {'thread_id': '1', 'checkpoint_ns': '', 'checkpoint_id': '1ef69412-f896-6208-8001-942e3147b40e'}}, pending_writes=None),
CheckpointTuple(config={'configurable': {'thread_id': '1', 'checkpoint_ns': '', 'checkpoint_id': '1ef69412-f896-6208-8001-942e3147b40e'}}, checkpoint={'v': 1, 'ts': '2024-09-02T15:36:56.309804+00:00', 'id': '1ef69412-f896-6208-8001-942e3147b40e', 'channel_values': {'messages': [HumanMessage(content="what's the weather in sf", id='b1e35f4e-28b0-4e98-aedb-9ae848e3d8a9'), AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_WDTjM34Km4HcTubnpR3CzchQ', 'function': {'arguments': '{"city":"sf"}', 'name': 'get_weather'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 57, 'total_tokens': 71}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_f33667828e', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-0bc63960-ba12-46cf-b5dc-137f29a1238b-0', tool_calls=[{'name': 'get_weather', 'args': {'city': 'sf'}, 'id': 'call_WDTjM34Km4HcTubnpR3CzchQ', 'type': 'tool_call'}], usage_metadata={'input_tokens': 57, 'output_tokens': 14, 'total_tokens': 71})], 'agent': 'agent', 'branch:agent:should_continue:tools': 'agent'}, 'channel_versions': {'__start__': 2, 'messages': 3, 'start:agent': 3, 'agent': 3, 'branch:agent:should_continue:tools': 3}, 'versions_seen': {'__input__': {}, '__start__': {'__start__': 1}, 'agent': {'start:agent': 2}}, 'pending_sends': []}, metadata={'source': 'loop', 'writes': {'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_WDTjM34Km4HcTubnpR3CzchQ', 'function': {'arguments': '{"city":"sf"}', 'name': 'get_weather'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 57, 'total_tokens': 71}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_f33667828e', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-0bc63960-ba12-46cf-b5dc-137f29a1238b-0', tool_calls=[{'name': 'get_weather', 'args': {'city': 'sf'}, 'id': 'call_WDTjM34Km4HcTubnpR3CzchQ', 'type': 'tool_call'}], 
usage_metadata={'input_tokens': 57, 'output_tokens': 14, 'total_tokens': 71})]}}, 'step': 1, 'parents': {}}, parent_config={'configurable': {'thread_id': '1', 'checkpoint_ns': '', 'checkpoint_id': '1ef69412-f1ab-6394-8000-bd11376d13f6'}}, pending_writes=None),
CheckpointTuple(config={'configurable': {'thread_id': '1', 'checkpoint_ns': '', 'checkpoint_id': '1ef69412-f1ab-6394-8000-bd11376d13f6'}}, checkpoint={'v': 1, 'ts': '2024-09-02T15:36:55.584442+00:00', 'id': '1ef69412-f1ab-6394-8000-bd11376d13f6', 'channel_values': {'messages': [HumanMessage(content="what's the weather in sf", id='b1e35f4e-28b0-4e98-aedb-9ae848e3d8a9')], 'start:agent': '__start__'}, 'channel_versions': {'__start__': 2, 'messages': 2, 'start:agent': 2}, 'versions_seen': {'__input__': {}, '__start__': {'__start__': 1}}, 'pending_sends': []}, metadata={'source': 'loop', 'writes': None, 'step': 0, 'parents': {}}, parent_config={'configurable': {'thread_id': '1', 'checkpoint_ns': '', 'checkpoint_id': '1ef69412-f1a7-6e92-bfff-1ee8c38ad7cf'}}, pending_writes=None),
CheckpointTuple(config={'configurable': {'thread_id': '1', 'checkpoint_ns': '', 'checkpoint_id': '1ef69412-f1a7-6e92-bfff-1ee8c38ad7cf'}}, checkpoint={'v': 1, 'ts': '2024-09-02T15:36:55.583086+00:00', 'id': '1ef69412-f1a7-6e92-bfff-1ee8c38ad7cf', 'channel_values': {'__start__': {'messages': [['human', "what's the weather in sf"]]}}, 'channel_versions': {'__start__': 1}, 'versions_seen': {'__input__': {}}, 'pending_sends': []}, metadata={'source': 'input', 'writes': {'__start__': {'messages': [['human', "what's the weather in sf"]]}}, 'step': -1, 'parents': {}}, parent_config=None, pending_writes=None)]
This is the asynchronous example. Here we will create a Couchbase connection. We are using a local setup with the bucket test
, langgraph
scope. We will also require checkpoints
and checkpoint_writes
as collections inside.
Then a ReAct Agent is created with GPT Model, weather tool and Couchbase checkpointer.
LangGraph's graph is invoked with message for GPT, storing all the state in Couchbase. We use get, get_tuple and list methods to fetch the states again
# Open an async Couchbase-backed checkpointer and run a ReAct agent with it,
# so every step of the graph run is persisted as a checkpoint in Couchbase.
# NOTE(review): assumes the `test` bucket with a `langgraph` scope (and the
# `checkpoints` / `checkpoint_writes` collections) already exists — confirm
# against your cluster setup.
async with AsyncCouchbaseSaver.from_conn_info(
    cb_conn_str="couchbase://localhost",  # local cluster; use a Capella connection string for cloud
    cb_username="Administrator",
    cb_password="password",
    bucket_name="test",
    scope_name="langgraph",
) as checkpointer:
    # Build the agent graph with the checkpointer attached; state for each
    # thread_id is saved/restored transparently by LangGraph.
    graph = create_react_agent(model, tools=tools, checkpointer=checkpointer)
    # thread_id "2" keeps this conversation's checkpoints separate from the
    # synchronous example's thread "1".
    config = {"configurable": {"thread_id": "2"}}
    res = await graph.ainvoke(
        {"messages": [("human", "what's the weather in nyc")]}, config
    )
    # Read the persisted state back three ways:
    # - aget: the latest raw checkpoint dict for this thread
    latest_checkpoint = await checkpointer.aget(config)
    # - aget_tuple: the latest checkpoint with its config/metadata/parent
    latest_checkpoint_tuple = await checkpointer.aget_tuple(config)
    # - alist: every checkpoint tuple for the thread, newest first
    checkpoint_tuples = [c async for c in checkpointer.alist(config)]
thread_id: 2 checkpoint_ns: checkpoint_id: None
SELECT * FROM test.langgraph.`checkpoints` WHERE thread_id = $1 AND checkpoint_ns = $2 ORDER BY checkpoint_id DESC LIMIT 1
thread_id: 2 checkpoint_ns: checkpoint_id: None
SELECT * FROM test.langgraph.`checkpoints` WHERE thread_id = $1 AND checkpoint_ns = $2 ORDER BY checkpoint_id DESC LIMIT 1
SELECT * FROM test.langgraph.`checkpoint_writes` WHERE thread_id = $1 AND checkpoint_ns = $2 AND checkpoint_id = $3 ['2', '', '1ef69413-10bb-6d4c-8003-19c466b7447a']
thread_id: 2 checkpoint_ns: checkpoint_id: None
SELECT * FROM test.langgraph.`checkpoints` WHERE thread_id = $1 AND checkpoint_ns = $2 ORDER BY checkpoint_id DESC LIMIT 1
SELECT * FROM test.langgraph.`checkpoint_writes` WHERE thread_id = $1 AND checkpoint_ns = $2 AND checkpoint_id = $3 ['2', '', '1ef69413-10bb-6d4c-8003-19c466b7447a']
latest_checkpoint
{'v': 1,
'ts': '2024-09-02T15:36:58.841829+00:00',
'id': '1ef69413-10bb-6d4c-8003-19c466b7447a',
'channel_values': {'messages': [HumanMessage(content="what's the weather in nyc", id='38e11355-dfcb-4419-a866-ce57e7879aa8'),
AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_uqSSzLFdE0jhG6OhbKuQfcyB', 'function': {'arguments': '{"city":"nyc"}', 'name': 'get_weather'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 15, 'prompt_tokens': 58, 'total_tokens': 73}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_f33667828e', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-2ab2d766-1e9f-47a0-a50b-5d1c84a94dd6-0', tool_calls=[{'name': 'get_weather', 'args': {'city': 'nyc'}, 'id': 'call_uqSSzLFdE0jhG6OhbKuQfcyB', 'type': 'tool_call'}], usage_metadata={'input_tokens': 58, 'output_tokens': 15, 'total_tokens': 73}),
ToolMessage(content='It might be cloudy in nyc', name='get_weather', id='a56b34e5-6915-494c-b2e3-8eafe1d66119', tool_call_id='call_uqSSzLFdE0jhG6OhbKuQfcyB'),
AIMessage(content='The weather in NYC might be cloudy.', response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 88, 'total_tokens': 97}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_f33667828e', 'finish_reason': 'stop', 'logprobs': None}, id='run-dd0a701e-52d0-499b-847e-9b93577b4508-0', usage_metadata={'input_tokens': 88, 'output_tokens': 9, 'total_tokens': 97})],
'agent': 'agent'},
'channel_versions': {'__start__': 2,
'messages': 5,
'start:agent': 3,
'agent': 5,
'branch:agent:should_continue:tools': 4,
'tools': 5},
'versions_seen': {'__input__': {},
'__start__': {'__start__': 1},
'agent': {'start:agent': 2, 'tools': 4},
'tools': {'branch:agent:should_continue:tools': 3}},
'pending_sends': []}
latest_checkpoint_tuple
CheckpointTuple(config={'configurable': {'thread_id': '2', 'checkpoint_ns': '', 'checkpoint_id': '1ef69413-10bb-6d4c-8003-19c466b7447a'}}, checkpoint={'v': 1, 'ts': '2024-09-02T15:36:58.841829+00:00', 'id': '1ef69413-10bb-6d4c-8003-19c466b7447a', 'channel_values': {'messages': [HumanMessage(content="what's the weather in nyc", id='38e11355-dfcb-4419-a866-ce57e7879aa8'), AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_uqSSzLFdE0jhG6OhbKuQfcyB', 'function': {'arguments': '{"city":"nyc"}', 'name': 'get_weather'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 15, 'prompt_tokens': 58, 'total_tokens': 73}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_f33667828e', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-2ab2d766-1e9f-47a0-a50b-5d1c84a94dd6-0', tool_calls=[{'name': 'get_weather', 'args': {'city': 'nyc'}, 'id': 'call_uqSSzLFdE0jhG6OhbKuQfcyB', 'type': 'tool_call'}], usage_metadata={'input_tokens': 58, 'output_tokens': 15, 'total_tokens': 73}), ToolMessage(content='It might be cloudy in nyc', name='get_weather', id='a56b34e5-6915-494c-b2e3-8eafe1d66119', tool_call_id='call_uqSSzLFdE0jhG6OhbKuQfcyB'), AIMessage(content='The weather in NYC might be cloudy.', response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 88, 'total_tokens': 97}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_f33667828e', 'finish_reason': 'stop', 'logprobs': None}, id='run-dd0a701e-52d0-499b-847e-9b93577b4508-0', usage_metadata={'input_tokens': 88, 'output_tokens': 9, 'total_tokens': 97})], 'agent': 'agent'}, 'channel_versions': {'__start__': 2, 'messages': 5, 'start:agent': 3, 'agent': 5, 'branch:agent:should_continue:tools': 4, 'tools': 5}, 'versions_seen': {'__input__': {}, '__start__': {'__start__': 1}, 'agent': {'start:agent': 2, 'tools': 4}, 'tools': {'branch:agent:should_continue:tools': 3}}, 'pending_sends': []}, metadata={'source': 'loop', 'writes': {'agent': 
{'messages': [AIMessage(content='The weather in NYC might be cloudy.', response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 88, 'total_tokens': 97}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_f33667828e', 'finish_reason': 'stop', 'logprobs': None}, id='run-dd0a701e-52d0-499b-847e-9b93577b4508-0', usage_metadata={'input_tokens': 88, 'output_tokens': 9, 'total_tokens': 97})]}}, 'step': 3, 'parents': {}}, parent_config={'configurable': {'thread_id': '2', 'checkpoint_ns': '', 'checkpoint_id': '1ef69413-0aa2-600a-8002-2197bf6a772b'}}, pending_writes=[])
checkpoint_tuples
[CheckpointTuple(config={'configurable': {'thread_id': '2', 'checkpoint_ns': '', 'checkpoint_id': '1ef69413-10bb-6d4c-8003-19c466b7447a'}}, checkpoint={'v': 1, 'ts': '2024-09-02T15:36:58.841829+00:00', 'id': '1ef69413-10bb-6d4c-8003-19c466b7447a', 'channel_values': {'messages': [HumanMessage(content="what's the weather in nyc", id='38e11355-dfcb-4419-a866-ce57e7879aa8'), AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_uqSSzLFdE0jhG6OhbKuQfcyB', 'function': {'arguments': '{"city":"nyc"}', 'name': 'get_weather'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 15, 'prompt_tokens': 58, 'total_tokens': 73}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_f33667828e', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-2ab2d766-1e9f-47a0-a50b-5d1c84a94dd6-0', tool_calls=[{'name': 'get_weather', 'args': {'city': 'nyc'}, 'id': 'call_uqSSzLFdE0jhG6OhbKuQfcyB', 'type': 'tool_call'}], usage_metadata={'input_tokens': 58, 'output_tokens': 15, 'total_tokens': 73}), ToolMessage(content='It might be cloudy in nyc', name='get_weather', id='a56b34e5-6915-494c-b2e3-8eafe1d66119', tool_call_id='call_uqSSzLFdE0jhG6OhbKuQfcyB'), AIMessage(content='The weather in NYC might be cloudy.', response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 88, 'total_tokens': 97}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_f33667828e', 'finish_reason': 'stop', 'logprobs': None}, id='run-dd0a701e-52d0-499b-847e-9b93577b4508-0', usage_metadata={'input_tokens': 88, 'output_tokens': 9, 'total_tokens': 97})], 'agent': 'agent'}, 'channel_versions': {'__start__': 2, 'messages': 5, 'start:agent': 3, 'agent': 5, 'branch:agent:should_continue:tools': 4, 'tools': 5}, 'versions_seen': {'__input__': {}, '__start__': {'__start__': 1}, 'agent': {'start:agent': 2, 'tools': 4}, 'tools': {'branch:agent:should_continue:tools': 3}}, 'pending_sends': []}, metadata={'source': 'loop', 'writes': {'agent': 
{'messages': [AIMessage(content='The weather in NYC might be cloudy.', response_metadata={'token_usage': {'completion_tokens': 9, 'prompt_tokens': 88, 'total_tokens': 97}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_f33667828e', 'finish_reason': 'stop', 'logprobs': None}, id='run-dd0a701e-52d0-499b-847e-9b93577b4508-0', usage_metadata={'input_tokens': 88, 'output_tokens': 9, 'total_tokens': 97})]}}, 'step': 3, 'parents': {}}, parent_config={'configurable': {'thread_id': '2', 'checkpoint_ns': '', 'checkpoint_id': '1ef69413-0aa2-600a-8002-2197bf6a772b'}}, pending_writes=None),
CheckpointTuple(config={'configurable': {'thread_id': '2', 'checkpoint_ns': '', 'checkpoint_id': '1ef69413-0aa2-600a-8002-2197bf6a772b'}}, checkpoint={'v': 1, 'ts': '2024-09-02T15:36:58.202103+00:00', 'id': '1ef69413-0aa2-600a-8002-2197bf6a772b', 'channel_values': {'messages': [HumanMessage(content="what's the weather in nyc", id='38e11355-dfcb-4419-a866-ce57e7879aa8'), AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_uqSSzLFdE0jhG6OhbKuQfcyB', 'function': {'arguments': '{"city":"nyc"}', 'name': 'get_weather'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 15, 'prompt_tokens': 58, 'total_tokens': 73}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_f33667828e', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-2ab2d766-1e9f-47a0-a50b-5d1c84a94dd6-0', tool_calls=[{'name': 'get_weather', 'args': {'city': 'nyc'}, 'id': 'call_uqSSzLFdE0jhG6OhbKuQfcyB', 'type': 'tool_call'}], usage_metadata={'input_tokens': 58, 'output_tokens': 15, 'total_tokens': 73}), ToolMessage(content='It might be cloudy in nyc', name='get_weather', id='a56b34e5-6915-494c-b2e3-8eafe1d66119', tool_call_id='call_uqSSzLFdE0jhG6OhbKuQfcyB')], 'tools': 'tools'}, 'channel_versions': {'__start__': 2, 'messages': 4, 'start:agent': 3, 'agent': 4, 'branch:agent:should_continue:tools': 4, 'tools': 4}, 'versions_seen': {'__input__': {}, '__start__': {'__start__': 1}, 'agent': {'start:agent': 2}, 'tools': {'branch:agent:should_continue:tools': 3}}, 'pending_sends': []}, metadata={'source': 'loop', 'writes': {'tools': {'messages': [ToolMessage(content='It might be cloudy in nyc', name='get_weather', id='a56b34e5-6915-494c-b2e3-8eafe1d66119', tool_call_id='call_uqSSzLFdE0jhG6OhbKuQfcyB')]}}, 'step': 2, 'parents': {}}, parent_config={'configurable': {'thread_id': '2', 'checkpoint_ns': '', 'checkpoint_id': '1ef69413-0a99-65c2-8001-b1a9f602ce61'}}, pending_writes=None),
CheckpointTuple(config={'configurable': {'thread_id': '2', 'checkpoint_ns': '', 'checkpoint_id': '1ef69413-0a99-65c2-8001-b1a9f602ce61'}}, checkpoint={'v': 1, 'ts': '2024-09-02T15:36:58.198566+00:00', 'id': '1ef69413-0a99-65c2-8001-b1a9f602ce61', 'channel_values': {'messages': [HumanMessage(content="what's the weather in nyc", id='38e11355-dfcb-4419-a866-ce57e7879aa8'), AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_uqSSzLFdE0jhG6OhbKuQfcyB', 'function': {'arguments': '{"city":"nyc"}', 'name': 'get_weather'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 15, 'prompt_tokens': 58, 'total_tokens': 73}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_f33667828e', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-2ab2d766-1e9f-47a0-a50b-5d1c84a94dd6-0', tool_calls=[{'name': 'get_weather', 'args': {'city': 'nyc'}, 'id': 'call_uqSSzLFdE0jhG6OhbKuQfcyB', 'type': 'tool_call'}], usage_metadata={'input_tokens': 58, 'output_tokens': 15, 'total_tokens': 73})], 'agent': 'agent', 'branch:agent:should_continue:tools': 'agent'}, 'channel_versions': {'__start__': 2, 'messages': 3, 'start:agent': 3, 'agent': 3, 'branch:agent:should_continue:tools': 3}, 'versions_seen': {'__input__': {}, '__start__': {'__start__': 1}, 'agent': {'start:agent': 2}}, 'pending_sends': []}, metadata={'source': 'loop', 'writes': {'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_uqSSzLFdE0jhG6OhbKuQfcyB', 'function': {'arguments': '{"city":"nyc"}', 'name': 'get_weather'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 15, 'prompt_tokens': 58, 'total_tokens': 73}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_f33667828e', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-2ab2d766-1e9f-47a0-a50b-5d1c84a94dd6-0', tool_calls=[{'name': 'get_weather', 'args': {'city': 'nyc'}, 'id': 'call_uqSSzLFdE0jhG6OhbKuQfcyB', 'type': 
'tool_call'}], usage_metadata={'input_tokens': 58, 'output_tokens': 15, 'total_tokens': 73})]}}, 'step': 1, 'parents': {}}, parent_config={'configurable': {'thread_id': '2', 'checkpoint_ns': '', 'checkpoint_id': '1ef69413-0400-67ce-8000-53532fd907e8'}}, pending_writes=None),
CheckpointTuple(config={'configurable': {'thread_id': '2', 'checkpoint_ns': '', 'checkpoint_id': '1ef69413-0400-67ce-8000-53532fd907e8'}}, checkpoint={'v': 1, 'ts': '2024-09-02T15:36:57.506804+00:00', 'id': '1ef69413-0400-67ce-8000-53532fd907e8', 'channel_values': {'messages': [HumanMessage(content="what's the weather in nyc", id='38e11355-dfcb-4419-a866-ce57e7879aa8')], 'start:agent': '__start__'}, 'channel_versions': {'__start__': 2, 'messages': 2, 'start:agent': 2}, 'versions_seen': {'__input__': {}, '__start__': {'__start__': 1}}, 'pending_sends': []}, metadata={'source': 'loop', 'writes': None, 'step': 0, 'parents': {}}, parent_config={'configurable': {'thread_id': '2', 'checkpoint_ns': '', 'checkpoint_id': '1ef69413-03fd-61e6-bfff-81609956e564'}}, pending_writes=None),
CheckpointTuple(config={'configurable': {'thread_id': '2', 'checkpoint_ns': '', 'checkpoint_id': '1ef69413-03fd-61e6-bfff-81609956e564'}}, checkpoint={'v': 1, 'ts': '2024-09-02T15:36:57.505419+00:00', 'id': '1ef69413-03fd-61e6-bfff-81609956e564', 'channel_values': {'__start__': {'messages': [['human', "what's the weather in nyc"]]}}, 'channel_versions': {'__start__': 1}, 'versions_seen': {'__input__': {}}, 'pending_sends': []}, metadata={'source': 'input', 'writes': {'__start__': {'messages': [['human', "what's the weather in nyc"]]}}, 'step': -1, 'parents': {}}, parent_config=None, pending_writes=None)]