diff --git a/src/docs.json b/src/docs.json
index 3b3041513a..7aac3491cd 100644
--- a/src/docs.json
+++ b/src/docs.json
@@ -1248,7 +1248,7 @@
 {
   "group": "Tutorials",
   "pages": [
-    "langsmith/autogen-integration",
+    "langsmith/deploy-other-frameworks",
     "langsmith/use-stream-react",
     "langsmith/generative-ui-react"
   ]
diff --git a/src/langsmith/app-development.mdx b/src/langsmith/app-development.mdx
index b36a0cae36..f7a1457465 100644
--- a/src/langsmith/app-development.mdx
+++ b/src/langsmith/app-development.mdx
@@ -8,7 +8,7 @@ mode: wide
 LangGraph provides the core abstractions and execution model, while LangSmith adds managed infrastructure, observability, deployment options, assistants, and concurrency controls—supporting the full lifecycle from development to production.
 
-LangSmith Deployment is framework-agnostic: you can deploy agents built with LangGraph or [other frameworks](/langsmith/autogen-integration). To get started with LangGraph itself, refer to the [LangGraph quickstart](/oss/langgraph/quickstart).
+LangSmith Deployment is framework-agnostic: you can deploy agents built with LangGraph or [other frameworks](/langsmith/deploy-other-frameworks). To get started with LangGraph itself, refer to the [LangGraph quickstart](/oss/langgraph/quickstart).
@@ -43,7 +43,7 @@
 Streaming, human-in-the-loop, webhooks, and concurrency controls like double-texting.
 Step-by-step examples: AutoGen integration, streaming UI, and generative UI in React.
diff --git a/src/langsmith/autogen-integration.mdx b/src/langsmith/autogen-integration.mdx
deleted file mode 100644
index 4e956ade36..0000000000
--- a/src/langsmith/autogen-integration.mdx
+++ /dev/null
@@ -1,327 +0,0 @@
----
-title: How to integrate LangGraph with AutoGen, CrewAI, and other frameworks
-sidebarTitle: Integrate LangGraph with AutoGen, CrewAI, and other frameworks
----
-This guide shows how to integrate AutoGen agents with LangGraph to leverage features like persistence, streaming, and memory, and then deploy the integrated solution to LangSmith for scalable production use. In this guide, we build a LangGraph chatbot that integrates with AutoGen, but you can follow the same approach with other frameworks.
-
-Integrating AutoGen with LangGraph provides several benefits:
-
-* Enhanced features: Add [persistence](/oss/langgraph/persistence), [streaming](/langsmith/streaming), [short and long-term memory](/oss/concepts/memory), and more to your AutoGen agents.
-* Multi-agent systems: Build [multi-agent systems](/oss/langchain/multi-agent) where individual agents are built with different frameworks.
-* Production deployment: Deploy your integrated solution to [LangSmith](/langsmith/home) for scalable production use.
-
-## Prerequisites
-
-* Python 3.9+
-* AutoGen: `pip install pyautogen`
-* LangGraph: `pip install langgraph`
-* OpenAI API key
-
-## Setup
-
-Set up your environment:
-
-```python
-import getpass
-import os
-
-
-def _set_env(var: str):
-    if not os.environ.get(var):
-        os.environ[var] = getpass.getpass(f"{var}: ")
-
-
-_set_env("OPENAI_API_KEY")
-```
-
-## 1. Define AutoGen agent
-
-Create an AutoGen agent that can execute code.
-This example is adapted from AutoGen's [official tutorials](https://github.com/microsoft/autogen/blob/0.2/notebook/agentchat_web_info.ipynb):
-
-```python
-import autogen
-import os
-
-config_list = [{"model": "gpt-4o", "api_key": os.environ["OPENAI_API_KEY"]}]
-
-llm_config = {
-    "timeout": 600,
-    "cache_seed": 42,
-    "config_list": config_list,
-    "temperature": 0,
-}
-
-autogen_agent = autogen.AssistantAgent(
-    name="assistant",
-    llm_config=llm_config,
-)
-
-user_proxy = autogen.UserProxyAgent(
-    name="user_proxy",
-    human_input_mode="NEVER",
-    max_consecutive_auto_reply=10,
-    is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"),
-    code_execution_config={
-        "work_dir": "web",
-        "use_docker": False,
-    },  # Set use_docker=True if Docker is available; running generated code in Docker is safer than running it directly.
-    llm_config=llm_config,
-    system_message="Reply TERMINATE if the task has been solved at full satisfaction. Otherwise, reply CONTINUE, or the reason why the task is not solved yet.",
-)
-```
-
-## 2. Create the graph
-
-Create a LangGraph chatbot graph that calls the AutoGen agent:
-
-```python
-from langchain_core.messages import convert_to_openai_messages
-from langgraph.graph import StateGraph, MessagesState, START
-from langgraph.checkpoint.memory import MemorySaver
-
-
-def call_autogen_agent(state: MessagesState):
-    # Convert LangGraph messages to OpenAI format for AutoGen
-    messages = convert_to_openai_messages(state["messages"])
-
-    # Get the last user message
-    last_message = messages[-1]
-
-    # Pass previous message history as context (excluding the last message)
-    carryover = messages[:-1] if len(messages) > 1 else []
-
-    # Initiate chat with AutoGen
-    response = user_proxy.initiate_chat(
-        autogen_agent,
-        message=last_message,
-        carryover=carryover,
-    )
-
-    # Extract the final response from the agent
-    final_content = response.chat_history[-1]["content"]
-
-    # Return the response in LangGraph format
-    return {"messages": {"role": "assistant", "content": final_content}}
-
-
-# Create the graph with memory for persistence
-checkpointer = MemorySaver()
-
-# Build the graph
-builder = StateGraph(MessagesState)
-builder.add_node("autogen", call_autogen_agent)
-builder.add_edge(START, "autogen")
-
-# Compile with checkpointer for persistence
-graph = builder.compile(checkpointer=checkpointer)
-```
-
-```python
-from IPython.display import display, Image
-
-display(Image(graph.get_graph().draw_mermaid_png()))
-```
-
-![LangGraph chatbot with one step: START routes to autogen, where call_autogen_agent sends the latest user message (with prior context) to the AutoGen agent.](/langsmith/images/autogen-output.png)
-
-## 3. Test the graph locally
-
-Before deploying to LangSmith, you can test the graph locally:
-
-```python {highlight={2,13}}
-# Pass a thread ID to persist agent outputs for future interactions
-config = {"configurable": {"thread_id": "1"}}
-
-for chunk in graph.stream(
-    {
-        "messages": [
-            {
-                "role": "user",
-                "content": "Find numbers between 10 and 30 in fibonacci sequence",
-            }
-        ]
-    },
-    config,
-):
-    print(chunk)
-```
-
-**Output:**
-
-```
-user_proxy (to assistant):
-
-Find numbers between 10 and 30 in fibonacci sequence
-
---------------------------------------------------------------------------------
-assistant (to user_proxy):
-
-To find numbers between 10 and 30 in the Fibonacci sequence, we can generate the Fibonacci sequence and check which numbers fall within this range.
-Here's a plan:
-
-1. Generate Fibonacci numbers starting from 0.
-2. Continue generating until the numbers exceed 30.
-3. Collect and print the numbers that are between 10 and 30.
-
-...
-```
-
-Because we're leveraging LangGraph's [persistence](/oss/langgraph/persistence) features, we can continue the conversation using the same thread ID; LangGraph will automatically pass the previous history to the AutoGen agent:
-
-```python {highlight={10}}
-for chunk in graph.stream(
-    {
-        "messages": [
-            {
-                "role": "user",
-                "content": "Multiply the last number by 3",
-            }
-        ]
-    },
-    config,
-):
-    print(chunk)
-```
-
-**Output:**
-
-```
-user_proxy (to assistant):
-
-Multiply the last number by 3
-Context:
-Find numbers between 10 and 30 in fibonacci sequence
-The Fibonacci numbers between 10 and 30 are 13 and 21.
-
-These numbers are part of the Fibonacci sequence, which is generated by adding the two preceding numbers to get the next number, starting from 0 and 1.
-
-The sequence goes: 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, ...
-
-As you can see, 13 and 21 are the only numbers in this sequence that fall between 10 and 30.
-
-TERMINATE
-
---------------------------------------------------------------------------------
-assistant (to user_proxy):
-
-The last number in the Fibonacci sequence between 10 and 30 is 21. Multiplying 21 by 3 gives:
-
-21 * 3 = 63
-
-TERMINATE
-
---------------------------------------------------------------------------------
-{'autogen': {'messages': {'role': 'assistant', 'content': 'The last number in the Fibonacci sequence between 10 and 30 is 21. Multiplying 21 by 3 gives:\n\n21 * 3 = 63\n\nTERMINATE'}}}
-```
-
-## 4. Prepare for deployment
-
-To deploy to LangSmith, create a file structure like the following:
-
-```
-my-autogen-agent/
-├── agent.py          # Your main agent code
-├── requirements.txt  # Python dependencies
-└── langgraph.json    # LangGraph configuration
-```
-
-```python agent.py
-import os
-import autogen
-from langchain_core.messages import convert_to_openai_messages
-from langgraph.graph import StateGraph, MessagesState, START
-from langgraph.checkpoint.memory import MemorySaver
-
-# AutoGen configuration
-config_list = [{"model": "gpt-4o", "api_key": os.environ["OPENAI_API_KEY"]}]
-
-llm_config = {
-    "timeout": 600,
-    "cache_seed": 42,
-    "config_list": config_list,
-    "temperature": 0,
-}
-
-# Create AutoGen agents
-autogen_agent = autogen.AssistantAgent(
-    name="assistant",
-    llm_config=llm_config,
-)
-
-user_proxy = autogen.UserProxyAgent(
-    name="user_proxy",
-    human_input_mode="NEVER",
-    max_consecutive_auto_reply=10,
-    is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"),
-    code_execution_config={
-        "work_dir": "/tmp/autogen_work",
-        "use_docker": False,
-    },
-    llm_config=llm_config,
-    system_message="Reply TERMINATE if the task has been solved at full satisfaction.",
-)
-
-def call_autogen_agent(state: MessagesState):
-    """Node function that calls the AutoGen agent"""
-    messages = convert_to_openai_messages(state["messages"])
-    last_message = messages[-1]
-    carryover = messages[:-1] if len(messages) > 1 else []
-
-    response = user_proxy.initiate_chat(
-        autogen_agent,
-        message=last_message,
-        carryover=carryover,
-    )
-
-    final_content = response.chat_history[-1]["content"]
-    return {"messages": {"role": "assistant", "content": final_content}}
-
-# Create and compile the graph
-def create_graph():
-    checkpointer = MemorySaver()
-    builder = StateGraph(MessagesState)
-    builder.add_node("autogen", call_autogen_agent)
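-    # A single-node graph: the edge added below routes START directly to
-    # the AutoGen node, so every turn goes straight to the agent.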
-    builder.add_edge(START, "autogen")
-    return builder.compile(checkpointer=checkpointer)
-
-# Export the graph for LangSmith
-graph = create_graph()
-```
-
-```txt requirements.txt
-langgraph>=0.1.0
-pyautogen>=0.2.0
-langchain-core>=0.1.0
-langchain-openai>=0.0.5
-```
-
-```json langgraph.json
-{
-  "dependencies": ["."],
-  "graphs": {
-    "autogen_agent": "./agent.py:graph"
-  },
-  "env": ".env"
-}
-```
-
-## 5. Deploy to LangSmith
-
-Install the LangGraph CLI, then deploy the graph:
-
-```bash pip
-pip install -U langgraph-cli
-```
-
-```bash uv
-uv add langgraph-cli
-```
-
-```
-langgraph deploy --config langgraph.json
-```
diff --git a/src/langsmith/deploy-other-frameworks.mdx b/src/langsmith/deploy-other-frameworks.mdx
new file mode 100644
index 0000000000..74ff1a0eba
--- /dev/null
+++ b/src/langsmith/deploy-other-frameworks.mdx
@@ -0,0 +1,105 @@
+---
+title: Deploy other frameworks
+sidebarTitle: Deploy other frameworks (e.g., Strands, CrewAI)
+---
+This guide shows you how to use the [Functional API](/oss/langgraph/functional-api) to deploy a [Strands agent](https://strandsagents.com/latest/documentation/docs/) on [LangSmith Deployment](/langsmith/deployments) and set up tracing for [LangSmith Observability](/langsmith/observability). You can follow the same approach with other frameworks, such as CrewAI, AutoGen, and Google ADK.
+
+Using the Functional API and deploying to LangSmith Deployment provides several benefits:
+
+* Production deployment: Deploy your integrated solution to [LangSmith Deployment](/langsmith/deployments) for scalable production use.
+* Enhanced features: With the Functional API, you can add [persistence](/oss/langgraph/persistence), [streaming](/langsmith/streaming), [short and long-term memory](/oss/concepts/memory), and more to your existing agents, with minimal changes to your existing code.
+* Multi-agent systems: Build [multi-agent systems](/oss/langchain/multi-agent) where individual agents are built with different frameworks.
+
+## Prerequisites
+
+* Python 3.9+
+* Dependencies: `pip install strands-agents strands-agents-tools langgraph`
+* AWS credentials in your environment
+
+## 1. Define Strands agent
+
+Create a Strands agent with pre-built tools:
+
+```python
+from strands import Agent
+from strands_tools import file_read, file_write, python_repl, shell, journal
+
+agent = Agent(
+    tools=[file_read, file_write, python_repl, shell, journal],
+    system_prompt="You are an Expert Software Developer Assistant specializing in web frameworks. Your task is to analyze project structures and identify mappings.",
+    model="us.anthropic.claude-3-7-sonnet-20250219-v1:0",
+)
+```
+
+## 2. Use the Functional API to deploy on LangSmith Deployment
+
+The [Functional API](/oss/langgraph/functional-api) lets you integrate and deploy agents built with frameworks other than LangChain. It also lets you pair your existing agent with other key features, such as persistence, memory, human-in-the-loop, and streaming, with minimal changes to your existing code.
+
+It uses two key building blocks:
+
+* **@[`@entrypoint`]**: Marks a function as the starting point of a workflow, encapsulating logic and managing execution flow, including handling long-running tasks and interrupts.
+* **@[`@task`]**: Represents a discrete unit of work, such as an API call or data processing step, that can be executed asynchronously within an entrypoint. Tasks return a future-like object that can be awaited or resolved synchronously, as the sketch after this list shows.
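+
+Before wiring in Strands, it helps to see how `previous` and `entrypoint.final` interact on their own: `previous` receives whatever the last run on the same thread saved, and `entrypoint.final` separates the value returned to the caller from the value saved for the next run. The following is a minimal sketch (the `accumulate` function and thread ID are illustrative only); it uses an in-memory checkpointer for local experimentation, whereas LangSmith Deployment provides a checkpointer for you, so a bare `@entrypoint()` suffices there:
+
+```python
+from typing import Optional
+
+from langgraph.checkpoint.memory import MemorySaver
+from langgraph.func import entrypoint
+
+
+@entrypoint(checkpointer=MemorySaver())
+def accumulate(n: int, previous: Optional[list[int]] = None):
+    history = (previous or []) + [n]
+    # Return the latest value to the caller, but save the full history;
+    # it is injected as `previous` on the next run with the same thread ID.
+    return entrypoint.final(value=n, save=history)
+
+
+config = {"configurable": {"thread_id": "demo"}}
+accumulate.invoke(1, config)  # previous is None
+accumulate.invoke(2, config)  # previous is [1]
+```
+
+The workflow below applies the same pattern to the Strands agent: each turn appends the new messages to the saved history, runs the agent in a task, and saves the updated history for the next turn.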
+
+```python
+from strands.types.content import Message
+
+from langgraph.func import entrypoint, task
+import operator
+
+
+@task
+def invoke_strands(messages: list[Message]):
+    # Run the agent with the full message history; to send only the
+    # latest message, invoke the agent with messages[-1] instead.
+    result = agent(messages)
+    # Return the resulting message
+    return [result.message]
+
+
+@entrypoint()
+def workflow(messages: list[Message], previous: list[Message]):
+    messages = operator.add(previous or [], messages)
+    response = invoke_strands(messages).result()
+    return entrypoint.final(value=response, save=operator.add(messages, response))
+```
+
+## 3. Set up tracing with OpenTelemetry
+
+Set the following environment variables:
+
+```bash
+# Turn off LangSmith's default tracing, since we only want to trace with OpenTelemetry
+LANGSMITH_TRACING=false
+
+OTEL_EXPORTER_OTLP_ENDPOINT="https://api.smith.langchain.com/otel/"
+
+OTEL_EXPORTER_OTLP_HEADERS="x-api-key=your-langsmith-api-key,Langsmith-Project=your-tracing-project-name"
+```
+
+If you're [self-hosting LangSmith](/langsmith/self-hosted), replace the `OTEL_EXPORTER_OTLP_ENDPOINT` value with your LangSmith API endpoint and append `/api/v1/otel`. For example: `OTEL_EXPORTER_OTLP_ENDPOINT="https://ai-company.com/api/v1/otel"`
+
+Strands' OTel tracing contains synchronous code, so you may need to set `BG_JOB_ISOLATED_LOOPS=true` to execute background runs in an event loop isolated from the serving API's event loop.
+
+In your main agent file, set up the following:
+
+```python
+from strands.telemetry import StrandsTelemetry
+
+strands_telemetry = StrandsTelemetry()
+strands_telemetry.setup_otlp_exporter()
+strands_telemetry.setup_meter()
+```
+
+## 4. Prepare for deployment
+
+To deploy to LangSmith, create a file structure like the following:
+
+```
+my-strands-agent/
+├── agent.py          # Your main agent code
+├── requirements.txt  # Python dependencies
+└── langgraph.json    # LangGraph configuration
+```
+
+To deploy your agent, follow the [Deploy to cloud](/langsmith/deploy-to-cloud) guide.
diff --git a/src/oss/langgraph/use-functional-api.mdx b/src/oss/langgraph/use-functional-api.mdx
index b37329d59e..c8cb4a7aa4 100644
--- a/src/oss/langgraph/use-functional-api.mdx
+++ b/src/oss/langgraph/use-functional-api.mdx
@@ -1673,4 +1673,4 @@ for await (const chunk of await workflow.stream([inputMessage2], {
 ## Integrate with other libraries
 
-* [Add LangGraph's features to other frameworks using the functional API](/langsmith/autogen-integration): Add LangGraph features like persistence, memory and streaming to other agent frameworks that do not provide them out of the box.
+* [Add LangGraph's features to other frameworks using the functional API](/langsmith/deploy-other-frameworks): Add LangGraph features like persistence, memory and streaming to other agent frameworks that do not provide them out of the box.