LangChain Provider
Use Composio tools with LangChain
The LangChain Provider transforms Composio tools into a format compatible with LangChain's function calling capabilities.
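For example, tools fetched through the provider behave like native LangChain tool objects that you can pass straight to an agent. A minimal sketch (the GITHUB toolkit and the default user ID are illustrative; install and setup instructions follow below):
from composio import Composio
from composio_langchain import LangchainProvider

# Assumes COMPOSIO_API_KEY is set in the environment.
composio = Composio(provider=LangchainProvider())

# Tools come back as LangChain tool objects, ready to pass to an agent.
tools = composio.tools.get(user_id="default", toolkits=["GITHUB"])
for tool in tools:
    # name and description are standard LangChain tool attributes.
    print(tool.name, "-", tool.description)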
Setup
Python
pip install composio_langchain==0.8.0 langchain
TypeScript
npm install @composio/langchain
Usage
Python
from composio import Composio
from composio_langchain import LangchainProvider
from langchain import hub # type: ignore
from langchain.agents import AgentExecutor, create_openai_functions_agent
from langchain_openai import ChatOpenAI
# Pull the OpenAI functions agent prompt from the LangChain hub.
prompt = hub.pull("hwchase17/openai-functions-agent")
# Initialize the LLM and the Composio client with the LangChain provider.
openai_client = ChatOpenAI(model="gpt-5")
composio = Composio(provider=LangchainProvider())
# Get all the tools from the GITHUB toolkit
tools = composio.tools.get(user_id="default", toolkits=["GITHUB"])
# Define task
task = "Star the repo composiohq/composio on GitHub"
# Define agent
agent = create_openai_functions_agent(openai_client, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
# Execute using agent_executor
agent_executor.invoke({"input": task})
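The example above loads the entire GITHUB toolkit. If you only need a single action, you can usually fetch tools by slug instead. A minimal sketch, assuming the tools parameter and the slug below match your installed SDK version:
# Fetch a single tool by its slug rather than a whole toolkit.
# The slug and parameter name are illustrative; verify them for your SDK version.
tools = composio.tools.get(
    user_id="default",
    tools=["GITHUB_STAR_A_REPOSITORY_FOR_THE_AUTHENTICATED_USER"],
)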
TypeScript
import { ChatOpenAI } from '@langchain/openai';
import { HumanMessage, AIMessage } from '@langchain/core/messages';
import { ToolNode } from '@langchain/langgraph/prebuilt';
import { StateGraph, MessagesAnnotation } from '@langchain/langgraph';
import { Composio } from '@composio/core';
import { LangchainProvider } from '@composio/langchain';
// Initialize the Composio client with the LangChain provider
const composio = new Composio({
  apiKey: process.env.COMPOSIO_API_KEY,
  provider: new LangchainProvider(),
});
// Fetch the tool
console.log(`🔄 Fetching the tool...`);
const tools = await composio.tools.get('default', 'HACKERNEWS_GET_USER');
// Define the tools for the agent to use
const toolNode = new ToolNode(tools);
// Create a model and give it access to the tools
const model = new ChatOpenAI({
  model: 'gpt-4o-mini',
  temperature: 0,
}).bindTools(tools);
// Define the function that determines whether to continue or not
function shouldContinue({ messages }: typeof MessagesAnnotation.State) {
  const lastMessage = messages[messages.length - 1] as AIMessage;
  // If the LLM makes a tool call, then we route to the "tools" node
  if (lastMessage.tool_calls?.length) {
    return 'tools';
  }
  // Otherwise, we stop (reply to the user) using the special "__end__" node
  return '__end__';
}
// Define the function that calls the model
async function callModel(state: typeof MessagesAnnotation.State) {
  console.log(`🔄 Calling the model...`);
  const response = await model.invoke(state.messages);
  // We return a list, because this will get added to the existing list
  return { messages: [response] };
}
// Define a new graph
const workflow = new StateGraph(MessagesAnnotation)
  .addNode('agent', callModel)
  .addEdge('__start__', 'agent') // __start__ is a special name for the entrypoint
  .addNode('tools', toolNode)
  .addEdge('tools', 'agent')
  .addConditionalEdges('agent', shouldContinue);
// Finally, we compile it into a LangChain Runnable.
const app = workflow.compile();
// Use the agent
const finalState = await app.invoke({
  messages: [new HumanMessage('Find the details of the user `pg` on HackerNews')],
});
console.log(`✅ Message received from the model`);
console.log(finalState.messages[finalState.messages.length - 1].content);
const nextState = await app.invoke({
  // Including the messages from the previous run gives the LLM context.
  // This way it knows we're asking about another HackerNews user, `haxzie`.
  messages: [...finalState.messages, new HumanMessage('what about haxzie')],
});
console.log(`✅ Message received from the model`);
console.log(nextState.messages[nextState.messages.length - 1].content);