"""
Main RAG tool-chain application: load the tools, build the ReAct agent, and
prompt the user in a simple REPL loop.
"""

import os

from dotenv import load_dotenv
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain_community.tools import ShellTool
from langchain_openai import ChatOpenAI
from langsmith import Client

from tools import dalle, get_adapter_interface, ip_loc, pcap_summary, tcp_dump

load_dotenv()

# Enable LangSmith tracing so every agent run is recorded under this project.
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = "LangSmith Introduction"
os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"

client = Client()
shell_tool = ShellTool()

llm = ChatOpenAI(model_name="gpt-4o", temperature=0)

tools = [tcp_dump, ip_loc, dalle, pcap_summary, get_adapter_interface]

# Pull the stock ReAct prompt and inject task-specific instructions.
base_prompt = hub.pull("langchain-ai/react-agent-template")
prompt = base_prompt.partial(
    instructions="""
    You are a packet analysis assistant. Use any combination of the tools
    provided to best serve the user's request. If the request cannot be served
    with the tools provided, state why and offer advice on how the user could
    solve the problem. Never make assumptions about which network adapter to
    use; always check the adapter name with the correct tool.
    """
)

agent = create_react_agent(llm, tools, prompt)
agent_executor = AgentExecutor(
    agent=agent, tools=tools, verbose=True, handle_parsing_errors=True
)

print(
    "I am a packet analysis assistant. I can perform various tasks related to "
    "packet capture files, including capture, summarization, IP lookups, and "
    "visualization."
)
print("I am configured with the following tools:")
for tool in tools:
    print(f"  Tool: {tool.name} = {tool.description}")

# Simple REPL: send each non-empty line to the agent; an empty line exits.
while True:
    line = input("llm>> ")
    if line:
        result = agent_executor.invoke({"input": line})
        print(result["output"])
    else:
        break
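
# NOTE: the custom tools imported from tools.py are not part of this file. As an
# illustration only, they are assumed to be ordinary functions wrapped with
# LangChain's @tool decorator so the agent can read their name and description.
# The sketch below is a hypothetical version of get_adapter_interface (psutil is
# an assumed dependency), kept as a comment for reference:
#
#   from langchain.tools import tool
#   import psutil
#
#   @tool
#   def get_adapter_interface() -> str:
#       """Return the names of the network adapters available on this machine."""
#       return ", ".join(psutil.net_if_addrs().keys())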