diff --git a/src/backend/base/langflow/base/agents/agent.py b/src/backend/base/langflow/base/agents/agent.py index ca2e52d275f..959696e0cec 100644 --- a/src/backend/base/langflow/base/agents/agent.py +++ b/src/backend/base/langflow/base/agents/agent.py @@ -136,10 +136,10 @@ def create_agent_runnable(self) -> Runnable: class LCToolsAgentComponent(LCAgentComponent): _base_inputs = [ - *LCAgentComponent._base_inputs, HandleInput( name="tools", display_name="Tools", input_types=["Tool", "BaseTool", "StructuredTool"], is_list=True ), + *LCAgentComponent._base_inputs, ] def build_agent(self) -> AgentExecutor: diff --git a/src/backend/base/langflow/components/agents/agent.py b/src/backend/base/langflow/components/agents/agent.py new file mode 100644 index 00000000000..bd4aaf2ba58 --- /dev/null +++ b/src/backend/base/langflow/components/agents/agent.py @@ -0,0 +1,156 @@ +from langflow.base.agents.agent import LCToolsAgentComponent +from langflow.base.models.model import LCModelComponent +from langflow.components.agents.tool_calling import ToolCallingAgentComponent +from langflow.components.helpers.memory import MemoryComponent +from langflow.components.models.azure_openai import AzureChatOpenAIComponent +from langflow.components.models.openai import OpenAIModelComponent +from langflow.io import ( + DropdownInput, + MultilineInput, + Output, +) +from langflow.schema.dotdict import dotdict +from langflow.schema.message import Message + + +def set_advanced_true(component_input): + component_input.advanced = True + return component_input + + +class AgentComponent(ToolCallingAgentComponent): + display_name: str = "Agent" + description: str = "Define the agent's instructions, then enter a task to complete using tools." 
+ icon = "bot" + beta = True + name = "Agent" + + azure_inputs = [ + set_advanced_true(component_input) if component_input.name == "temperature" else component_input + for component_input in AzureChatOpenAIComponent().inputs + if component_input.name not in [input_field.name for input_field in LCModelComponent._base_inputs] + ] + openai_inputs = [ + set_advanced_true(component_input) if component_input.name == "temperature" else component_input + for component_input in OpenAIModelComponent().inputs + if component_input.name not in [input_field.name for input_field in LCModelComponent._base_inputs] + ] + + memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs] + + inputs = [ + DropdownInput( + name="agent_llm", + display_name="Model Provider", + options=["Azure OpenAI", "OpenAI", "Custom"], + value="OpenAI", + real_time_refresh=True, + refresh_button=True, + input_types=[], + ), + *openai_inputs, + MultilineInput( + name="system_prompt", + display_name="Agent Instructions", + info="Initial instructions and context provided to guide the agent's behavior.", + value="You are a helpful assistant that can use tools to answer questions and perform tasks.", + advanced=False, + ), + *LCToolsAgentComponent._base_inputs, + *memory_inputs, + ] + outputs = [Output(name="response", display_name="Response", method="get_response")] + + async def get_response(self) -> Message: + llm_model = self.get_llm() + if llm_model is None: + msg = "No language model selected" + raise ValueError(msg) + self.chat_history = self.get_memory_data() + + agent = ToolCallingAgentComponent().set( + llm=llm_model, + tools=[self.tools], + chat_history=self.chat_history, + input_value=self.input_value, + system_prompt=self.system_prompt, + ) + + return await agent.message_response() + + def get_memory_data(self): + memory_kwargs = { + component_input.name: getattr(self, f"{component_input.name}") for component_input in self.memory_inputs + } + + return 
MemoryComponent().set(**memory_kwargs).retrieve_messages() + + def get_llm(self): + try: + if self.agent_llm == "OpenAI": + return self._build_llm_model(OpenAIModelComponent(), self.openai_inputs) + if self.agent_llm == "Azure OpenAI": + return self._build_llm_model(AzureChatOpenAIComponent(), self.azure_inputs, prefix="azure_param_") + except Exception as e: + msg = f"Error building {self.agent_llm} language model" + raise ValueError(msg) from e + return self.agent_llm + + def _build_llm_model(self, component, inputs, prefix=""): + return component.set( + **{component_input.name: getattr(self, f"{prefix}{component_input.name}") for component_input in inputs} + ).build_model() + + def delete_fields(self, build_config, fields): + for field in fields: + build_config.pop(field, None) + + def update_build_config(self, build_config: dotdict, field_value: str, field_name: str | None = None): + if field_name == "agent_llm": + openai_fields = {component_input.name: component_input for component_input in self.openai_inputs} + azure_fields = { + f"azure_param_{component_input.name}": component_input for component_input in self.azure_inputs + } + + if field_value == "OpenAI": + self.delete_fields(build_config, {**azure_fields}) + if not any(field in build_config for field in openai_fields): + build_config.update(openai_fields) + build_config["agent_llm"]["input_types"] = [] + build_config = self.update_input_types(build_config) + + elif field_value == "Azure OpenAI": + self.delete_fields(build_config, {**openai_fields}) + build_config.update(azure_fields) + build_config["agent_llm"]["input_types"] = [] + build_config = self.update_input_types(build_config) + elif field_value == "Custom": + self.delete_fields(build_config, {**openai_fields}) + self.delete_fields(build_config, {**azure_fields}) + new_component = DropdownInput( + name="agent_llm", + display_name="Language Model", + options=["Azure OpenAI", "OpenAI", "Custom"], + value="Custom", + real_time_refresh=True, + 
input_types=["LanguageModel"], + ) + build_config.update({"agent_llm": new_component.to_dict()}) + build_config = self.update_input_types(build_config) + default_keys = ["code", "_type", "agent_llm", "tools", "input_value"] + missing_keys = [key for key in default_keys if key not in build_config] + if missing_keys: + msg = f"Missing required keys in build_config: {missing_keys}" + raise ValueError(msg) + return build_config + + def update_input_types(self, build_config): + for key, value in build_config.items(): + # Check if the value is a dictionary + if isinstance(value, dict): + if value.get("input_types") is None: + build_config[key]["input_types"] = [] + # Check if the value has an attribute 'input_types' and it is None + elif hasattr(value, "input_types") and value.input_types is None: + value.input_types = [] + return build_config diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Agent Flow.json b/src/backend/base/langflow/initial_setup/starter_projects/Agent Flow.json index 82153f047d6..a30d3bdd2e5 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Agent Flow.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Agent Flow.json @@ -2,63 +2,41 @@ "data": { "edges": [ { + "animated": false, "className": "", "data": { "sourceHandle": { - "dataType": "ToolCallingAgent", - "id": "ToolCallingAgent-mf0BN", - "name": "response", - "output_types": [ - "Message" - ] - }, - "targetHandle": { - "fieldName": "input_value", - "id": "ChatOutput-Ag9YG", - "inputTypes": [ - "Message" - ], - "type": "str" - } - }, - "id": "reactflow__edge-ToolCallingAgent-mf0BN{œdataTypeœ:œToolCallingAgentœ,œidœ:œToolCallingAgent-mf0BNœ,œnameœ:œresponseœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-Ag9YG{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-Ag9YGœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", - "source": "ToolCallingAgent-mf0BN", - "sourceHandle": "{œdataTypeœ: œToolCallingAgentœ, œidœ: œToolCallingAgent-mf0BNœ, œnameœ: œresponseœ, 
œoutput_typesœ: [œMessageœ]}", - "target": "ChatOutput-Ag9YG", - "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œChatOutput-Ag9YGœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}" - }, - { - "className": "", - "data": { - "sourceHandle": { - "dataType": "OpenAIModel", - "id": "OpenAIModel-1ioeW", - "name": "model_output", + "dataType": "PythonREPLTool", + "id": "PythonREPLTool-QaSW0", + "name": "api_build_tool", "output_types": [ - "LanguageModel" + "Tool" ] }, "targetHandle": { - "fieldName": "llm", - "id": "ToolCallingAgent-mf0BN", + "fieldName": "tools", + "id": "Agent-7mKwO", "inputTypes": [ - "LanguageModel" + "Tool", + "BaseTool", + "StructuredTool" ], "type": "other" } }, - "id": "reactflow__edge-OpenAIModel-1ioeW{œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-1ioeWœ,œnameœ:œmodel_outputœ,œoutput_typesœ:[œLanguageModelœ]}-ToolCallingAgent-mf0BN{œfieldNameœ:œllmœ,œidœ:œToolCallingAgent-mf0BNœ,œinputTypesœ:[œLanguageModelœ],œtypeœ:œotherœ}", - "source": "OpenAIModel-1ioeW", - "sourceHandle": "{œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-1ioeWœ, œnameœ: œmodel_outputœ, œoutput_typesœ: [œLanguageModelœ]}", - "target": "ToolCallingAgent-mf0BN", - "targetHandle": "{œfieldNameœ: œllmœ, œidœ: œToolCallingAgent-mf0BNœ, œinputTypesœ: [œLanguageModelœ], œtypeœ: œotherœ}" + "id": "reactflow__edge-PythonREPLTool-QaSW0{œdataTypeœ:œPythonREPLToolœ,œidœ:œPythonREPLTool-QaSW0œ,œnameœ:œapi_build_toolœ,œoutput_typesœ:[œToolœ]}-Agent-7mKwO{œfieldNameœ:œtoolsœ,œidœ:œAgent-7mKwOœ,œinputTypesœ:[œToolœ,œBaseToolœ,œStructuredToolœ],œtypeœ:œotherœ}", + "source": "PythonREPLTool-QaSW0", + "sourceHandle": "{œdataTypeœ: œPythonREPLToolœ, œidœ: œPythonREPLTool-QaSW0œ, œnameœ: œapi_build_toolœ, œoutput_typesœ: [œToolœ]}", + "target": "Agent-7mKwO", + "targetHandle": "{œfieldNameœ: œtoolsœ, œidœ: œAgent-7mKwOœ, œinputTypesœ: [œToolœ, œBaseToolœ, œStructuredToolœ], œtypeœ: œotherœ}" }, { + "animated": false, "className": "", "data": { "sourceHandle": { "dataType": "CalculatorTool", - "id": 
"CalculatorTool-Nb4P5", + "id": "CalculatorTool-2c9q3", "name": "api_build_tool", "output_types": [ "Tool" @@ -66,26 +44,28 @@ }, "targetHandle": { "fieldName": "tools", - "id": "ToolCallingAgent-mf0BN", + "id": "Agent-7mKwO", "inputTypes": [ "Tool", - "BaseTool" + "BaseTool", + "StructuredTool" ], "type": "other" } }, - "id": "reactflow__edge-CalculatorTool-Nb4P5{œdataTypeœ:œCalculatorToolœ,œidœ:œCalculatorTool-Nb4P5œ,œnameœ:œapi_build_toolœ,œoutput_typesœ:[œToolœ]}-ToolCallingAgent-mf0BN{œfieldNameœ:œtoolsœ,œidœ:œToolCallingAgent-mf0BNœ,œinputTypesœ:[œToolœ,œBaseToolœ],œtypeœ:œotherœ}", - "source": "CalculatorTool-Nb4P5", - "sourceHandle": "{œdataTypeœ: œCalculatorToolœ, œidœ: œCalculatorTool-Nb4P5œ, œnameœ: œapi_build_toolœ, œoutput_typesœ: [œToolœ]}", - "target": "ToolCallingAgent-mf0BN", - "targetHandle": "{œfieldNameœ: œtoolsœ, œidœ: œToolCallingAgent-mf0BNœ, œinputTypesœ: [œToolœ, œBaseToolœ], œtypeœ: œotherœ}" + "id": "reactflow__edge-CalculatorTool-2c9q3{œdataTypeœ:œCalculatorToolœ,œidœ:œCalculatorTool-2c9q3œ,œnameœ:œapi_build_toolœ,œoutput_typesœ:[œToolœ]}-Agent-7mKwO{œfieldNameœ:œtoolsœ,œidœ:œAgent-7mKwOœ,œinputTypesœ:[œToolœ,œBaseToolœ,œStructuredToolœ],œtypeœ:œotherœ}", + "source": "CalculatorTool-2c9q3", + "sourceHandle": "{œdataTypeœ: œCalculatorToolœ, œidœ: œCalculatorTool-2c9q3œ, œnameœ: œapi_build_toolœ, œoutput_typesœ: [œToolœ]}", + "target": "Agent-7mKwO", + "targetHandle": "{œfieldNameœ: œtoolsœ, œidœ: œAgent-7mKwOœ, œinputTypesœ: [œToolœ, œBaseToolœ, œStructuredToolœ], œtypeœ: œotherœ}" }, { + "animated": false, "className": "", "data": { "sourceHandle": { "dataType": "ChatInput", - "id": "ChatInput-X3ARP", + "id": "ChatInput-GA9eO", "name": "message", "output_types": [ "Message" @@ -93,50 +73,51 @@ }, "targetHandle": { "fieldName": "input_value", - "id": "ToolCallingAgent-mf0BN", + "id": "Agent-7mKwO", "inputTypes": [ "Message" ], "type": "str" } }, - "id": 
"reactflow__edge-ChatInput-X3ARP{œdataTypeœ:œChatInputœ,œidœ:œChatInput-X3ARPœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-ToolCallingAgent-mf0BN{œfieldNameœ:œinput_valueœ,œidœ:œToolCallingAgent-mf0BNœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", - "source": "ChatInput-X3ARP", - "sourceHandle": "{œdataTypeœ: œChatInputœ, œidœ: œChatInput-X3ARPœ, œnameœ: œmessageœ, œoutput_typesœ: [œMessageœ]}", - "target": "ToolCallingAgent-mf0BN", - "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œToolCallingAgent-mf0BNœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}" + "id": "reactflow__edge-ChatInput-GA9eO{œdataTypeœ:œChatInputœ,œidœ:œChatInput-GA9eOœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-Agent-7mKwO{œfieldNameœ:œinput_valueœ,œidœ:œAgent-7mKwOœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "source": "ChatInput-GA9eO", + "sourceHandle": "{œdataTypeœ: œChatInputœ, œidœ: œChatInput-GA9eOœ, œnameœ: œmessageœ, œoutput_typesœ: [œMessageœ]}", + "target": "Agent-7mKwO", + "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œAgent-7mKwOœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}" }, { + "animated": false, + "className": "", "data": { "sourceHandle": { - "dataType": "PythonREPLTool", - "id": "PythonREPLTool-i922a", - "name": "api_build_tool", + "dataType": "Agent", + "id": "Agent-7mKwO", + "name": "response", "output_types": [ - "Tool" + "Message" ] }, "targetHandle": { - "fieldName": "tools", - "id": "ToolCallingAgent-mf0BN", + "fieldName": "input_value", + "id": "ChatOutput-iwrl6", "inputTypes": [ - "Tool", - "BaseTool" + "Message" ], - "type": "other" + "type": "str" } }, - "id": "reactflow__edge-PythonREPLTool-i922a{œdataTypeœ:œPythonREPLToolœ,œidœ:œPythonREPLTool-i922aœ,œnameœ:œapi_build_toolœ,œoutput_typesœ:[œToolœ]}-ToolCallingAgent-mf0BN{œfieldNameœ:œtoolsœ,œidœ:œToolCallingAgent-mf0BNœ,œinputTypesœ:[œToolœ,œBaseToolœ],œtypeœ:œotherœ}", - "source": "PythonREPLTool-i922a", - "sourceHandle": "{œdataTypeœ: œPythonREPLToolœ, œidœ: œPythonREPLTool-i922aœ, œnameœ: œapi_build_toolœ, 
œoutput_typesœ: [œToolœ]}", - "target": "ToolCallingAgent-mf0BN", - "targetHandle": "{œfieldNameœ: œtoolsœ, œidœ: œToolCallingAgent-mf0BNœ, œinputTypesœ: [œToolœ, œBaseToolœ], œtypeœ: œotherœ}" + "id": "reactflow__edge-Agent-7mKwO{œdataTypeœ:œAgentœ,œidœ:œAgent-7mKwOœ,œnameœ:œresponseœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-iwrl6{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-iwrl6œ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "source": "Agent-7mKwO", + "sourceHandle": "{œdataTypeœ: œAgentœ, œidœ: œAgent-7mKwOœ, œnameœ: œresponseœ, œoutput_typesœ: [œMessageœ]}", + "target": "ChatOutput-iwrl6", + "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œChatOutput-iwrl6œ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}" } ], "nodes": [ { "data": { - "id": "ChatInput-X3ARP", + "id": "ChatInput-GA9eO", "node": { "base_classes": [ "Message" @@ -159,7 +140,7 @@ "frozen": false, "icon": "MessagesSquare", "legacy": false, - "lf_version": "1.0.16", + "lf_version": "1.0.19", "metadata": {}, "output_types": [], "outputs": [ @@ -343,23 +324,23 @@ "type": "ChatInput" }, "dragging": false, - "height": 302, - "id": "ChatInput-X3ARP", + "height": 234, + "id": "ChatInput-GA9eO", "position": { - "x": 1760.192972923414, - "y": -191.51901724049213 + "x": 62.413733671682394, + "y": 1065.4595268030878 }, "positionAbsolute": { - "x": 1760.192972923414, - "y": -191.51901724049213 + "x": 62.413733671682394, + "y": 1065.4595268030878 }, "selected": false, "type": "genericNode", - "width": 384 + "width": 320 }, { "data": { - "id": "ChatOutput-Ag9YG", + "id": "ChatOutput-iwrl6", "node": { "base_classes": [ "Message" @@ -382,7 +363,7 @@ "frozen": false, "icon": "MessagesSquare", "legacy": false, - "lf_version": "1.0.16", + "lf_version": "1.0.19", "metadata": {}, "output_types": [], "outputs": [ @@ -544,93 +525,73 @@ "type": "ChatOutput" }, "dragging": false, - "height": 302, - "id": "ChatOutput-Ag9YG", + "height": 234, + "id": "ChatOutput-iwrl6", "position": { - "x": 3968.8870036313238, - "y": 
627.770746142633 + "x": 1503.4431369392628, + "y": 552.5679265365819 }, "positionAbsolute": { - "x": 3968.8870036313238, - "y": 627.770746142633 + "x": 1503.4431369392628, + "y": 552.5679265365819 }, - "selected": false, + "selected": true, "type": "genericNode", - "width": 384 + "width": 320 }, { "data": { - "description": "Generates text using OpenAI LLMs.", - "display_name": "OpenAI", - "id": "OpenAIModel-1ioeW", + "id": "CalculatorTool-2c9q3", "node": { "base_classes": [ - "LanguageModel", - "Message" + "Data", + "list", + "Sequence", + "Tool" ], "beta": false, "conditional_paths": [], "custom_fields": {}, - "description": "Generates text using OpenAI LLMs.", - "display_name": "OpenAI", + "description": "Perform basic arithmetic operations on a given expression.", + "display_name": "Calculator", "documentation": "", "edited": false, "field_order": [ - "input_value", - "system_message", - "stream", - "max_tokens", - "model_kwargs", - "json_mode", - "output_schema", - "model_name", - "openai_api_base", - "api_key", - "temperature", - "seed" + "expression" ], "frozen": false, - "icon": "OpenAI", + "icon": "calculator", "legacy": false, - "lf_version": "1.0.16", + "lf_version": "1.0.19", "metadata": {}, + "official": false, "output_types": [], "outputs": [ { "cache": true, - "display_name": "Text", - "method": "text_response", - "name": "text_output", + "display_name": "Data", + "method": "run_model", + "name": "api_run_model", "required_inputs": [ - "input_value", - "stream", - "system_message" + "expression" ], - "selected": "Message", + "selected": "Data", "types": [ - "Message" + "Data" ], "value": "__UNDEFINED__" }, { "cache": true, - "display_name": "Language Model", - "method": "build_model", - "name": "model_output", + "display_name": "Tool", + "method": "build_tool", + "name": "api_build_tool", "required_inputs": [ - "api_key", - "json_mode", - "max_tokens", - "model_kwargs", - "model_name", - "openai_api_base", - "output_schema", - "seed", - "temperature" 
+ "expression" ], - "selected": "LanguageModel", + "selected": "Tool", "types": [ - "LanguageModel" + "Tool" ], "value": "__UNDEFINED__" } @@ -638,25 +599,6 @@ "pinned": false, "template": { "_type": "Component", - "api_key": { - "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "OpenAI API Key", - "dynamic": false, - "info": "The OpenAI API Key to use for the OpenAI model.", - "input_types": [ - "Message" - ], - "load_from_db": true, - "name": "api_key", - "password": true, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "OPENAI_API_KEY" - }, "code": { "advanced": true, "dynamic": true, @@ -673,20 +615,20 @@ "show": true, "title_case": false, "type": "code", - "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, SecretStrInput, StrInput\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled. [DEPRECATED]\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = 
self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key).get_secret_value() if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n else:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def _get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n" + "value": "import ast\nimport operator\n\nfrom langchain.tools import StructuredTool\nfrom loguru import logger\nfrom pydantic import BaseModel, Field\n\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.field_typing import Tool\nfrom langflow.inputs import MessageTextInput\nfrom langflow.schema import Data\n\n\nclass CalculatorToolComponent(LCToolComponent):\n display_name = \"Calculator\"\n description = \"Perform basic arithmetic operations on a given expression.\"\n icon = \"calculator\"\n name = \"CalculatorTool\"\n\n inputs = [\n MessageTextInput(\n name=\"expression\",\n display_name=\"Expression\",\n info=\"The arithmetic expression to evaluate (e.g., '4*4*(33/22)+12-20').\",\n ),\n ]\n\n class CalculatorToolSchema(BaseModel):\n expression: str = Field(..., 
description=\"The arithmetic expression to evaluate.\")\n\n def run_model(self) -> list[Data]:\n return self._evaluate_expression(self.expression)\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"calculator\",\n description=\"Evaluate basic arithmetic expressions. Input should be a string containing the expression.\",\n func=self._evaluate_expression,\n args_schema=self.CalculatorToolSchema,\n )\n\n def _eval_expr(self, node):\n # Define the allowed operators\n operators = {\n ast.Add: operator.add,\n ast.Sub: operator.sub,\n ast.Mult: operator.mul,\n ast.Div: operator.truediv,\n ast.Pow: operator.pow,\n }\n if isinstance(node, ast.Num):\n return node.n\n if isinstance(node, ast.BinOp):\n return operators[type(node.op)](self._eval_expr(node.left), self._eval_expr(node.right))\n if isinstance(node, ast.UnaryOp):\n return operators[type(node.op)](self._eval_expr(node.operand))\n raise TypeError(node)\n\n def _evaluate_expression(self, expression: str) -> list[Data]:\n try:\n # Parse the expression and evaluate it\n tree = ast.parse(expression, mode=\"eval\")\n result = self._eval_expr(tree.body)\n\n # Format the result to a reasonable number of decimal places\n formatted_result = f\"{result:.6f}\".rstrip(\"0\").rstrip(\".\")\n\n self.status = formatted_result\n return [Data(data={\"result\": formatted_result})]\n\n except (SyntaxError, TypeError, KeyError) as e:\n error_message = f\"Invalid expression: {e}\"\n self.status = error_message\n return [Data(data={\"error\": error_message})]\n except ZeroDivisionError:\n error_message = \"Error: Division by zero\"\n self.status = error_message\n return [Data(data={\"error\": error_message})]\n except Exception as e: # noqa: BLE001\n logger.opt(exception=True).debug(\"Error evaluating expression\")\n error_message = f\"Error: {e}\"\n self.status = error_message\n return [Data(data={\"error\": error_message})]\n" }, - "input_value": { - "_input_type": "MessageInput", + "expression": { + 
"_input_type": "MessageTextInput", "advanced": false, - "display_name": "Input", + "display_name": "Expression", "dynamic": false, - "info": "", + "info": "The arithmetic expression to evaluate (e.g., '4*4*(33/22)+12-20').", "input_types": [ "Message" ], "list": false, "load_from_db": false, - "name": "input_value", + "name": "expression", "placeholder": "", "required": false, "show": true, @@ -694,275 +636,235 @@ "trace_as_input": true, "trace_as_metadata": true, "type": "str", - "value": "" + "value": "2+2" + } + } + }, + "type": "CalculatorTool" + }, + "dragging": false, + "height": 302, + "id": "CalculatorTool-2c9q3", + "position": { + "x": 57.99937502347552, + "y": 149.36767868834886 + }, + "positionAbsolute": { + "x": 57.99937502347552, + "y": 149.36767868834886 + }, + "selected": false, + "type": "genericNode", + "width": 320 + }, + { + "data": { + "description": "A tool for running Python code in a REPL environment.", + "display_name": "Python REPL Tool", + "id": "PythonREPLTool-QaSW0", + "node": { + "base_classes": [ + "Data", + "Tool" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "A tool for running Python code in a REPL environment.", + "display_name": "Python REPL Tool", + "documentation": "", + "edited": false, + "field_order": [ + "name", + "description", + "global_imports", + "code" + ], + "frozen": false, + "legacy": false, + "lf_version": "1.0.19", + "metadata": {}, + "output_types": [], + "outputs": [ + { + "cache": true, + "display_name": "Data", + "method": "run_model", + "name": "api_run_model", + "required_inputs": [ + "code", + "description", + "global_imports", + "name" + ], + "selected": "Data", + "types": [ + "Data" + ], + "value": "__UNDEFINED__" }, - "json_mode": { - "_input_type": "BoolInput", + { + "cache": true, + "display_name": "Tool", + "method": "build_tool", + "name": "api_build_tool", + "required_inputs": [ + "code", + "description", + "global_imports", + "name" + ], + "selected": 
"Tool", + "types": [ + "Tool" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_type": "Component", + "code": { "advanced": true, - "display_name": "JSON Mode", - "dynamic": false, - "info": "If True, it will output JSON regardless of passing a schema.", + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", "list": false, - "name": "json_mode", + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, "placeholder": "", - "required": false, + "required": true, "show": true, "title_case": false, - "trace_as_metadata": true, - "type": "bool", - "value": false + "type": "code", + "value": "import importlib\n\nfrom langchain.tools import StructuredTool\nfrom langchain_experimental.utilities import PythonREPL\nfrom loguru import logger\nfrom pydantic import BaseModel, Field\n\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.field_typing import Tool\nfrom langflow.inputs import StrInput\nfrom langflow.schema import Data\n\n\nclass PythonREPLToolComponent(LCToolComponent):\n display_name = \"Python REPL Tool\"\n description = \"A tool for running Python code in a REPL environment.\"\n name = \"PythonREPLTool\"\n\n inputs = [\n StrInput(\n name=\"name\",\n display_name=\"Tool Name\",\n info=\"The name of the tool.\",\n value=\"python_repl\",\n ),\n StrInput(\n name=\"description\",\n display_name=\"Tool Description\",\n info=\"A description of the tool.\",\n value=\"A Python shell. Use this to execute python commands. \"\n \"Input should be a valid python command. \"\n \"If you want to see the output of a value, you should print it out with `print(...)`.\",\n ),\n StrInput(\n name=\"global_imports\",\n display_name=\"Global Imports\",\n info=\"A comma-separated list of modules to import globally, e.g. 
'math,numpy'.\",\n value=\"math\",\n ),\n StrInput(\n name=\"code\",\n display_name=\"Python Code\",\n info=\"The Python code to execute.\",\n value=\"print('Hello, World!')\",\n ),\n ]\n\n class PythonREPLSchema(BaseModel):\n code: str = Field(..., description=\"The Python code to execute.\")\n\n def get_globals(self, global_imports: str | list[str]) -> dict:\n global_dict = {}\n if isinstance(global_imports, str):\n modules = [module.strip() for module in global_imports.split(\",\")]\n elif isinstance(global_imports, list):\n modules = global_imports\n else:\n msg = \"global_imports must be either a string or a list\"\n raise TypeError(msg)\n\n for module in modules:\n try:\n imported_module = importlib.import_module(module)\n global_dict[imported_module.__name__] = imported_module\n except ImportError as e:\n msg = f\"Could not import module {module}\"\n raise ImportError(msg) from e\n return global_dict\n\n def build_tool(self) -> Tool:\n _globals = self.get_globals(self.global_imports)\n python_repl = PythonREPL(_globals=_globals)\n\n def run_python_code(code: str) -> str:\n try:\n return python_repl.run(code)\n except Exception as e: # noqa: BLE001\n logger.opt(exception=True).debug(\"Error running Python code\")\n return f\"Error: {e}\"\n\n tool = StructuredTool.from_function(\n name=self.name,\n description=self.description,\n func=run_python_code,\n args_schema=self.PythonREPLSchema,\n )\n\n self.status = f\"Python REPL Tool created with global imports: {self.global_imports}\"\n return tool\n\n def run_model(self) -> list[Data]:\n tool = self.build_tool()\n result = tool.run(self.code)\n return [Data(data={\"result\": result})]\n" }, - "max_tokens": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "Max Tokens", + "description": { + "_input_type": "StrInput", + "advanced": false, + "display_name": "Tool Description", "dynamic": false, - "info": "The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.", + "info": "A description of the tool.", "list": false, - "name": "max_tokens", + "load_from_db": false, + "name": "description", "placeholder": "", - "range_spec": { - "max": 128000, - "min": 0, - "step": 0.1, - "step_type": "float" - }, "required": false, "show": true, "title_case": false, "trace_as_metadata": true, - "type": "int", - "value": "" + "type": "str", + "value": "A Python shell. Use this to execute python commands. Input should be a valid python command. If you want to see the output of a value, you should print it out with `print(...)`." }, - "model_kwargs": { - "_input_type": "DictInput", - "advanced": true, - "display_name": "Model Kwargs", + "global_imports": { + "_input_type": "StrInput", + "advanced": false, + "display_name": "Global Imports", "dynamic": false, - "info": "", + "info": "A comma-separated list of modules to import globally, e.g. 'math,numpy'.", "list": false, - "name": "model_kwargs", + "load_from_db": false, + "name": "global_imports", "placeholder": "", "required": false, "show": true, "title_case": false, - "trace_as_input": true, - "type": "dict", - "value": {} + "trace_as_metadata": true, + "type": "str", + "value": "math" }, - "model_name": { - "_input_type": "DropdownInput", + "name": { + "_input_type": "StrInput", "advanced": false, - "combobox": false, - "display_name": "Model Name", + "display_name": "Tool Name", "dynamic": false, - "info": "", + "info": "The name of the tool.", + "list": false, "load_from_db": false, - "name": "model_name", - "options": [ - "gpt-4o-mini", - "gpt-4o", - "gpt-4-turbo", - "gpt-4-turbo-preview", - "gpt-4", - "gpt-3.5-turbo", - "gpt-3.5-turbo-0125" - ], - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_metadata": true, - "type": "str", - "value": "gpt-4o-mini" - }, - "openai_api_base": { - "_input_type": "StrInput", - "advanced": true, - "display_name": "OpenAI API Base", - "dynamic": false, - "info": "The base URL of 
the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.", - "list": false, - "load_from_db": false, - "name": "openai_api_base", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, - "output_parser": { - "_input_type": "HandleInput", - "advanced": true, - "display_name": "Output Parser", - "dynamic": false, - "info": "The parser to use to parse the output of the model", - "input_types": [ - "OutputParser" - ], - "list": false, - "name": "output_parser", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_metadata": true, - "type": "other", - "value": "" - }, - "output_schema": { - "_input_type": "DictInput", - "advanced": true, - "display_name": "Schema", - "dynamic": false, - "info": "The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled. [DEPRECATED]", - "list": true, - "name": "output_schema", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_input": true, - "type": "dict", - "value": {} - }, - "seed": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "Seed", - "dynamic": false, - "info": "The seed controls the reproducibility of the job.", - "list": false, - "name": "seed", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_metadata": true, - "type": "int", - "value": 1 - }, - "stream": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Stream", - "dynamic": false, - "info": "Stream the response from the model. 
Streaming works only in Chat.", - "list": false, - "name": "stream", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_metadata": true, - "type": "bool", - "value": false - }, - "system_message": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "System Message", - "dynamic": false, - "info": "System message to pass to the model.", - "input_types": [ - "Message" - ], - "list": false, - "load_from_db": false, - "name": "system_message", + "name": "name", "placeholder": "", "required": false, "show": true, "title_case": false, - "trace_as_input": true, "trace_as_metadata": true, "type": "str", - "value": "" - }, - "temperature": { - "_input_type": "FloatInput", - "advanced": false, - "display_name": "Temperature", - "dynamic": false, - "info": "", - "list": false, - "name": "temperature", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_metadata": true, - "type": "float", - "value": 0.1 + "value": "python_repl" } } }, - "type": "OpenAIModel" + "type": "PythonREPLTool" }, "dragging": false, - "height": 605, - "id": "OpenAIModel-1ioeW", + "height": 475, + "id": "PythonREPLTool-QaSW0", "position": { - "x": 2538.9919009235173, - "y": 1206.8619086167491 + "x": 63.43047038993518, + "y": 536.9058769625287 }, "positionAbsolute": { - "x": 2538.9919009235173, - "y": 1206.8619086167491 + "x": 63.43047038993518, + "y": 536.9058769625287 }, "selected": false, "type": "genericNode", - "width": 384 + "width": 320 }, { "data": { - "id": "ToolCallingAgent-mf0BN", + "id": "Agent-7mKwO", "node": { "base_classes": [ - "AgentExecutor", "Message" ], "beta": true, + "category": "agents", "conditional_paths": [], "custom_fields": {}, - "description": "Agent that uses tools to perform the given task.", + "description": "Define the agent's instructions, then enter a task to complete using tools.", "display_name": "Agent", "documentation": "", - "edited": true, + "edited": false, 
"field_order": [ + "agent_llm", + "max_tokens", + "model_kwargs", + "json_mode", + "output_schema", + "model_name", + "openai_api_base", + "api_key", + "temperature", + "seed", + "output_parser", + "system_prompt", + "tools", "input_value", "handle_parsing_errors", "verbose", "max_iterations", - "tools", - "llm", - "system_prompt", - "user_prompt", - "chat_history" + "memory", + "sender", + "sender_name", + "n_messages", + "session_id", + "order", + "template" ], "frozen": false, - "icon": "bot-message-square", - "lf_version": "1.0.16", + "icon": "bot", + "key": "Agent", + "legacy": false, + "lf_version": "1.0.19", + "metadata": {}, "output_types": [], "outputs": [ - { - "cache": true, - "display_name": "Agent", - "hidden": true, - "method": "build_agent", - "name": "agent", - "selected": "AgentExecutor", - "types": [ - "AgentExecutor" - ], - "value": "__UNDEFINED__" - }, { "cache": true, "display_name": "Response", - "method": "message_response", + "method": "get_response", "name": "response", "selected": "Message", "types": [ @@ -974,24 +876,47 @@ "pinned": false, "template": { "_type": "Component", - "chat_history": { - "_input_type": "DataInput", - "advanced": true, - "display_name": "Chat History", + "agent_llm": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "display_name": "Model Provider", "dynamic": false, "info": "", - "input_types": [ - "Data" + "input_types": [], + "name": "agent_llm", + "options": [ + "Azure OpenAI", + "OpenAI", + "Custom" ], - "list": true, - "name": "chat_history", "placeholder": "", + "real_time_refresh": true, + "refresh_button": true, "required": false, "show": true, "title_case": false, - "trace_as_input": true, "trace_as_metadata": true, - "type": "other", + "type": "str", + "value": "OpenAI" + }, + "api_key": { + "_input_type": "SecretStrInput", + "advanced": false, + "display_name": "OpenAI API Key", + "dynamic": false, + "info": "The OpenAI API Key to use for the OpenAI model.", + 
"input_types": [ + "Message" + ], + "load_from_db": true, + "name": "api_key", + "password": true, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", "value": "" }, "code": { @@ -1010,7 +935,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Optional, List\n\nfrom langchain.agents import create_tool_calling_agent\nfrom langchain_core.prompts import ChatPromptTemplate, PromptTemplate, HumanMessagePromptTemplate\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.inputs import MultilineInput\nfrom langflow.inputs.inputs import HandleInput, DataInput\nfrom langflow.schema import Data\n\n\nclass ToolCallingAgentComponent(LCToolsAgentComponent):\n display_name: str = \"Tool Calling Agent\"\n description: str = \"Agent that uses tools\"\n icon = \"bot-message-square\"\n beta = True\n name = \"ToolCallingAgent\"\n\n inputs = LCToolsAgentComponent._base_inputs + [\n HandleInput(name=\"llm\", display_name=\"Language Model\", input_types=[\"LanguageModel\"], required=True),\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"System Prompt\",\n info=\"System prompt for the agent.\",\n value=\"You are a helpful assistant\",\n ),\n MultilineInput(\n name=\"user_prompt\", display_name=\"Prompt\", info=\"This prompt must contain 'input' key.\", value=\"{input}\"\n ),\n DataInput(name=\"chat_history\", display_name=\"Chat History\", is_list=True, advanced=True),\n ]\n\n def get_chat_history_data(self) -> Optional[List[Data]]:\n return self.chat_history\n\n def create_agent_runnable(self):\n if \"input\" not in self.user_prompt:\n raise ValueError(\"Prompt must contain 'input' key.\")\n messages = [\n (\"system\", self.system_prompt),\n (\"placeholder\", \"{chat_history}\"),\n HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=[\"input\"], template=self.user_prompt)),\n (\"placeholder\", \"{agent_scratchpad}\"),\n ]\n prompt = 
ChatPromptTemplate.from_messages(messages)\n return create_tool_calling_agent(self.llm, self.tools, prompt)\n" + "value": "from langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.components.agents.tool_calling import ToolCallingAgentComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.models.azure_openai import AzureChatOpenAIComponent\nfrom langflow.components.models.openai import OpenAIModelComponent\nfrom langflow.io import (\n DropdownInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = True\n name = \"Agent\"\n\n azure_inputs = [\n set_advanced_true(component_input) if component_input.name == \"temperature\" else component_input\n for component_input in AzureChatOpenAIComponent().inputs\n if component_input.name not in [input_field.name for input_field in LCModelComponent._base_inputs]\n ]\n openai_inputs = [\n set_advanced_true(component_input) if component_input.name == \"temperature\" else component_input\n for component_input in OpenAIModelComponent().inputs\n if component_input.name not in [input_field.name for input_field in LCModelComponent._base_inputs]\n ]\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n options=[\"Azure OpenAI\", \"OpenAI\", \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n refresh_button=True,\n input_types=[],\n ),\n *openai_inputs,\n MultilineInput(\n 
name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"get_response\")]\n\n async def get_response(self) -> Message:\n llm_model = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n self.chat_history = self.get_memory_data()\n\n agent = ToolCallingAgentComponent().set(\n llm=llm_model,\n tools=[self.tools],\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n\n return await agent.message_response()\n\n def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n\n return MemoryComponent().set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n try:\n if self.agent_llm == \"OpenAI\":\n return self._build_llm_model(OpenAIModelComponent(), self.openai_inputs)\n if self.agent_llm == \"Azure OpenAI\":\n return self._build_llm_model(AzureChatOpenAIComponent(), self.azure_inputs, prefix=\"azure_param_\")\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n return component.set(\n **{component_input.name: getattr(self, f\"{prefix}{component_input.name}\") for component_input in inputs}\n ).build_model()\n\n def delete_fields(self, build_config, fields):\n for field in fields:\n build_config.pop(field, None)\n\n def update_build_config(self, build_config: dotdict, field_value: str, field_name: str | None = None):\n if field_name == \"agent_llm\":\n openai_fields = 
{component_input.name: component_input for component_input in self.openai_inputs}\n azure_fields = {\n f\"azure_param_{component_input.name}\": component_input for component_input in self.azure_inputs\n }\n\n if field_value == \"OpenAI\":\n self.delete_fields(build_config, {**azure_fields})\n if not any(field in build_config for field in openai_fields):\n build_config.update(openai_fields)\n build_config[\"agent_llm\"][\"input_types\"] = []\n build_config = self.update_input_types(build_config)\n\n elif field_value == \"Azure OpenAI\":\n self.delete_fields(build_config, {**openai_fields})\n build_config.update(azure_fields)\n build_config[\"agent_llm\"][\"input_types\"] = []\n build_config = self.update_input_types(build_config)\n elif field_value == \"Custom\":\n self.delete_fields(build_config, {**openai_fields})\n self.delete_fields(build_config, {**azure_fields})\n new_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[\"Azure OpenAI\", \"OpenAI\", \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": new_component.to_dict()})\n build_config = self.update_input_types(build_config)\n default_keys = [\"code\", \"_type\", \"agent_llm\", \"tools\", \"input_value\"]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n return build_config\n\n def update_input_types(self, build_config):\n for key, value in build_config.items():\n # Check if the value is a dictionary\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n # Check if the value has an attribute 'input_types' and it is None\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n" }, "handle_parsing_errors": { "_input_type": 
"BoolInput", @@ -1049,24 +974,21 @@ "type": "str", "value": "" }, - "llm": { - "_input_type": "HandleInput", - "advanced": false, - "display_name": "Language Model", + "json_mode": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "JSON Mode", "dynamic": false, - "info": "", - "input_types": [ - "LanguageModel" - ], + "info": "If True, it will output JSON regardless of passing a schema.", "list": false, - "name": "llm", + "name": "json_mode", "placeholder": "", - "required": true, + "required": false, "show": true, "title_case": false, "trace_as_metadata": true, - "type": "other", - "value": "" + "type": "bool", + "value": false }, "max_iterations": { "_input_type": "IntInput", @@ -1084,41 +1006,39 @@ "type": "int", "value": 15 }, - "system_prompt": { - "_input_type": "MultilineInput", - "advanced": false, - "display_name": "System Prompt", + "max_tokens": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Max Tokens", "dynamic": false, - "info": "System prompt for the agent.", - "input_types": [ - "Message" - ], + "info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.", "list": false, - "load_from_db": false, - "multiline": true, - "name": "system_prompt", + "name": "max_tokens", "placeholder": "", + "range_spec": { + "max": 128000, + "min": 0, + "step": 0.1, + "step_type": "float" + }, "required": false, "show": true, "title_case": false, - "trace_as_input": true, "trace_as_metadata": true, - "type": "str", - "value": "You are an Amazing Agent that can use the tools provided to you and answer the question " + "type": "int", + "value": "" }, - "tools": { + "memory": { "_input_type": "HandleInput", - "advanced": false, - "display_name": "Tools", + "advanced": true, + "display_name": "External Memory", "dynamic": false, - "info": "", + "info": "Retrieve messages from an external memory. 
If empty, it will use the Langflow tables.", "input_types": [ - "Tool", - "BaseTool" + "BaseChatMessageHistory" ], - "list": true, - "load_from_db": false, - "name": "tools", + "list": false, + "name": "memory", "placeholder": "", "required": false, "show": true, @@ -1127,153 +1047,184 @@ "type": "other", "value": "" }, - "user_prompt": { - "_input_type": "MultilineInput", - "advanced": false, - "display_name": "Prompt", + "model_kwargs": { + "_input_type": "DictInput", + "advanced": true, + "display_name": "Model Kwargs", "dynamic": false, - "info": "This prompt must contain 'input' key.", - "input_types": [ - "Message" - ], + "info": "", "list": false, - "load_from_db": false, - "multiline": true, - "name": "user_prompt", + "name": "model_kwargs", "placeholder": "", "required": false, "show": true, "title_case": false, "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "{input}" + "type": "dict", + "value": {} }, - "verbose": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Verbose", - "dynamic": false, - "info": "", - "list": false, - "name": "verbose", + "model_name": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "display_name": "Model Name", + "dynamic": false, + "info": "", + "name": "model_name", + "options": [ + "gpt-4o-mini", + "gpt-4o", + "gpt-4-turbo", + "gpt-4-turbo-preview", + "gpt-4", + "gpt-3.5-turbo", + "gpt-3.5-turbo-0125" + ], "placeholder": "", "required": false, "show": true, "title_case": false, "trace_as_metadata": true, - "type": "bool", - "value": true - } - } - }, - "type": "ToolCallingAgent" - }, - "dragging": false, - "height": 598, - "id": "ToolCallingAgent-mf0BN", - "position": { - "x": 3276.3854573966964, - "y": 516.3304705434241 - }, - "positionAbsolute": { - "x": 3276.3854573966964, - "y": 516.3304705434241 - }, - "selected": false, - "type": "genericNode", - "width": 384 - }, - { - "data": { - "id": "CalculatorTool-Nb4P5", - "node": { - 
"base_classes": [ - "Data", - "list", - "Sequence", - "Tool" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": {}, - "description": "Perform basic arithmetic operations on a given expression.", - "display_name": "Calculator", - "documentation": "", - "edited": false, - "field_order": [ - "expression" - ], - "frozen": false, - "icon": "calculator", - "legacy": false, - "lf_version": "1.0.16", - "metadata": {}, - "official": false, - "output_types": [], - "outputs": [ - { - "cache": true, - "display_name": "Data", - "method": "run_model", - "name": "api_run_model", - "required_inputs": [ - "expression" - ], - "selected": "Data", - "types": [ - "Data" - ], - "value": "__UNDEFINED__" + "type": "str", + "value": "gpt-4o-mini" }, - { - "cache": true, - "display_name": "Tool", - "method": "build_tool", - "name": "api_build_tool", - "required_inputs": [ - "expression" + "n_messages": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Number of Messages", + "dynamic": false, + "info": "Number of messages to retrieve.", + "list": false, + "name": "n_messages", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "trace_as_metadata": true, + "type": "int", + "value": 100 + }, + "openai_api_base": { + "_input_type": "StrInput", + "advanced": true, + "display_name": "OpenAI API Base", + "dynamic": false, + "info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. 
You can change this to use other APIs like JinaChat, LocalAI and Prem.", + "list": false, + "load_from_db": false, + "name": "openai_api_base", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "trace_as_metadata": true, + "type": "str", + "value": "" + }, + "order": { + "_input_type": "DropdownInput", + "advanced": true, + "combobox": false, + "display_name": "Order", + "dynamic": false, + "info": "Order of the messages.", + "name": "order", + "options": [ + "Ascending", + "Descending" ], - "selected": "Tool", - "types": [ - "Tool" + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "trace_as_metadata": true, + "type": "str", + "value": "Ascending" + }, + "output_parser": { + "_input_type": "HandleInput", + "advanced": true, + "display_name": "Output Parser", + "dynamic": false, + "info": "The parser to use to parse the output of the model", + "input_types": [ + "OutputParser" ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "template": { - "_type": "Component", - "code": { + "list": false, + "name": "output_parser", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "trace_as_metadata": true, + "type": "other", + "value": "" + }, + "output_schema": { + "_input_type": "DictInput", "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", + "display_name": "Schema", + "dynamic": false, + "info": "The schema for the Output of the model. You must pass the word JSON in the prompt. If left blank, JSON mode will be disabled. 
[DEPRECATED]", + "list": true, + "name": "output_schema", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "trace_as_input": true, + "type": "dict", + "value": {} + }, + "seed": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Seed", + "dynamic": false, + "info": "The seed controls the reproducibility of the job.", "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, + "name": "seed", "placeholder": "", - "required": true, + "required": false, "show": true, "title_case": false, - "type": "code", - "value": "import ast\nimport operator\n\nfrom langchain.tools import StructuredTool\nfrom loguru import logger\nfrom pydantic import BaseModel, Field\n\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.field_typing import Tool\nfrom langflow.inputs import MessageTextInput\nfrom langflow.schema import Data\n\n\nclass CalculatorToolComponent(LCToolComponent):\n display_name = \"Calculator\"\n description = \"Perform basic arithmetic operations on a given expression.\"\n icon = \"calculator\"\n name = \"CalculatorTool\"\n\n inputs = [\n MessageTextInput(\n name=\"expression\",\n display_name=\"Expression\",\n info=\"The arithmetic expression to evaluate (e.g., '4*4*(33/22)+12-20').\",\n ),\n ]\n\n class CalculatorToolSchema(BaseModel):\n expression: str = Field(..., description=\"The arithmetic expression to evaluate.\")\n\n def run_model(self) -> list[Data]:\n return self._evaluate_expression(self.expression)\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"calculator\",\n description=\"Evaluate basic arithmetic expressions. 
Input should be a string containing the expression.\",\n func=self._evaluate_expression,\n args_schema=self.CalculatorToolSchema,\n )\n\n def _eval_expr(self, node):\n # Define the allowed operators\n operators = {\n ast.Add: operator.add,\n ast.Sub: operator.sub,\n ast.Mult: operator.mul,\n ast.Div: operator.truediv,\n ast.Pow: operator.pow,\n }\n if isinstance(node, ast.Num):\n return node.n\n if isinstance(node, ast.BinOp):\n return operators[type(node.op)](self._eval_expr(node.left), self._eval_expr(node.right))\n if isinstance(node, ast.UnaryOp):\n return operators[type(node.op)](self._eval_expr(node.operand))\n raise TypeError(node)\n\n def _evaluate_expression(self, expression: str) -> list[Data]:\n try:\n # Parse the expression and evaluate it\n tree = ast.parse(expression, mode=\"eval\")\n result = self._eval_expr(tree.body)\n\n # Format the result to a reasonable number of decimal places\n formatted_result = f\"{result:.6f}\".rstrip(\"0\").rstrip(\".\")\n\n self.status = formatted_result\n return [Data(data={\"result\": formatted_result})]\n\n except (SyntaxError, TypeError, KeyError) as e:\n error_message = f\"Invalid expression: {e}\"\n self.status = error_message\n return [Data(data={\"error\": error_message})]\n except ZeroDivisionError:\n error_message = \"Error: Division by zero\"\n self.status = error_message\n return [Data(data={\"error\": error_message})]\n except Exception as e: # noqa: BLE001\n logger.opt(exception=True).debug(\"Error evaluating expression\")\n error_message = f\"Error: {e}\"\n self.status = error_message\n return [Data(data={\"error\": error_message})]\n" + "trace_as_metadata": true, + "type": "int", + "value": 1 }, - "expression": { + "sender": { + "_input_type": "DropdownInput", + "advanced": true, + "combobox": false, + "display_name": "Sender Type", + "dynamic": false, + "info": "Filter by sender type.", + "name": "sender", + "options": [ + "Machine", + "User", + "Machine and User" + ], + "placeholder": "", + "required": 
false, + "show": true, + "title_case": false, + "trace_as_metadata": true, + "type": "str", + "value": "Machine and User" + }, + "sender_name": { "_input_type": "MessageTextInput", - "advanced": false, - "display_name": "Expression", + "advanced": true, + "display_name": "Sender Name", "dynamic": false, - "info": "The arithmetic expression to evaluate (e.g., '4*4*(33/22)+12-20').", + "info": "Filter by sender name.", "input_types": [ "Message" ], "list": false, "load_from_db": false, - "name": "expression", + "name": "sender_name", "placeholder": "", "required": false, "show": true, @@ -1281,194 +1232,158 @@ "trace_as_input": true, "trace_as_metadata": true, "type": "str", - "value": "2+2" - } - } - }, - "type": "CalculatorTool" - }, - "dragging": false, - "height": 375, - "id": "CalculatorTool-Nb4P5", - "position": { - "x": 2330.062076024461, - "y": 429.6717346334192 - }, - "positionAbsolute": { - "x": 2330.062076024461, - "y": 429.6717346334192 - }, - "selected": false, - "type": "genericNode", - "width": 384 - }, - { - "data": { - "description": "A tool for running Python code in a REPL environment.", - "display_name": "Python REPL Tool", - "id": "PythonREPLTool-i922a", - "node": { - "base_classes": [ - "Data", - "Tool" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": {}, - "description": "A tool for running Python code in a REPL environment.", - "display_name": "Python REPL Tool", - "documentation": "", - "edited": false, - "field_order": [ - "name", - "description", - "global_imports", - "code" - ], - "frozen": false, - "legacy": false, - "metadata": {}, - "output_types": [], - "outputs": [ - { - "cache": true, - "display_name": "Data", - "method": "run_model", - "name": "api_run_model", - "required_inputs": [ - "code", - "description", - "global_imports", - "name" - ], - "selected": "Data", - "types": [ - "Data" - ], - "value": "__UNDEFINED__" + "value": "" }, - { - "cache": true, - "display_name": "Tool", - "method": "build_tool", - 
"name": "api_build_tool", - "required_inputs": [ - "code", - "description", - "global_imports", - "name" - ], - "selected": "Tool", - "types": [ - "Tool" - ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "template": { - "_type": "Component", - "code": { + "session_id": { + "_input_type": "MessageTextInput", "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", + "display_name": "Session ID", + "dynamic": false, + "info": "The session ID of the chat. If empty, the current session ID parameter will be used.", + "input_types": [ + "Message" + ], "list": false, "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, + "name": "session_id", "placeholder": "", - "required": true, + "required": false, "show": true, "title_case": false, - "type": "code", - "value": "import importlib\n\nfrom langchain.tools import StructuredTool\nfrom langchain_experimental.utilities import PythonREPL\nfrom loguru import logger\nfrom pydantic import BaseModel, Field\n\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.field_typing import Tool\nfrom langflow.inputs import StrInput\nfrom langflow.schema import Data\n\n\nclass PythonREPLToolComponent(LCToolComponent):\n display_name = \"Python REPL Tool\"\n description = \"A tool for running Python code in a REPL environment.\"\n name = \"PythonREPLTool\"\n\n inputs = [\n StrInput(\n name=\"name\",\n display_name=\"Tool Name\",\n info=\"The name of the tool.\",\n value=\"python_repl\",\n ),\n StrInput(\n name=\"description\",\n display_name=\"Tool Description\",\n info=\"A description of the tool.\",\n value=\"A Python shell. Use this to execute python commands. \"\n \"Input should be a valid python command. 
\"\n \"If you want to see the output of a value, you should print it out with `print(...)`.\",\n ),\n StrInput(\n name=\"global_imports\",\n display_name=\"Global Imports\",\n info=\"A comma-separated list of modules to import globally, e.g. 'math,numpy'.\",\n value=\"math\",\n ),\n StrInput(\n name=\"code\",\n display_name=\"Python Code\",\n info=\"The Python code to execute.\",\n value=\"print('Hello, World!')\",\n ),\n ]\n\n class PythonREPLSchema(BaseModel):\n code: str = Field(..., description=\"The Python code to execute.\")\n\n def get_globals(self, global_imports: str | list[str]) -> dict:\n global_dict = {}\n if isinstance(global_imports, str):\n modules = [module.strip() for module in global_imports.split(\",\")]\n elif isinstance(global_imports, list):\n modules = global_imports\n else:\n msg = \"global_imports must be either a string or a list\"\n raise TypeError(msg)\n\n for module in modules:\n try:\n imported_module = importlib.import_module(module)\n global_dict[imported_module.__name__] = imported_module\n except ImportError as e:\n msg = f\"Could not import module {module}\"\n raise ImportError(msg) from e\n return global_dict\n\n def build_tool(self) -> Tool:\n _globals = self.get_globals(self.global_imports)\n python_repl = PythonREPL(_globals=_globals)\n\n def run_python_code(code: str) -> str:\n try:\n return python_repl.run(code)\n except Exception as e: # noqa: BLE001\n logger.opt(exception=True).debug(\"Error running Python code\")\n return f\"Error: {e}\"\n\n tool = StructuredTool.from_function(\n name=self.name,\n description=self.description,\n func=run_python_code,\n args_schema=self.PythonREPLSchema,\n )\n\n self.status = f\"Python REPL Tool created with global imports: {self.global_imports}\"\n return tool\n\n def run_model(self) -> list[Data]:\n tool = self.build_tool()\n result = tool.run(self.code)\n return [Data(data={\"result\": result})]\n" + "trace_as_input": true, + "trace_as_metadata": true, + "type": "str", + "value": "" }, 
- "description": { - "_input_type": "StrInput", + "system_prompt": { + "_input_type": "MultilineInput", "advanced": false, - "display_name": "Tool Description", + "display_name": "Agent Instructions", "dynamic": false, - "info": "A description of the tool.", + "info": "Initial instructions and context provided to guide the agent's behavior.", + "input_types": [ + "Message" + ], "list": false, "load_from_db": false, - "name": "description", + "multiline": true, + "name": "system_prompt", "placeholder": "", "required": false, "show": true, "title_case": false, + "trace_as_input": true, "trace_as_metadata": true, "type": "str", - "value": "A Python shell. Use this to execute python commands. Input should be a valid python command. If you want to see the output of a value, you should print it out with `print(...)`." + "value": "You are a helpful assistant that can use tools to answer questions and perform tasks." }, - "global_imports": { - "_input_type": "StrInput", - "advanced": false, - "display_name": "Global Imports", + "temperature": { + "_input_type": "FloatInput", + "advanced": true, + "display_name": "Temperature", "dynamic": false, - "info": "A comma-separated list of modules to import globally, e.g. 'math,numpy'.", + "info": "", + "list": false, + "name": "temperature", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "trace_as_metadata": true, + "type": "float", + "value": 0.1 + }, + "template": { + "_input_type": "MultilineInput", + "advanced": true, + "display_name": "Template", + "dynamic": false, + "info": "The template to use for formatting the data. 
It can contain the keys {text}, {sender} or any other key in the message data.", + "input_types": [ + "Message" + ], "list": false, "load_from_db": false, - "name": "global_imports", + "multiline": true, + "name": "template", "placeholder": "", "required": false, "show": true, "title_case": false, + "trace_as_input": true, "trace_as_metadata": true, "type": "str", - "value": "math" + "value": "{sender_name}: {text}" }, - "name": { - "_input_type": "StrInput", + "tools": { + "_input_type": "HandleInput", "advanced": false, - "display_name": "Tool Name", + "display_name": "Tools", "dynamic": false, - "info": "The name of the tool.", + "info": "", + "input_types": [ + "Tool", + "BaseTool", + "StructuredTool" + ], + "list": true, + "name": "tools", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "trace_as_metadata": true, + "type": "other", + "value": "" + }, + "verbose": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Verbose", + "dynamic": false, + "info": "", "list": false, - "load_from_db": false, - "name": "name", + "name": "verbose", "placeholder": "", "required": false, "show": true, "title_case": false, "trace_as_metadata": true, - "type": "str", - "value": "python_repl" + "type": "bool", + "value": true } } }, - "type": "PythonREPLTool" + "type": "Agent" }, "dragging": false, - "height": 547, - "id": "PythonREPLTool-i922a", + "height": 680, + "id": "Agent-7mKwO", "position": { - "x": 1763.1630547496572, - "y": 791.8164465037205 + "x": 966.8637275749741, + "y": 198.5272428958864 }, "positionAbsolute": { - "x": 1763.1630547496572, - "y": 791.8164465037205 + "x": 966.8637275749741, + "y": 198.5272428958864 }, - "selected": true, + "selected": false, "type": "genericNode", - "width": 384 + "width": 320 } ], "viewport": { - "x": -796.2952218140445, - "y": 174.7919632061971, - "zoom": 0.6144692758797546 + "x": 147.77504195332347, + "y": 14.254381863292224, + "zoom": 0.5119423065126806 } }, "description": 
"Single Agent Flow to get you started. This flow contains a calculator and a Python REPL tool, that could be used by our tool calling agent.", "endpoint_name": null, "icon": "Users", - "id": "beda74a3-7e03-4c14-a148-a7740e810dbf", + "id": "ad7caf8a-3357-4a2c-81d5-90c0ffb4ad3f", "is_component": false, - "last_tested_version": "1.0.17", + "last_tested_version": "1.0.19", "name": "Simple Agent", "tags": [ "agents", diff --git a/src/backend/tests/unit/components/agents/__init__.py b/src/backend/tests/unit/components/agents/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/src/backend/tests/unit/components/agents/test_agent_component.py b/src/backend/tests/unit/components/agents/test_agent_component.py new file mode 100644 index 00000000000..2d999cdbd6a --- /dev/null +++ b/src/backend/tests/unit/components/agents/test_agent_component.py @@ -0,0 +1,29 @@ +import os + +import pytest +from langflow.components.agents.agent import AgentComponent +from langflow.components.tools.calculator import CalculatorToolComponent + + +@pytest.mark.api_key_required +@pytest.mark.asyncio +async def test_agent_component_with_calculator(): + # Mock inputs + tools = [CalculatorToolComponent().build_tool()] # Use the Calculator component as a tool + input_value = "What is 2 + 2?" 
+ + api_key = os.environ["OPENAI_API_KEY"] + temperature = 0.1 + + # Initialize the AgentComponent with mocked inputs + agent = AgentComponent( + tools=tools, + input_value=input_value, + api_key=api_key, + model_name="gpt-4o", + agent_llm="OpenAI", + temperature=temperature, + ) + + response = await agent.get_response() + assert "4" in response.data.get("text") diff --git a/src/backend/tests/unit/components/agents/test_tool_calling_agent.py b/src/backend/tests/unit/components/agents/test_tool_calling_agent.py new file mode 100644 index 00000000000..312d29cf3bb --- /dev/null +++ b/src/backend/tests/unit/components/agents/test_tool_calling_agent.py @@ -0,0 +1,30 @@ +import os + +import pytest +from langflow.components.agents.tool_calling import ToolCallingAgentComponent +from langflow.components.models.openai import OpenAIModelComponent +from langflow.components.tools.calculator import CalculatorToolComponent + + +@pytest.mark.api_key_required +@pytest.mark.asyncio +async def test_tool_calling_agent_component(): + tools = [CalculatorToolComponent().build_tool()] # Use the Calculator component as a tool + input_value = "What is 2 + 2?" 
+ chat_history = [] + api_key = os.environ["OPENAI_API_KEY"] + temperature = 0.1 + + # Default OpenAI Model Component + llm_component = OpenAIModelComponent().set( + api_key=api_key, + temperature=temperature, + ) + llm = llm_component.build_model() + + agent = ToolCallingAgentComponent() + agent.set(llm=llm, tools=tools, chat_history=chat_history, input_value=input_value) + + # Chat output + response = await agent.message_response() + assert "4" in response.data.get("text") diff --git a/src/frontend/tests/core/integrations/Simple Agent.spec.ts b/src/frontend/tests/core/integrations/Simple Agent.spec.ts index b01e8b7f725..183ab51c9cf 100644 --- a/src/frontend/tests/core/integrations/Simple Agent.spec.ts +++ b/src/frontend/tests/core/integrations/Simple Agent.spec.ts @@ -77,6 +77,8 @@ test("Simple Agent", async ({ page }) => { .getByTestId("popover-anchor-input-api_key") .fill(process.env.OPENAI_API_KEY ?? ""); + await page.getByTestId("fit_view").click(); + await page.getByTestId("dropdown_str_model_name").click(); await page.getByTestId("gpt-4o-1-option").click();