diff --git a/src/backend/base/langflow/base/astra_assistants/__init__.py b/src/backend/base/langflow/base/astra_assistants/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/src/backend/base/langflow/components/astra_assistants/util.py b/src/backend/base/langflow/base/astra_assistants/util.py
similarity index 100%
rename from src/backend/base/langflow/components/astra_assistants/util.py
rename to src/backend/base/langflow/base/astra_assistants/util.py
diff --git a/src/backend/base/langflow/components/astra_assistants/astra_assistant_manager.py b/src/backend/base/langflow/components/astra_assistants/astra_assistant_manager.py
index aec2e9ddda7..d0ff8077759 100644
--- a/src/backend/base/langflow/components/astra_assistants/astra_assistant_manager.py
+++ b/src/backend/base/langflow/components/astra_assistants/astra_assistant_manager.py
@@ -2,7 +2,7 @@
 
 from astra_assistants.astra_assistants_manager import AssistantManager
 
-from langflow.components.astra_assistants.util import (
+from langflow.base.astra_assistants.util import (
     get_patched_openai_client,
     litellm_model_names,
     tool_names,
diff --git a/src/backend/base/langflow/components/astra_assistants/create_assistant.py b/src/backend/base/langflow/components/astra_assistants/create_assistant.py
index e69a2b7f08a..0221f53a3c8 100644
--- a/src/backend/base/langflow/components/astra_assistants/create_assistant.py
+++ b/src/backend/base/langflow/components/astra_assistants/create_assistant.py
@@ -1,4 +1,4 @@
-from langflow.components.astra_assistants.util import get_patched_openai_client
+from langflow.base.astra_assistants.util import get_patched_openai_client
 from langflow.custom.custom_component.component_with_cache import ComponentWithCache
 from langflow.inputs import MultilineInput, StrInput
 from langflow.schema.message import Message
diff --git a/src/backend/base/langflow/components/astra_assistants/create_thread.py b/src/backend/base/langflow/components/astra_assistants/create_thread.py
index 7ddaab3056f..7aa40e62376 100644
--- a/src/backend/base/langflow/components/astra_assistants/create_thread.py
+++ b/src/backend/base/langflow/components/astra_assistants/create_thread.py
@@ -1,4 +1,4 @@
-from langflow.components.astra_assistants.util import get_patched_openai_client
+from langflow.base.astra_assistants.util import get_patched_openai_client
 from langflow.custom.custom_component.component_with_cache import ComponentWithCache
 from langflow.inputs import MultilineInput
 from langflow.schema.message import Message
diff --git a/src/backend/base/langflow/components/astra_assistants/get_assistant.py b/src/backend/base/langflow/components/astra_assistants/get_assistant.py
index c77a3b2be67..1db2afd510f 100644
--- a/src/backend/base/langflow/components/astra_assistants/get_assistant.py
+++ b/src/backend/base/langflow/components/astra_assistants/get_assistant.py
@@ -1,4 +1,4 @@
-from langflow.components.astra_assistants.util import get_patched_openai_client
+from langflow.base.astra_assistants.util import get_patched_openai_client
 from langflow.custom.custom_component.component_with_cache import ComponentWithCache
 from langflow.inputs import MultilineInput, StrInput
 from langflow.schema.message import Message
diff --git a/src/backend/base/langflow/components/astra_assistants/list_assistants.py b/src/backend/base/langflow/components/astra_assistants/list_assistants.py
index 2db5e106446..762e16c3c25 100644
--- a/src/backend/base/langflow/components/astra_assistants/list_assistants.py
+++ b/src/backend/base/langflow/components/astra_assistants/list_assistants.py
@@ -1,4 +1,4 @@
-from langflow.components.astra_assistants.util import get_patched_openai_client
+from langflow.base.astra_assistants.util import get_patched_openai_client
 from langflow.custom.custom_component.component_with_cache import ComponentWithCache
 from langflow.schema.message import Message
 from langflow.template.field.base import Output
diff --git a/src/backend/base/langflow/components/astra_assistants/run.py b/src/backend/base/langflow/components/astra_assistants/run.py
index 845b4c070b1..70d1166679e 100644
--- a/src/backend/base/langflow/components/astra_assistants/run.py
+++ b/src/backend/base/langflow/components/astra_assistants/run.py
@@ -4,7 +4,7 @@
 from openai import OpenAI
 from openai.lib.streaming import AssistantEventHandler
 
-from langflow.components.astra_assistants.util import get_patched_openai_client
+from langflow.base.astra_assistants.util import get_patched_openai_client
 from langflow.custom.custom_component.component_with_cache import ComponentWithCache
 from langflow.inputs import MultilineInput
 from langflow.schema import dotdict
@@ -12,10 +12,6 @@
 from langflow.template import Output
 
 
-class AssistantsRunError(Exception):
-    """Error running assistant"""
-
-
 class AssistantsRun(ComponentWithCache):
     display_name = "Run Assistant"
     description = "Executes an Assistant Run against a thread"
@@ -101,3 +97,7 @@ def on_exception(self, exception: Exception) -> None:
             print(e)
             msg = f"Error running assistant: {e}"
             raise AssistantsRunError(msg) from e
+
+
+class AssistantsRunError(Exception):
+    """AssistantsRun error"""
diff --git a/src/backend/base/langflow/components/langchain_utilities/SpiderTool.py b/src/backend/base/langflow/components/langchain_utilities/SpiderTool.py
index 120e8a410b7..7572ac6ff33 100644
--- a/src/backend/base/langflow/components/langchain_utilities/SpiderTool.py
+++ b/src/backend/base/langflow/components/langchain_utilities/SpiderTool.py
@@ -6,10 +6,6 @@
 from langflow.schema import Data
 
 
-class SpiderToolError(Exception):
-    """SpiderTool error"""
-
-
 class SpiderTool(Component):
     display_name: str = "Spider Web Crawler & Scraper"
     description: str = "Spider API for web crawling and scraping."
@@ -130,3 +126,7 @@ def crawl(self) -> list[Data]:
         else:
             records.append(Data(data={"content": record["content"], "url": record["url"]}))
         return records
+
+
+class SpiderToolError(Exception):
+    """SpiderTool error"""
diff --git a/src/backend/base/langflow/components/toolkits/Metaphor.py b/src/backend/base/langflow/components/toolkits/Metaphor.py
index 66caa7b2bb5..d2ce8010bdb 100644
--- a/src/backend/base/langflow/components/toolkits/Metaphor.py
+++ b/src/backend/base/langflow/components/toolkits/Metaphor.py
@@ -1,36 +1,51 @@
-from langchain_community.agent_toolkits.base import BaseToolkit
-from langchain_core.tools import Tool, tool
+from langchain_core.tools import tool
 from metaphor_python import Metaphor
 
-from langflow.custom import CustomComponent
+from langflow.custom import Component
+from langflow.field_typing import Tool
+from langflow.io import BoolInput, IntInput, Output, SecretStrInput
 
 
-class MetaphorToolkit(CustomComponent):
-    display_name: str = "Metaphor"
-    description: str = "Metaphor Toolkit"
+class MetaphorToolkit(Component):
+    display_name = "Metaphor"
+    description = "Metaphor Toolkit for search and content retrieval"
     documentation = "https://python.langchain.com/docs/integrations/tools/metaphor_search"
-    beta: bool = True
-    name = "Metaphor"
-    # api key should be password = True
-    field_config = {
-        "metaphor_api_key": {"display_name": "Metaphor API Key", "password": True},
-        "code": {"advanced": True},
-    }
-
-    def build(
-        self,
-        metaphor_api_key: str,
-        use_autoprompt: bool = True,
-        search_num_results: int = 5,
-        similar_num_results: int = 5,
-    ) -> Tool | BaseToolkit:
-        # If documents, then we need to create a Vectara instance using .from_documents
-        client = Metaphor(api_key=metaphor_api_key)
+    beta = True
+
+    inputs = [
+        SecretStrInput(
+            name="metaphor_api_key",
+            display_name="Metaphor API Key",
+            password=True,
+        ),
+        BoolInput(
+            name="use_autoprompt",
+            display_name="Use Autoprompt",
+            value=True,
+        ),
+        IntInput(
+            name="search_num_results",
+            display_name="Search Number of Results",
+            value=5,
+        ),
+        IntInput(
+            name="similar_num_results",
+            display_name="Similar Number of Results",
+            value=5,
+        ),
+    ]
+
+    outputs = [
+        Output(name="tools", display_name="Tools", method="build_toolkit"),
+    ]
+
+    def build_toolkit(self) -> Tool:
+        client = Metaphor(api_key=self.metaphor_api_key)
 
         @tool
         def search(query: str):
             """Call search engine with a query."""
-            return client.search(query, use_autoprompt=use_autoprompt, num_results=search_num_results)
+            return client.search(query, use_autoprompt=self.use_autoprompt, num_results=self.search_num_results)
 
         @tool
         def get_contents(ids: list[str]):
@@ -46,6 +61,6 @@ def find_similar(url: str):
 
             The url passed in should be a URL returned from `search`
             """
-            return client.find_similar(url, num_results=similar_num_results)
+            return client.find_similar(url, num_results=self.similar_num_results)
 
         return [search, get_contents, find_similar]
diff --git a/src/backend/base/langflow/custom/custom_component/component.py b/src/backend/base/langflow/custom/custom_component/component.py
index 86ca415ea6e..d1ac93be07f 100644
--- a/src/backend/base/langflow/custom/custom_component/component.py
+++ b/src/backend/base/langflow/custom/custom_component/component.py
@@ -8,7 +8,6 @@
 
 import nanoid
 import yaml
-from loguru import logger
 from pydantic import BaseModel
 
 from langflow.base.tools.constants import TOOL_OUTPUT_NAME
@@ -340,7 +339,6 @@ def _set_output_required_inputs(self):
             source_code = inspect.getsource(method)
             ast_tree = ast.parse(dedent(source_code))
         except Exception:  # noqa: BLE001
-            logger.opt(exception=True).debug(f"Could not get source code for method {method}")
             source_code = self._code
             ast_tree = ast.parse(dedent(source_code))
 
diff --git a/src/backend/base/langflow/custom/directory_reader/directory_reader.py b/src/backend/base/langflow/custom/directory_reader/directory_reader.py
index 4a7c7a54c0b..65d973ec5da 100644
--- a/src/backend/base/langflow/custom/directory_reader/directory_reader.py
+++ b/src/backend/base/langflow/custom/directory_reader/directory_reader.py
@@ -78,7 +78,7 @@ def filter_loaded_components(self, data: dict, with_errors: bool) -> dict:
                     component_tuple = (*build_component(component), component)
                     components.append(component_tuple)
                 except Exception:  # noqa: BLE001
-                    logger.opt(exception=True).debug(f"Error while loading component {component['name']}")
+                    logger.debug(f"Error while loading component {component['name']} from {component['file']}")
                     continue
             items.append({"name": menu["name"], "path": menu["path"], "components": components})
         filtered = [menu for menu in items if menu["components"]]
diff --git a/src/backend/base/langflow/graph/vertex/base.py b/src/backend/base/langflow/graph/vertex/base.py
index 470880c212a..f7891a52faa 100644
--- a/src/backend/base/langflow/graph/vertex/base.py
+++ b/src/backend/base/langflow/graph/vertex/base.py
@@ -370,9 +370,12 @@ def _build_params(self):
             val = field.get("value")
             if field.get("type") == "code":
                 try:
-                    params[field_name] = ast.literal_eval(val) if val else None
+                    if field_name == "code":
+                        params[field_name] = val
+                    else:
+                        params[field_name] = ast.literal_eval(val) if val else None
                 except Exception:  # noqa: BLE001
-                    logger.opt(exception=True).debug(f"Error evaluating code for {field_name}")
+                    logger.debug(f"Error evaluating code for {field_name}")
                     params[field_name] = val
             elif field.get("type") in ["dict", "NestedDict"]:
                 # When dict comes from the frontend it comes as a
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Agent Flow.json b/src/backend/base/langflow/initial_setup/starter_projects/Agent Flow.json
index 6bfab213233..88c6dd14aca 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Agent Flow.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Agent Flow.json
@@ -670,7 +670,7 @@
         "show": true,
         "title_case": false,
         "type": "code",
-        "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n    BoolInput,\n    DictInput,\n    DropdownInput,\n    FloatInput,\n    IntInput,\n    SecretStrInput,\n    StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n    display_name = \"OpenAI\"\n    description = \"Generates text using OpenAI LLMs.\"\n    icon = \"OpenAI\"\n    name = \"OpenAIModel\"\n\n    inputs = [\n        *LCModelComponent._base_inputs,\n        IntInput(\n            name=\"max_tokens\",\n            display_name=\"Max Tokens\",\n            advanced=True,\n            info=\"The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\") # type: ignore\n else:\n output = output.bind(response_format={\"type\": \"json_object\"}) # type: ignore\n\n return output # type: ignore\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\") # type: ignore\n if message:\n return message\n return None\n" + "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom 
langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n else:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai 
import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n" }, "input_value": { "_input_type": "MessageInput", @@ -1256,7 +1256,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import ast\nimport operator\n\nfrom langchain.tools import StructuredTool\nfrom pydantic import BaseModel, Field\n\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.field_typing import Tool\nfrom langflow.inputs import MessageTextInput\nfrom langflow.schema import Data\n\n\nclass CalculatorToolComponent(LCToolComponent):\n display_name = \"Calculator\"\n description = \"Perform basic arithmetic operations on a given expression.\"\n icon = \"calculator\"\n name = \"CalculatorTool\"\n\n inputs = [\n MessageTextInput(\n name=\"expression\",\n display_name=\"Expression\",\n info=\"The arithmetic expression to evaluate (e.g., '4*4*(33/22)+12-20').\",\n ),\n ]\n\n class CalculatorToolSchema(BaseModel):\n expression: str = Field(..., description=\"The arithmetic expression to evaluate.\")\n\n def run_model(self) -> list[Data]:\n return self._evaluate_expression(self.expression)\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"calculator\",\n description=\"Evaluate basic arithmetic expressions. Input should be a string containing the expression.\",\n func=self._evaluate_expression,\n args_schema=self.CalculatorToolSchema,\n )\n\n def _evaluate_expression(self, expression: str) -> list[Data]:\n try:\n # Define the allowed operators\n operators = {\n ast.Add: operator.add,\n ast.Sub: operator.sub,\n ast.Mult: operator.mul,\n ast.Div: operator.truediv,\n ast.Pow: operator.pow,\n }\n\n def eval_expr(node):\n if isinstance(node, ast.Num):\n return node.n\n if isinstance(node, ast.BinOp):\n return operators[type(node.op)](eval_expr(node.left), eval_expr(node.right))\n if isinstance(node, ast.UnaryOp):\n return operators[type(node.op)](eval_expr(node.operand))\n raise TypeError(node)\n\n # Parse the expression and evaluate it\n tree = ast.parse(expression, mode=\"eval\")\n result = eval_expr(tree.body)\n\n # Format the result to a reasonable number of decimal places\n formatted_result = f\"{result:.6f}\".rstrip(\"0\").rstrip(\".\")\n\n self.status = formatted_result\n return [Data(data={\"result\": formatted_result})]\n\n except (SyntaxError, TypeError, KeyError) as e:\n error_message = f\"Invalid expression: {e}\"\n self.status = error_message\n return [Data(data={\"error\": error_message})]\n except ZeroDivisionError:\n error_message = \"Error: Division by zero\"\n self.status = error_message\n return [Data(data={\"error\": error_message})]\n except Exception as e:\n error_message = f\"Error: {e}\"\n self.status = error_message\n return [Data(data={\"error\": error_message})]\n" + "value": "import ast\nimport operator\n\nfrom langchain.tools import StructuredTool\nfrom loguru import logger\nfrom pydantic import BaseModel, Field\n\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.field_typing import Tool\nfrom langflow.inputs import MessageTextInput\nfrom langflow.schema import Data\n\n\nclass CalculatorToolComponent(LCToolComponent):\n display_name = \"Calculator\"\n description = \"Perform basic arithmetic operations on a given expression.\"\n icon = \"calculator\"\n name = \"CalculatorTool\"\n\n inputs = [\n MessageTextInput(\n name=\"expression\",\n display_name=\"Expression\",\n info=\"The 
arithmetic expression to evaluate (e.g., '4*4*(33/22)+12-20').\",\n ),\n ]\n\n class CalculatorToolSchema(BaseModel):\n expression: str = Field(..., description=\"The arithmetic expression to evaluate.\")\n\n def run_model(self) -> list[Data]:\n return self._evaluate_expression(self.expression)\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"calculator\",\n description=\"Evaluate basic arithmetic expressions. Input should be a string containing the expression.\",\n func=self._evaluate_expression,\n args_schema=self.CalculatorToolSchema,\n )\n\n def _evaluate_expression(self, expression: str) -> list[Data]:\n try:\n # Define the allowed operators\n operators = {\n ast.Add: operator.add,\n ast.Sub: operator.sub,\n ast.Mult: operator.mul,\n ast.Div: operator.truediv,\n ast.Pow: operator.pow,\n }\n\n def eval_expr(node):\n if isinstance(node, ast.Num):\n return node.n\n if isinstance(node, ast.BinOp):\n return operators[type(node.op)](eval_expr(node.left), eval_expr(node.right))\n if isinstance(node, ast.UnaryOp):\n return operators[type(node.op)](eval_expr(node.operand))\n raise TypeError(node)\n\n # Parse the expression and evaluate it\n tree = ast.parse(expression, mode=\"eval\")\n result = eval_expr(tree.body)\n\n # Format the result to a reasonable number of decimal places\n formatted_result = f\"{result:.6f}\".rstrip(\"0\").rstrip(\".\")\n\n self.status = formatted_result\n return [Data(data={\"result\": formatted_result})]\n\n except (SyntaxError, TypeError, KeyError) as e:\n error_message = f\"Invalid expression: {e}\"\n self.status = error_message\n return [Data(data={\"error\": error_message})]\n except ZeroDivisionError:\n error_message = \"Error: Division by zero\"\n self.status = error_message\n return [Data(data={\"error\": error_message})]\n except Exception as e: # noqa: BLE001\n logger.opt(exception=True).debug(\"Error evaluating expression\")\n error_message = f\"Error: {e}\"\n self.status = error_message\n return [Data(data={\"error\": error_message})]\n" }, "expression": { "_input_type": "MessageTextInput", @@ -1379,7 +1379,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import importlib\n\nfrom langchain.tools import StructuredTool\nfrom langchain_experimental.utilities import PythonREPL\nfrom pydantic import BaseModel, Field\n\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.field_typing import Tool\nfrom langflow.inputs import StrInput\nfrom langflow.schema import Data\n\n\nclass PythonREPLToolComponent(LCToolComponent):\n display_name = \"Python REPL Tool\"\n description = \"A tool for running Python code in a REPL environment.\"\n name = \"PythonREPLTool\"\n\n inputs = [\n StrInput(\n name=\"name\",\n display_name=\"Tool Name\",\n info=\"The name of the tool.\",\n value=\"python_repl\",\n ),\n StrInput(\n name=\"description\",\n display_name=\"Tool Description\",\n info=\"A description of the tool.\",\n value=\"A Python shell. Use this to execute python commands. \"\n \"Input should be a valid python command. \"\n \"If you want to see the output of a value, you should print it out with `print(...)`.\",\n ),\n StrInput(\n name=\"global_imports\",\n display_name=\"Global Imports\",\n info=\"A comma-separated list of modules to import globally, e.g. 
'math,numpy'.\",\n value=\"math\",\n ),\n StrInput(\n name=\"code\",\n display_name=\"Python Code\",\n info=\"The Python code to execute.\",\n value=\"print('Hello, World!')\",\n ),\n ]\n\n class PythonREPLSchema(BaseModel):\n code: str = Field(..., description=\"The Python code to execute.\")\n\n def get_globals(self, global_imports: str | list[str]) -> dict:\n global_dict = {}\n if isinstance(global_imports, str):\n modules = [module.strip() for module in global_imports.split(\",\")]\n elif isinstance(global_imports, list):\n modules = global_imports\n else:\n msg = \"global_imports must be either a string or a list\"\n raise ValueError(msg)\n\n for module in modules:\n try:\n imported_module = importlib.import_module(module)\n global_dict[imported_module.__name__] = imported_module\n except ImportError as e:\n msg = f\"Could not import module {module}\"\n raise ImportError(msg) from e\n return global_dict\n\n def build_tool(self) -> Tool:\n _globals = self.get_globals(self.global_imports)\n python_repl = PythonREPL(_globals=_globals)\n\n def run_python_code(code: str) -> str:\n try:\n return python_repl.run(code)\n except Exception as e:\n return f\"Error: {e}\"\n\n tool = StructuredTool.from_function(\n name=self.name,\n description=self.description,\n func=run_python_code,\n args_schema=self.PythonREPLSchema,\n )\n\n self.status = f\"Python REPL Tool created with global imports: {self.global_imports}\"\n return tool\n\n def run_model(self) -> list[Data]:\n tool = self.build_tool()\n result = tool.run(self.code)\n return [Data(data={\"result\": result})]\n" + "value": "import importlib\n\nfrom langchain.tools import StructuredTool\nfrom langchain_experimental.utilities import PythonREPL\nfrom loguru import logger\nfrom pydantic import BaseModel, Field\n\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.field_typing import Tool\nfrom langflow.inputs import StrInput\nfrom langflow.schema import Data\n\n\nclass PythonREPLToolComponent(LCToolComponent):\n display_name = \"Python REPL Tool\"\n description = \"A tool for running Python code in a REPL environment.\"\n name = \"PythonREPLTool\"\n\n inputs = [\n StrInput(\n name=\"name\",\n display_name=\"Tool Name\",\n info=\"The name of the tool.\",\n value=\"python_repl\",\n ),\n StrInput(\n name=\"description\",\n display_name=\"Tool Description\",\n info=\"A description of the tool.\",\n value=\"A Python shell. Use this to execute python commands. \"\n \"Input should be a valid python command. \"\n \"If you want to see the output of a value, you should print it out with `print(...)`.\",\n ),\n StrInput(\n name=\"global_imports\",\n display_name=\"Global Imports\",\n info=\"A comma-separated list of modules to import globally, e.g. 
'math,numpy'.\",\n value=\"math\",\n ),\n StrInput(\n name=\"code\",\n display_name=\"Python Code\",\n info=\"The Python code to execute.\",\n value=\"print('Hello, World!')\",\n ),\n ]\n\n class PythonREPLSchema(BaseModel):\n code: str = Field(..., description=\"The Python code to execute.\")\n\n def get_globals(self, global_imports: str | list[str]) -> dict:\n global_dict = {}\n if isinstance(global_imports, str):\n modules = [module.strip() for module in global_imports.split(\",\")]\n elif isinstance(global_imports, list):\n modules = global_imports\n else:\n msg = \"global_imports must be either a string or a list\"\n raise TypeError(msg)\n\n for module in modules:\n try:\n imported_module = importlib.import_module(module)\n global_dict[imported_module.__name__] = imported_module\n except ImportError as e:\n msg = f\"Could not import module {module}\"\n raise ImportError(msg) from e\n return global_dict\n\n def build_tool(self) -> Tool:\n _globals = self.get_globals(self.global_imports)\n python_repl = PythonREPL(_globals=_globals)\n\n def run_python_code(code: str) -> str:\n try:\n return python_repl.run(code)\n except Exception as e: # noqa: BLE001\n logger.opt(exception=True).debug(\"Error running Python code\")\n return f\"Error: {e}\"\n\n tool = StructuredTool.from_function(\n name=self.name,\n description=self.description,\n func=run_python_code,\n args_schema=self.PythonREPLSchema,\n )\n\n self.status = f\"Python REPL Tool created with global imports: {self.global_imports}\"\n return tool\n\n def run_model(self) -> list[Data]:\n tool = self.build_tool()\n result = tool.run(self.code)\n return [Data(data={\"result\": result})]\n" }, "description": { "_input_type": "StrInput", diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting (Hello, World).json b/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting (Hello, World).json index aba6ffe30bb..0e3a825acad 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting (Hello, World).json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Basic Prompting (Hello, World).json @@ -727,7 +727,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\") # type: ignore\n else:\n output = output.bind(response_format={\"type\": \"json_object\"}) # type: ignore\n\n return output # type: ignore\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\") # type: ignore\n if message:\n return message\n return None\n" + "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom 
langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n else:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai 
import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n" }, "input_value": { "advanced": false, diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Blog Writer.json b/src/backend/base/langflow/initial_setup/starter_projects/Blog Writer.json index bd7d446788a..5c89d1243eb 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Blog Writer.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Blog Writer.json @@ -949,7 +949,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. 
\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\") # type: ignore\n else:\n output = output.bind(response_format={\"type\": \"json_object\"}) # type: ignore\n\n return output # type: ignore\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\") # type: ignore\n if message:\n return message\n return None\n" + "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n else:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n" }, "input_value": { "advanced": false, diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Complex Agent.json b/src/backend/base/langflow/initial_setup/starter_projects/Complex Agent.json index 84af95b5df2..eda11b1b532 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Complex Agent.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Complex Agent.json @@ 
-682,7 +682,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from crewai import Crew, Process # type: ignore\n\nfrom langflow.base.agents.crewai.crew import BaseCrewComponent\nfrom langflow.io import HandleInput\n\n\nclass HierarchicalCrewComponent(BaseCrewComponent):\n display_name: str = \"Hierarchical Crew\"\n description: str = (\n \"Represents a group of agents, defining how they should collaborate and the tasks they should perform.\"\n )\n documentation: str = \"https://docs.crewai.com/how-to/Hierarchical/\"\n icon = \"CrewAI\"\n\n inputs = [\n *BaseCrewComponent._base_inputs,\n HandleInput(name=\"agents\", display_name=\"Agents\", input_types=[\"Agent\"], is_list=True),\n HandleInput(name=\"tasks\", display_name=\"Tasks\", input_types=[\"HierarchicalTask\"], is_list=True),\n HandleInput(name=\"manager_llm\", display_name=\"Manager LLM\", input_types=[\"LanguageModel\"], required=False),\n HandleInput(name=\"manager_agent\", display_name=\"Manager Agent\", input_types=[\"Agent\"], required=False),\n ]\n\n def build_crew(self) -> Crew:\n tasks, agents = self.get_tasks_and_agents()\n return Crew(\n agents=agents,\n tasks=tasks,\n process=Process.hierarchical,\n verbose=self.verbose,\n memory=self.memory,\n cache=self.use_cache,\n max_rpm=self.max_rpm,\n share_crew=self.share_crew,\n function_calling_llm=self.function_calling_llm,\n manager_agent=self.manager_agent,\n manager_llm=self.manager_llm,\n step_callback=self.get_step_callback(),\n task_callback=self.get_task_callback(),\n )\n" + "value": "from crewai import Crew, Process\n\nfrom langflow.base.agents.crewai.crew import BaseCrewComponent\nfrom langflow.io import HandleInput\n\n\nclass HierarchicalCrewComponent(BaseCrewComponent):\n display_name: str = \"Hierarchical Crew\"\n description: str = (\n \"Represents a group of agents, defining how they should collaborate and the tasks they should perform.\"\n )\n documentation: str = \"https://docs.crewai.com/how-to/Hierarchical/\"\n icon = \"CrewAI\"\n\n inputs = [\n *BaseCrewComponent._base_inputs,\n HandleInput(name=\"agents\", display_name=\"Agents\", input_types=[\"Agent\"], is_list=True),\n HandleInput(name=\"tasks\", display_name=\"Tasks\", input_types=[\"HierarchicalTask\"], is_list=True),\n HandleInput(name=\"manager_llm\", display_name=\"Manager LLM\", input_types=[\"LanguageModel\"], required=False),\n HandleInput(name=\"manager_agent\", display_name=\"Manager Agent\", input_types=[\"Agent\"], required=False),\n ]\n\n def build_crew(self) -> Crew:\n tasks, agents = self.get_tasks_and_agents()\n return Crew(\n agents=agents,\n tasks=tasks,\n process=Process.hierarchical,\n verbose=self.verbose,\n memory=self.memory,\n cache=self.use_cache,\n max_rpm=self.max_rpm,\n share_crew=self.share_crew,\n function_calling_llm=self.function_calling_llm,\n manager_agent=self.manager_agent,\n manager_llm=self.manager_llm,\n step_callback=self.get_step_callback(),\n task_callback=self.get_task_callback(),\n )\n" }, "function_calling_llm": { "advanced": true, @@ -956,7 +956,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n 
FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\") # type: ignore\n else:\n output = output.bind(response_format={\"type\": \"json_object\"}) # type: ignore\n\n return output # type: ignore\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, 
BadRequestError):\n message = e.body.get(\"message\") # type: ignore\n if message:\n return message\n return None\n" + "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. 
\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n else:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n" }, "input_value": { "advanced": false, @@ -1613,7 +1613,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from crewai import Agent # type: ignore\n\nfrom langflow.custom import Component\nfrom langflow.io import BoolInput, DictInput, HandleInput, MultilineInput, Output\n\n\nclass CrewAIAgentComponent(Component):\n display_name = \"CrewAI Agent\"\n description = \"Represents an agent of CrewAI.\"\n documentation: str = \"https://docs.crewai.com/how-to/LLM-Connections/\"\n icon = \"CrewAI\"\n\n inputs = [\n MultilineInput(name=\"role\", display_name=\"Role\", info=\"The role of the agent.\"),\n MultilineInput(name=\"goal\", display_name=\"Goal\", info=\"The objective of the agent.\"),\n MultilineInput(name=\"backstory\", display_name=\"Backstory\", info=\"The backstory of the agent.\"),\n HandleInput(\n name=\"tools\",\n display_name=\"Tools\",\n input_types=[\"Tool\"],\n is_list=True,\n info=\"Tools at agents disposal\",\n value=[],\n ),\n HandleInput(\n name=\"llm\",\n display_name=\"Language Model\",\n info=\"Language model that will run the agent.\",\n input_types=[\"LanguageModel\"],\n ),\n BoolInput(\n name=\"memory\",\n display_name=\"Memory\",\n info=\"Whether the agent should have memory or not\",\n advanced=True,\n value=True,\n ),\n BoolInput(\n name=\"verbose\",\n 
display_name=\"Verbose\",\n advanced=True,\n value=False,\n ),\n BoolInput(\n name=\"allow_delegation\",\n display_name=\"Allow Delegation\",\n info=\"Whether the agent is allowed to delegate tasks to other agents.\",\n value=True,\n ),\n BoolInput(\n name=\"allow_code_execution\",\n display_name=\"Allow Code Execution\",\n info=\"Whether the agent is allowed to execute code.\",\n value=False,\n advanced=True,\n ),\n DictInput(\n name=\"kwargs\",\n display_name=\"kwargs\",\n info=\"kwargs of agent.\",\n is_list=True,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Agent\", name=\"output\", method=\"build_output\"),\n ]\n\n def build_output(self) -> Agent:\n kwargs = self.kwargs or {}\n agent = Agent(\n role=self.role,\n goal=self.goal,\n backstory=self.backstory,\n llm=self.llm,\n verbose=self.verbose,\n memory=self.memory,\n tools=self.tools or [],\n allow_delegation=self.allow_delegation,\n allow_code_execution=self.allow_code_execution,\n **kwargs,\n )\n self.status = repr(agent)\n return agent\n" + "value": "from crewai import Agent\n\nfrom langflow.custom import Component\nfrom langflow.io import BoolInput, DictInput, HandleInput, MultilineInput, Output\n\n\nclass CrewAIAgentComponent(Component):\n display_name = \"CrewAI Agent\"\n description = \"Represents an agent of CrewAI.\"\n documentation: str = \"https://docs.crewai.com/how-to/LLM-Connections/\"\n icon = \"CrewAI\"\n\n inputs = [\n MultilineInput(name=\"role\", display_name=\"Role\", info=\"The role of the agent.\"),\n MultilineInput(name=\"goal\", display_name=\"Goal\", info=\"The objective of the agent.\"),\n MultilineInput(name=\"backstory\", display_name=\"Backstory\", info=\"The backstory of the agent.\"),\n HandleInput(\n name=\"tools\",\n display_name=\"Tools\",\n input_types=[\"Tool\"],\n is_list=True,\n info=\"Tools at agents disposal\",\n value=[],\n ),\n HandleInput(\n name=\"llm\",\n display_name=\"Language Model\",\n info=\"Language model that will run the agent.\",\n input_types=[\"LanguageModel\"],\n ),\n BoolInput(\n name=\"memory\",\n display_name=\"Memory\",\n info=\"Whether the agent should have memory or not\",\n advanced=True,\n value=True,\n ),\n BoolInput(\n name=\"verbose\",\n display_name=\"Verbose\",\n advanced=True,\n value=False,\n ),\n BoolInput(\n name=\"allow_delegation\",\n display_name=\"Allow Delegation\",\n info=\"Whether the agent is allowed to delegate tasks to other agents.\",\n value=True,\n ),\n BoolInput(\n name=\"allow_code_execution\",\n display_name=\"Allow Code Execution\",\n info=\"Whether the agent is allowed to execute code.\",\n value=False,\n advanced=True,\n ),\n DictInput(\n name=\"kwargs\",\n display_name=\"kwargs\",\n info=\"kwargs of agent.\",\n is_list=True,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Agent\", name=\"output\", method=\"build_output\"),\n ]\n\n def build_output(self) -> Agent:\n kwargs = self.kwargs or {}\n agent = Agent(\n role=self.role,\n goal=self.goal,\n backstory=self.backstory,\n llm=self.llm,\n verbose=self.verbose,\n memory=self.memory,\n tools=self.tools or [],\n allow_delegation=self.allow_delegation,\n allow_code_execution=self.allow_code_execution,\n **kwargs,\n )\n self.status = repr(agent)\n return agent\n" }, "goal": { "advanced": false, @@ -1868,7 +1868,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from crewai import Agent # type: ignore\n\nfrom langflow.custom import Component\nfrom langflow.io import BoolInput, DictInput, HandleInput, MultilineInput, Output\n\n\nclass 
CrewAIAgentComponent(Component):\n display_name = \"CrewAI Agent\"\n description = \"Represents an agent of CrewAI.\"\n documentation: str = \"https://docs.crewai.com/how-to/LLM-Connections/\"\n icon = \"CrewAI\"\n\n inputs = [\n MultilineInput(name=\"role\", display_name=\"Role\", info=\"The role of the agent.\"),\n MultilineInput(name=\"goal\", display_name=\"Goal\", info=\"The objective of the agent.\"),\n MultilineInput(name=\"backstory\", display_name=\"Backstory\", info=\"The backstory of the agent.\"),\n HandleInput(\n name=\"tools\",\n display_name=\"Tools\",\n input_types=[\"Tool\"],\n is_list=True,\n info=\"Tools at agents disposal\",\n value=[],\n ),\n HandleInput(\n name=\"llm\",\n display_name=\"Language Model\",\n info=\"Language model that will run the agent.\",\n input_types=[\"LanguageModel\"],\n ),\n BoolInput(\n name=\"memory\",\n display_name=\"Memory\",\n info=\"Whether the agent should have memory or not\",\n advanced=True,\n value=True,\n ),\n BoolInput(\n name=\"verbose\",\n display_name=\"Verbose\",\n advanced=True,\n value=False,\n ),\n BoolInput(\n name=\"allow_delegation\",\n display_name=\"Allow Delegation\",\n info=\"Whether the agent is allowed to delegate tasks to other agents.\",\n value=True,\n ),\n BoolInput(\n name=\"allow_code_execution\",\n display_name=\"Allow Code Execution\",\n info=\"Whether the agent is allowed to execute code.\",\n value=False,\n advanced=True,\n ),\n DictInput(\n name=\"kwargs\",\n display_name=\"kwargs\",\n info=\"kwargs of agent.\",\n is_list=True,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Agent\", name=\"output\", method=\"build_output\"),\n ]\n\n def build_output(self) -> Agent:\n kwargs = self.kwargs or {}\n agent = Agent(\n role=self.role,\n goal=self.goal,\n backstory=self.backstory,\n llm=self.llm,\n verbose=self.verbose,\n memory=self.memory,\n tools=self.tools or [],\n allow_delegation=self.allow_delegation,\n allow_code_execution=self.allow_code_execution,\n **kwargs,\n )\n self.status = repr(agent)\n return agent\n" + "value": "from crewai import Agent\n\nfrom langflow.custom import Component\nfrom langflow.io import BoolInput, DictInput, HandleInput, MultilineInput, Output\n\n\nclass CrewAIAgentComponent(Component):\n display_name = \"CrewAI Agent\"\n description = \"Represents an agent of CrewAI.\"\n documentation: str = \"https://docs.crewai.com/how-to/LLM-Connections/\"\n icon = \"CrewAI\"\n\n inputs = [\n MultilineInput(name=\"role\", display_name=\"Role\", info=\"The role of the agent.\"),\n MultilineInput(name=\"goal\", display_name=\"Goal\", info=\"The objective of the agent.\"),\n MultilineInput(name=\"backstory\", display_name=\"Backstory\", info=\"The backstory of the agent.\"),\n HandleInput(\n name=\"tools\",\n display_name=\"Tools\",\n input_types=[\"Tool\"],\n is_list=True,\n info=\"Tools at agents disposal\",\n value=[],\n ),\n HandleInput(\n name=\"llm\",\n display_name=\"Language Model\",\n info=\"Language model that will run the agent.\",\n input_types=[\"LanguageModel\"],\n ),\n BoolInput(\n name=\"memory\",\n display_name=\"Memory\",\n info=\"Whether the agent should have memory or not\",\n advanced=True,\n value=True,\n ),\n BoolInput(\n name=\"verbose\",\n display_name=\"Verbose\",\n advanced=True,\n value=False,\n ),\n BoolInput(\n name=\"allow_delegation\",\n display_name=\"Allow Delegation\",\n info=\"Whether the agent is allowed to delegate tasks to other agents.\",\n value=True,\n ),\n BoolInput(\n name=\"allow_code_execution\",\n display_name=\"Allow Code 
Execution\",\n info=\"Whether the agent is allowed to execute code.\",\n value=False,\n advanced=True,\n ),\n DictInput(\n name=\"kwargs\",\n display_name=\"kwargs\",\n info=\"kwargs of agent.\",\n is_list=True,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Agent\", name=\"output\", method=\"build_output\"),\n ]\n\n def build_output(self) -> Agent:\n kwargs = self.kwargs or {}\n agent = Agent(\n role=self.role,\n goal=self.goal,\n backstory=self.backstory,\n llm=self.llm,\n verbose=self.verbose,\n memory=self.memory,\n tools=self.tools or [],\n allow_delegation=self.allow_delegation,\n allow_code_execution=self.allow_code_execution,\n **kwargs,\n )\n self.status = repr(agent)\n return agent\n" }, "goal": { "advanced": false, @@ -2123,7 +2123,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. 
\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\") # type: ignore\n else:\n output = output.bind(response_format={\"type\": \"json_object\"}) # type: ignore\n\n return output # type: ignore\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\") # type: ignore\n if message:\n return message\n return None\n" + "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n else:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n" }, "input_value": { "advanced": false, @@ -2894,7 +2894,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import 
LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\") # type: ignore\n else:\n output = output.bind(response_format={\"type\": \"json_object\"}) # type: ignore\n\n return output # type: ignore\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n 
str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\") # type: ignore\n if message:\n return message\n return None\n" + "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. 
\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n else:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n" }, "input_value": { "advanced": false, @@ -3344,7 +3344,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\") # type: ignore\n else:\n output = output.bind(response_format={\"type\": \"json_object\"}) # type: ignore\n\n return output # type: ignore\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\") # type: ignore\n if message:\n return message\n return None\n" + "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom 
langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n else:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai 
import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n" }, "input_value": { "advanced": false, @@ -3818,7 +3818,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. 
\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\") # type: ignore\n else:\n output = output.bind(response_format={\"type\": \"json_object\"}) # type: ignore\n\n return output # type: ignore\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\") # type: ignore\n if message:\n return message\n return None\n" + "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n else:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n" }, "input_value": { "advanced": false, diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Document QA.json b/src/backend/base/langflow/initial_setup/starter_projects/Document QA.json index 06653b068ef..a8a2b78f452 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Document QA.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Document QA.json @@ -805,7 
+805,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. 
\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\") # type: ignore\n else:\n output = output.bind(response_format={\"type\": \"json_object\"}) # type: ignore\n\n return output # type: ignore\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\") # type: ignore\n if message:\n return message\n return None\n" + "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n else:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n" }, "input_value": { "advanced": false, diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Hierarchical Agent.json b/src/backend/base/langflow/initial_setup/starter_projects/Hierarchical Agent.json index 358ae3373ee..39a38a3abfd 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Hierarchical Agent.json +++ 
b/src/backend/base/langflow/initial_setup/starter_projects/Hierarchical Agent.json @@ -379,7 +379,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from crewai import Crew, Process # type: ignore\n\nfrom langflow.base.agents.crewai.crew import BaseCrewComponent\nfrom langflow.io import HandleInput\n\n\nclass HierarchicalCrewComponent(BaseCrewComponent):\n display_name: str = \"Hierarchical Crew\"\n description: str = (\n \"Represents a group of agents, defining how they should collaborate and the tasks they should perform.\"\n )\n documentation: str = \"https://docs.crewai.com/how-to/Hierarchical/\"\n icon = \"CrewAI\"\n\n inputs = [\n *BaseCrewComponent._base_inputs,\n HandleInput(name=\"agents\", display_name=\"Agents\", input_types=[\"Agent\"], is_list=True),\n HandleInput(name=\"tasks\", display_name=\"Tasks\", input_types=[\"HierarchicalTask\"], is_list=True),\n HandleInput(name=\"manager_llm\", display_name=\"Manager LLM\", input_types=[\"LanguageModel\"], required=False),\n HandleInput(name=\"manager_agent\", display_name=\"Manager Agent\", input_types=[\"Agent\"], required=False),\n ]\n\n def build_crew(self) -> Crew:\n tasks, agents = self.get_tasks_and_agents()\n return Crew(\n agents=agents,\n tasks=tasks,\n process=Process.hierarchical,\n verbose=self.verbose,\n memory=self.memory,\n cache=self.use_cache,\n max_rpm=self.max_rpm,\n share_crew=self.share_crew,\n function_calling_llm=self.function_calling_llm,\n manager_agent=self.manager_agent,\n manager_llm=self.manager_llm,\n step_callback=self.get_step_callback(),\n task_callback=self.get_task_callback(),\n )\n" + "value": "from crewai import Crew, Process\n\nfrom langflow.base.agents.crewai.crew import BaseCrewComponent\nfrom langflow.io import HandleInput\n\n\nclass HierarchicalCrewComponent(BaseCrewComponent):\n display_name: str = \"Hierarchical Crew\"\n description: str = (\n \"Represents a group of agents, defining how they should collaborate and the tasks they should perform.\"\n )\n documentation: str = \"https://docs.crewai.com/how-to/Hierarchical/\"\n icon = \"CrewAI\"\n\n inputs = [\n *BaseCrewComponent._base_inputs,\n HandleInput(name=\"agents\", display_name=\"Agents\", input_types=[\"Agent\"], is_list=True),\n HandleInput(name=\"tasks\", display_name=\"Tasks\", input_types=[\"HierarchicalTask\"], is_list=True),\n HandleInput(name=\"manager_llm\", display_name=\"Manager LLM\", input_types=[\"LanguageModel\"], required=False),\n HandleInput(name=\"manager_agent\", display_name=\"Manager Agent\", input_types=[\"Agent\"], required=False),\n ]\n\n def build_crew(self) -> Crew:\n tasks, agents = self.get_tasks_and_agents()\n return Crew(\n agents=agents,\n tasks=tasks,\n process=Process.hierarchical,\n verbose=self.verbose,\n memory=self.memory,\n cache=self.use_cache,\n max_rpm=self.max_rpm,\n share_crew=self.share_crew,\n function_calling_llm=self.function_calling_llm,\n manager_agent=self.manager_agent,\n manager_llm=self.manager_llm,\n step_callback=self.get_step_callback(),\n task_callback=self.get_task_callback(),\n )\n" }, "function_calling_llm": { "advanced": true, @@ -655,7 +655,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import 
RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\") # type: ignore\n else:\n output = output.bind(response_format={\"type\": \"json_object\"}) # type: ignore\n\n return output # type: ignore\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from 
openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\") # type: ignore\n if message:\n return message\n return None\n" + "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. 
\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n else:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n" }, "input_value": { "advanced": false, @@ -1327,7 +1327,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from crewai import Agent # type: ignore\n\nfrom langflow.custom import Component\nfrom langflow.io import BoolInput, DictInput, HandleInput, MultilineInput, Output\n\n\nclass CrewAIAgentComponent(Component):\n display_name = \"CrewAI Agent\"\n description = \"Represents an agent of CrewAI.\"\n documentation: str = \"https://docs.crewai.com/how-to/LLM-Connections/\"\n icon = \"CrewAI\"\n\n inputs = [\n MultilineInput(name=\"role\", display_name=\"Role\", info=\"The role of the agent.\"),\n MultilineInput(name=\"goal\", display_name=\"Goal\", info=\"The objective of the agent.\"),\n MultilineInput(name=\"backstory\", display_name=\"Backstory\", info=\"The backstory of the agent.\"),\n HandleInput(\n name=\"tools\",\n display_name=\"Tools\",\n input_types=[\"Tool\"],\n is_list=True,\n info=\"Tools at agents disposal\",\n value=[],\n ),\n HandleInput(\n name=\"llm\",\n display_name=\"Language Model\",\n info=\"Language model that will run the agent.\",\n input_types=[\"LanguageModel\"],\n ),\n BoolInput(\n name=\"memory\",\n display_name=\"Memory\",\n info=\"Whether the agent should have memory or not\",\n advanced=True,\n value=True,\n ),\n BoolInput(\n name=\"verbose\",\n 
display_name=\"Verbose\",\n advanced=True,\n value=False,\n ),\n BoolInput(\n name=\"allow_delegation\",\n display_name=\"Allow Delegation\",\n info=\"Whether the agent is allowed to delegate tasks to other agents.\",\n value=True,\n ),\n BoolInput(\n name=\"allow_code_execution\",\n display_name=\"Allow Code Execution\",\n info=\"Whether the agent is allowed to execute code.\",\n value=False,\n advanced=True,\n ),\n DictInput(\n name=\"kwargs\",\n display_name=\"kwargs\",\n info=\"kwargs of agent.\",\n is_list=True,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Agent\", name=\"output\", method=\"build_output\"),\n ]\n\n def build_output(self) -> Agent:\n kwargs = self.kwargs or {}\n agent = Agent(\n role=self.role,\n goal=self.goal,\n backstory=self.backstory,\n llm=self.llm,\n verbose=self.verbose,\n memory=self.memory,\n tools=self.tools or [],\n allow_delegation=self.allow_delegation,\n allow_code_execution=self.allow_code_execution,\n **kwargs,\n )\n self.status = repr(agent)\n return agent\n" + "value": "from crewai import Agent\n\nfrom langflow.custom import Component\nfrom langflow.io import BoolInput, DictInput, HandleInput, MultilineInput, Output\n\n\nclass CrewAIAgentComponent(Component):\n display_name = \"CrewAI Agent\"\n description = \"Represents an agent of CrewAI.\"\n documentation: str = \"https://docs.crewai.com/how-to/LLM-Connections/\"\n icon = \"CrewAI\"\n\n inputs = [\n MultilineInput(name=\"role\", display_name=\"Role\", info=\"The role of the agent.\"),\n MultilineInput(name=\"goal\", display_name=\"Goal\", info=\"The objective of the agent.\"),\n MultilineInput(name=\"backstory\", display_name=\"Backstory\", info=\"The backstory of the agent.\"),\n HandleInput(\n name=\"tools\",\n display_name=\"Tools\",\n input_types=[\"Tool\"],\n is_list=True,\n info=\"Tools at agents disposal\",\n value=[],\n ),\n HandleInput(\n name=\"llm\",\n display_name=\"Language Model\",\n info=\"Language model that will run the agent.\",\n input_types=[\"LanguageModel\"],\n ),\n BoolInput(\n name=\"memory\",\n display_name=\"Memory\",\n info=\"Whether the agent should have memory or not\",\n advanced=True,\n value=True,\n ),\n BoolInput(\n name=\"verbose\",\n display_name=\"Verbose\",\n advanced=True,\n value=False,\n ),\n BoolInput(\n name=\"allow_delegation\",\n display_name=\"Allow Delegation\",\n info=\"Whether the agent is allowed to delegate tasks to other agents.\",\n value=True,\n ),\n BoolInput(\n name=\"allow_code_execution\",\n display_name=\"Allow Code Execution\",\n info=\"Whether the agent is allowed to execute code.\",\n value=False,\n advanced=True,\n ),\n DictInput(\n name=\"kwargs\",\n display_name=\"kwargs\",\n info=\"kwargs of agent.\",\n is_list=True,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Agent\", name=\"output\", method=\"build_output\"),\n ]\n\n def build_output(self) -> Agent:\n kwargs = self.kwargs or {}\n agent = Agent(\n role=self.role,\n goal=self.goal,\n backstory=self.backstory,\n llm=self.llm,\n verbose=self.verbose,\n memory=self.memory,\n tools=self.tools or [],\n allow_delegation=self.allow_delegation,\n allow_code_execution=self.allow_code_execution,\n **kwargs,\n )\n self.status = repr(agent)\n return agent\n" }, "goal": { "advanced": false, @@ -1586,7 +1586,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from crewai import Agent # type: ignore\n\nfrom langflow.custom import Component\nfrom langflow.io import BoolInput, DictInput, HandleInput, MultilineInput, Output\n\n\nclass 
CrewAIAgentComponent(Component):\n display_name = \"CrewAI Agent\"\n description = \"Represents an agent of CrewAI.\"\n documentation: str = \"https://docs.crewai.com/how-to/LLM-Connections/\"\n icon = \"CrewAI\"\n\n inputs = [\n MultilineInput(name=\"role\", display_name=\"Role\", info=\"The role of the agent.\"),\n MultilineInput(name=\"goal\", display_name=\"Goal\", info=\"The objective of the agent.\"),\n MultilineInput(name=\"backstory\", display_name=\"Backstory\", info=\"The backstory of the agent.\"),\n HandleInput(\n name=\"tools\",\n display_name=\"Tools\",\n input_types=[\"Tool\"],\n is_list=True,\n info=\"Tools at agents disposal\",\n value=[],\n ),\n HandleInput(\n name=\"llm\",\n display_name=\"Language Model\",\n info=\"Language model that will run the agent.\",\n input_types=[\"LanguageModel\"],\n ),\n BoolInput(\n name=\"memory\",\n display_name=\"Memory\",\n info=\"Whether the agent should have memory or not\",\n advanced=True,\n value=True,\n ),\n BoolInput(\n name=\"verbose\",\n display_name=\"Verbose\",\n advanced=True,\n value=False,\n ),\n BoolInput(\n name=\"allow_delegation\",\n display_name=\"Allow Delegation\",\n info=\"Whether the agent is allowed to delegate tasks to other agents.\",\n value=True,\n ),\n BoolInput(\n name=\"allow_code_execution\",\n display_name=\"Allow Code Execution\",\n info=\"Whether the agent is allowed to execute code.\",\n value=False,\n advanced=True,\n ),\n DictInput(\n name=\"kwargs\",\n display_name=\"kwargs\",\n info=\"kwargs of agent.\",\n is_list=True,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Agent\", name=\"output\", method=\"build_output\"),\n ]\n\n def build_output(self) -> Agent:\n kwargs = self.kwargs or {}\n agent = Agent(\n role=self.role,\n goal=self.goal,\n backstory=self.backstory,\n llm=self.llm,\n verbose=self.verbose,\n memory=self.memory,\n tools=self.tools or [],\n allow_delegation=self.allow_delegation,\n allow_code_execution=self.allow_code_execution,\n **kwargs,\n )\n self.status = repr(agent)\n return agent\n" + "value": "from crewai import Agent\n\nfrom langflow.custom import Component\nfrom langflow.io import BoolInput, DictInput, HandleInput, MultilineInput, Output\n\n\nclass CrewAIAgentComponent(Component):\n display_name = \"CrewAI Agent\"\n description = \"Represents an agent of CrewAI.\"\n documentation: str = \"https://docs.crewai.com/how-to/LLM-Connections/\"\n icon = \"CrewAI\"\n\n inputs = [\n MultilineInput(name=\"role\", display_name=\"Role\", info=\"The role of the agent.\"),\n MultilineInput(name=\"goal\", display_name=\"Goal\", info=\"The objective of the agent.\"),\n MultilineInput(name=\"backstory\", display_name=\"Backstory\", info=\"The backstory of the agent.\"),\n HandleInput(\n name=\"tools\",\n display_name=\"Tools\",\n input_types=[\"Tool\"],\n is_list=True,\n info=\"Tools at agents disposal\",\n value=[],\n ),\n HandleInput(\n name=\"llm\",\n display_name=\"Language Model\",\n info=\"Language model that will run the agent.\",\n input_types=[\"LanguageModel\"],\n ),\n BoolInput(\n name=\"memory\",\n display_name=\"Memory\",\n info=\"Whether the agent should have memory or not\",\n advanced=True,\n value=True,\n ),\n BoolInput(\n name=\"verbose\",\n display_name=\"Verbose\",\n advanced=True,\n value=False,\n ),\n BoolInput(\n name=\"allow_delegation\",\n display_name=\"Allow Delegation\",\n info=\"Whether the agent is allowed to delegate tasks to other agents.\",\n value=True,\n ),\n BoolInput(\n name=\"allow_code_execution\",\n display_name=\"Allow Code 
Execution\",\n info=\"Whether the agent is allowed to execute code.\",\n value=False,\n advanced=True,\n ),\n DictInput(\n name=\"kwargs\",\n display_name=\"kwargs\",\n info=\"kwargs of agent.\",\n is_list=True,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Agent\", name=\"output\", method=\"build_output\"),\n ]\n\n def build_output(self) -> Agent:\n kwargs = self.kwargs or {}\n agent = Agent(\n role=self.role,\n goal=self.goal,\n backstory=self.backstory,\n llm=self.llm,\n verbose=self.verbose,\n memory=self.memory,\n tools=self.tools or [],\n allow_delegation=self.allow_delegation,\n allow_code_execution=self.allow_code_execution,\n **kwargs,\n )\n self.status = repr(agent)\n return agent\n" }, "goal": { "advanced": false, @@ -1842,7 +1842,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. 
\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\") # type: ignore\n else:\n output = output.bind(response_format={\"type\": \"json_object\"}) # type: ignore\n\n return output # type: ignore\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\") # type: ignore\n if message:\n return message\n return None\n" + "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n else:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n" }, "input_value": { "advanced": false, @@ -2521,7 +2521,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from crewai import Agent # type: ignore\n\nfrom langflow.custom import Component\nfrom langflow.io import BoolInput, DictInput, HandleInput, MultilineInput, Output\n\n\nclass CrewAIAgentComponent(Component):\n display_name = \"CrewAI Agent\"\n description = \"Represents an agent of 
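# --- A sketch of the two JSON-mode branches in build_model above, using the same
# langchain-openai calls the component makes; the model name, key, and schema below
# are placeholders, not values from the patch.
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini", api_key="sk-placeholder")
# With a schema: parse the model's JSON reply into the schema's shape via json_mode.
person_schema = {"title": "Person", "type": "object",
                 "properties": {"name": {"type": "string"}}}  # hypothetical schema
structured_llm = llm.with_structured_output(schema=person_schema, method="json_mode")
# Without a schema: only force a syntactically valid JSON object in the raw response.
json_llm = llm.bind(response_format={"type": "json_object"})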
CrewAI.\"\n documentation: str = \"https://docs.crewai.com/how-to/LLM-Connections/\"\n icon = \"CrewAI\"\n\n inputs = [\n MultilineInput(name=\"role\", display_name=\"Role\", info=\"The role of the agent.\"),\n MultilineInput(name=\"goal\", display_name=\"Goal\", info=\"The objective of the agent.\"),\n MultilineInput(name=\"backstory\", display_name=\"Backstory\", info=\"The backstory of the agent.\"),\n HandleInput(\n name=\"tools\",\n display_name=\"Tools\",\n input_types=[\"Tool\"],\n is_list=True,\n info=\"Tools at agents disposal\",\n value=[],\n ),\n HandleInput(\n name=\"llm\",\n display_name=\"Language Model\",\n info=\"Language model that will run the agent.\",\n input_types=[\"LanguageModel\"],\n ),\n BoolInput(\n name=\"memory\",\n display_name=\"Memory\",\n info=\"Whether the agent should have memory or not\",\n advanced=True,\n value=True,\n ),\n BoolInput(\n name=\"verbose\",\n display_name=\"Verbose\",\n advanced=True,\n value=False,\n ),\n BoolInput(\n name=\"allow_delegation\",\n display_name=\"Allow Delegation\",\n info=\"Whether the agent is allowed to delegate tasks to other agents.\",\n value=True,\n ),\n BoolInput(\n name=\"allow_code_execution\",\n display_name=\"Allow Code Execution\",\n info=\"Whether the agent is allowed to execute code.\",\n value=False,\n advanced=True,\n ),\n DictInput(\n name=\"kwargs\",\n display_name=\"kwargs\",\n info=\"kwargs of agent.\",\n is_list=True,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Agent\", name=\"output\", method=\"build_output\"),\n ]\n\n def build_output(self) -> Agent:\n kwargs = self.kwargs or {}\n agent = Agent(\n role=self.role,\n goal=self.goal,\n backstory=self.backstory,\n llm=self.llm,\n verbose=self.verbose,\n memory=self.memory,\n tools=self.tools or [],\n allow_delegation=self.allow_delegation,\n allow_code_execution=self.allow_code_execution,\n **kwargs,\n )\n self.status = repr(agent)\n return agent\n" + "value": "from crewai import Agent\n\nfrom langflow.custom import Component\nfrom langflow.io import BoolInput, DictInput, HandleInput, MultilineInput, Output\n\n\nclass CrewAIAgentComponent(Component):\n display_name = \"CrewAI Agent\"\n description = \"Represents an agent of CrewAI.\"\n documentation: str = \"https://docs.crewai.com/how-to/LLM-Connections/\"\n icon = \"CrewAI\"\n\n inputs = [\n MultilineInput(name=\"role\", display_name=\"Role\", info=\"The role of the agent.\"),\n MultilineInput(name=\"goal\", display_name=\"Goal\", info=\"The objective of the agent.\"),\n MultilineInput(name=\"backstory\", display_name=\"Backstory\", info=\"The backstory of the agent.\"),\n HandleInput(\n name=\"tools\",\n display_name=\"Tools\",\n input_types=[\"Tool\"],\n is_list=True,\n info=\"Tools at agents disposal\",\n value=[],\n ),\n HandleInput(\n name=\"llm\",\n display_name=\"Language Model\",\n info=\"Language model that will run the agent.\",\n input_types=[\"LanguageModel\"],\n ),\n BoolInput(\n name=\"memory\",\n display_name=\"Memory\",\n info=\"Whether the agent should have memory or not\",\n advanced=True,\n value=True,\n ),\n BoolInput(\n name=\"verbose\",\n display_name=\"Verbose\",\n advanced=True,\n value=False,\n ),\n BoolInput(\n name=\"allow_delegation\",\n display_name=\"Allow Delegation\",\n info=\"Whether the agent is allowed to delegate tasks to other agents.\",\n value=True,\n ),\n BoolInput(\n name=\"allow_code_execution\",\n display_name=\"Allow Code Execution\",\n info=\"Whether the agent is allowed to execute code.\",\n value=False,\n advanced=True,\n ),\n 
DictInput(\n name=\"kwargs\",\n display_name=\"kwargs\",\n info=\"kwargs of agent.\",\n is_list=True,\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Agent\", name=\"output\", method=\"build_output\"),\n ]\n\n def build_output(self) -> Agent:\n kwargs = self.kwargs or {}\n agent = Agent(\n role=self.role,\n goal=self.goal,\n backstory=self.backstory,\n llm=self.llm,\n verbose=self.verbose,\n memory=self.memory,\n tools=self.tools or [],\n allow_delegation=self.allow_delegation,\n allow_code_execution=self.allow_code_execution,\n **kwargs,\n )\n self.status = repr(agent)\n return agent\n" }, "goal": { "advanced": false, diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Memory Chatbot.json b/src/backend/base/langflow/initial_setup/starter_projects/Memory Chatbot.json index a7481e1fb15..c80d99a80b3 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Memory Chatbot.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Memory Chatbot.json @@ -584,7 +584,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. 
\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\") # type: ignore\n else:\n output = output.bind(response_format={\"type\": \"json_object\"}) # type: ignore\n\n return output # type: ignore\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\") # type: ignore\n if message:\n return message\n return None\n" + "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n else:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n" }, "input_value": { "advanced": false, diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Sequential Agent.json b/src/backend/base/langflow/initial_setup/starter_projects/Sequential Agent.json index 483b8e8e8c7..e801f1089cc 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Sequential Agent.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Sequential 
Agent.json @@ -425,7 +425,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from crewai import Agent, Crew, Process, Task # type: ignore\n\nfrom langflow.base.agents.crewai.crew import BaseCrewComponent\nfrom langflow.io import HandleInput\nfrom langflow.schema.message import Message\n\n\nclass SequentialCrewComponent(BaseCrewComponent):\n display_name: str = \"Sequential Crew\"\n description: str = \"Represents a group of agents with tasks that are executed sequentially.\"\n documentation: str = \"https://docs.crewai.com/how-to/Sequential/\"\n icon = \"CrewAI\"\n\n inputs = [\n *BaseCrewComponent._base_inputs,\n HandleInput(name=\"tasks\", display_name=\"Tasks\", input_types=[\"SequentialTask\"], is_list=True),\n ]\n\n def get_tasks_and_agents(self) -> tuple[list[Task], list[Agent]]:\n return self.tasks, [task.agent for task in self.tasks]\n\n def build_crew(self) -> Message:\n tasks, agents = self.get_tasks_and_agents()\n return Crew(\n agents=agents,\n tasks=tasks,\n process=Process.sequential,\n verbose=self.verbose,\n memory=self.memory,\n cache=self.use_cache,\n max_rpm=self.max_rpm,\n share_crew=self.share_crew,\n function_calling_llm=self.function_calling_llm,\n step_callback=self.get_step_callback(),\n task_callback=self.get_task_callback(),\n )\n" + "value": "from crewai import Agent, Crew, Process, Task\n\nfrom langflow.base.agents.crewai.crew import BaseCrewComponent\nfrom langflow.io import HandleInput\nfrom langflow.schema.message import Message\n\n\nclass SequentialCrewComponent(BaseCrewComponent):\n display_name: str = \"Sequential Crew\"\n description: str = \"Represents a group of agents with tasks that are executed sequentially.\"\n documentation: str = \"https://docs.crewai.com/how-to/Sequential/\"\n icon = \"CrewAI\"\n\n inputs = [\n *BaseCrewComponent._base_inputs,\n HandleInput(name=\"tasks\", display_name=\"Tasks\", input_types=[\"SequentialTask\"], is_list=True),\n ]\n\n def get_tasks_and_agents(self) -> tuple[list[Task], list[Agent]]:\n return self.tasks, [task.agent for task in self.tasks]\n\n def build_crew(self) -> Message:\n tasks, agents = self.get_tasks_and_agents()\n return Crew(\n agents=agents,\n tasks=tasks,\n process=Process.sequential,\n verbose=self.verbose,\n memory=self.memory,\n cache=self.use_cache,\n max_rpm=self.max_rpm,\n share_crew=self.share_crew,\n function_calling_llm=self.function_calling_llm,\n step_callback=self.get_step_callback(),\n task_callback=self.get_task_callback(),\n )\n" }, "function_calling_llm": { "advanced": true, @@ -669,7 +669,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. 
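# --- A sketch of why SequentialCrewComponent overrides get_tasks_and_agents without
# an agents input of its own: each SequentialTask already carries its agent, so the
# crew's agent list is derived from the task list. FakeTask is a hypothetical stand-in.
from dataclasses import dataclass

@dataclass
class FakeTask:
    agent: str

tasks = [FakeTask("researcher"), FakeTask("writer")]
agents = [task.agent for task in tasks]  # mirrors: return self.tasks, [t.agent for t in self.tasks]
assert agents == ["researcher", "writer"]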
Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\") # type: ignore\n else:\n output = output.bind(response_format={\"type\": \"json_object\"}) # type: ignore\n\n return output # type: ignore\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\") # type: ignore\n if message:\n return message\n return None\n" + "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom 
langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n else:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai 
import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n" }, "input_value": { "advanced": false, diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Travel Planning Agents.json b/src/backend/base/langflow/initial_setup/starter_projects/Travel Planning Agents.json index 09010729dab..be4827bdb77 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Travel Planning Agents.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Travel Planning Agents.json @@ -909,7 +909,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. 
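# --- A sketch of the SecretStr wrapping seen in build_model above: pydantic masks
# the key in reprs and logs, and ChatOpenAI accepts the wrapped value directly.
# The key below is a placeholder.
from pydantic.v1 import SecretStr

api_key = SecretStr("sk-placeholder")
print(api_key)                      # **********
print(api_key.get_secret_value())   # sk-placeholder (only on explicit request)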
\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\") # type: ignore\n else:\n output = output.bind(response_format={\"type\": \"json_object\"}) # type: ignore\n\n return output # type: ignore\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\") # type: ignore\n if message:\n return message\n return None\n" + "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n else:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n" }, "input_value": { "_input_type": "MessageInput", @@ -2372,7 +2372,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import ast\nimport operator\n\nfrom langchain.tools import StructuredTool\nfrom pydantic import BaseModel, Field\n\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.field_typing import Tool\nfrom langflow.inputs import MessageTextInput\nfrom 
langflow.schema import Data\n\n\nclass CalculatorToolComponent(LCToolComponent):\n display_name = \"Calculator\"\n description = \"Perform basic arithmetic operations on a given expression.\"\n icon = \"calculator\"\n name = \"CalculatorTool\"\n\n inputs = [\n MessageTextInput(\n name=\"expression\",\n display_name=\"Expression\",\n info=\"The arithmetic expression to evaluate (e.g., '4*4*(33/22)+12-20').\",\n ),\n ]\n\n class CalculatorToolSchema(BaseModel):\n expression: str = Field(..., description=\"The arithmetic expression to evaluate.\")\n\n def run_model(self) -> list[Data]:\n return self._evaluate_expression(self.expression)\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"calculator\",\n description=\"Evaluate basic arithmetic expressions. Input should be a string containing the expression.\",\n func=self._evaluate_expression,\n args_schema=self.CalculatorToolSchema,\n )\n\n def _evaluate_expression(self, expression: str) -> list[Data]:\n try:\n # Define the allowed operators\n operators = {\n ast.Add: operator.add,\n ast.Sub: operator.sub,\n ast.Mult: operator.mul,\n ast.Div: operator.truediv,\n ast.Pow: operator.pow,\n }\n\n def eval_expr(node):\n if isinstance(node, ast.Num):\n return node.n\n if isinstance(node, ast.BinOp):\n return operators[type(node.op)](eval_expr(node.left), eval_expr(node.right))\n if isinstance(node, ast.UnaryOp):\n return operators[type(node.op)](eval_expr(node.operand))\n raise TypeError(node)\n\n # Parse the expression and evaluate it\n tree = ast.parse(expression, mode=\"eval\")\n result = eval_expr(tree.body)\n\n # Format the result to a reasonable number of decimal places\n formatted_result = f\"{result:.6f}\".rstrip(\"0\").rstrip(\".\")\n\n self.status = formatted_result\n return [Data(data={\"result\": formatted_result})]\n\n except (SyntaxError, TypeError, KeyError) as e:\n error_message = f\"Invalid expression: {e}\"\n self.status = error_message\n return [Data(data={\"error\": error_message})]\n except ZeroDivisionError:\n error_message = \"Error: Division by zero\"\n self.status = error_message\n return [Data(data={\"error\": error_message})]\n except Exception as e:\n error_message = f\"Error: {e}\"\n self.status = error_message\n return [Data(data={\"error\": error_message})]\n" + "value": "import ast\nimport operator\n\nfrom langchain.tools import StructuredTool\nfrom loguru import logger\nfrom pydantic import BaseModel, Field\n\nfrom langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.field_typing import Tool\nfrom langflow.inputs import MessageTextInput\nfrom langflow.schema import Data\n\n\nclass CalculatorToolComponent(LCToolComponent):\n display_name = \"Calculator\"\n description = \"Perform basic arithmetic operations on a given expression.\"\n icon = \"calculator\"\n name = \"CalculatorTool\"\n\n inputs = [\n MessageTextInput(\n name=\"expression\",\n display_name=\"Expression\",\n info=\"The arithmetic expression to evaluate (e.g., '4*4*(33/22)+12-20').\",\n ),\n ]\n\n class CalculatorToolSchema(BaseModel):\n expression: str = Field(..., description=\"The arithmetic expression to evaluate.\")\n\n def run_model(self) -> list[Data]:\n return self._evaluate_expression(self.expression)\n\n def build_tool(self) -> Tool:\n return StructuredTool.from_function(\n name=\"calculator\",\n description=\"Evaluate basic arithmetic expressions. 
Input should be a string containing the expression.\",\n            func=self._evaluate_expression,\n            args_schema=self.CalculatorToolSchema,\n        )\n\n    def _evaluate_expression(self, expression: str) -> list[Data]:\n        try:\n            # Define the allowed operators\n            operators = {\n                ast.Add: operator.add,\n                ast.Sub: operator.sub,\n                ast.Mult: operator.mul,\n                ast.Div: operator.truediv,\n                ast.Pow: operator.pow,\n                # Allow unary minus/plus so inputs like '-3 + 5' evaluate\n                ast.USub: operator.neg,\n                ast.UAdd: operator.pos,\n            }\n\n            def eval_expr(node):\n                if isinstance(node, ast.Num):\n                    return node.n\n                if isinstance(node, ast.BinOp):\n                    return operators[type(node.op)](eval_expr(node.left), eval_expr(node.right))\n                if isinstance(node, ast.UnaryOp):\n                    return operators[type(node.op)](eval_expr(node.operand))\n                raise TypeError(node)\n\n            # Parse the expression and evaluate it\n            tree = ast.parse(expression, mode=\"eval\")\n            result = eval_expr(tree.body)\n\n            # Format the result to a reasonable number of decimal places\n            formatted_result = f\"{result:.6f}\".rstrip(\"0\").rstrip(\".\")\n\n            self.status = formatted_result\n            return [Data(data={\"result\": formatted_result})]\n\n        except (SyntaxError, TypeError, KeyError) as e:\n            error_message = f\"Invalid expression: {e}\"\n            self.status = error_message\n            return [Data(data={\"error\": error_message})]\n        except ZeroDivisionError:\n            error_message = \"Error: Division by zero\"\n            self.status = error_message\n            return [Data(data={\"error\": error_message})]\n        except Exception as e:  # noqa: BLE001\n            logger.opt(exception=True).debug(\"Error evaluating expression\")\n            error_message = f\"Error: {e}\"\n            self.status = error_message\n            return [Data(data={\"error\": error_message})]\n" }, "expression": { "_input_type": "MessageTextInput", diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json b/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json index d7777096f89..c9ed1859a44 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json @@ -683,7 +683,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import os\n\nfrom astrapy.admin import parse_api_endpoint\nfrom loguru import logger\n\nfrom langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom langflow.helpers import docs_to_data\nfrom langflow.inputs import DictInput, FloatInput, MessageTextInput\nfrom langflow.io import (\n    BoolInput,\n    DataInput,\n    DropdownInput,\n    HandleInput,\n    IntInput,\n    MultilineInput,\n    SecretStrInput,\n    StrInput,\n)\nfrom langflow.schema import Data\n\n\nclass AstraVectorStoreComponent(LCVectorStoreComponent):\n    display_name: str = \"Astra DB\"\n    description: str = \"Implementation of Vector Store using Astra DB with search capabilities\"\n    documentation: str = \"https://docs.langflow.org/starter-projects-vector-store-rag\"\n    name = \"AstraDB\"\n    icon: str = \"AstraDB\"\n\n    VECTORIZE_PROVIDERS_MAPPING = {\n        \"Azure OpenAI\": [\"azureOpenAI\", [\"text-embedding-3-small\", \"text-embedding-3-large\", \"text-embedding-ada-002\"]],\n        \"Hugging Face - Dedicated\": [\"huggingfaceDedicated\", [\"endpoint-defined-model\"]],\n        \"Hugging Face - Serverless\": [\n            \"huggingface\",\n            [\n                \"sentence-transformers/all-MiniLM-L6-v2\",\n                \"intfloat/multilingual-e5-large\",\n                \"intfloat/multilingual-e5-large-instruct\",\n                \"BAAI/bge-small-en-v1.5\",\n                \"BAAI/bge-base-en-v1.5\",\n                \"BAAI/bge-large-en-v1.5\",\n            ],\n        ],\n        \"Jina AI\": [\n            \"jinaAI\",\n            [\n                \"jina-embeddings-v2-base-en\",\n                
\"jina-embeddings-v2-base-de\",\n \"jina-embeddings-v2-base-es\",\n \"jina-embeddings-v2-base-code\",\n \"jina-embeddings-v2-base-zh\",\n ],\n ],\n \"Mistral AI\": [\"mistral\", [\"mistral-embed\"]],\n \"NVIDIA\": [\"nvidia\", [\"NV-Embed-QA\"]],\n \"OpenAI\": [\"openai\", [\"text-embedding-3-small\", \"text-embedding-3-large\", \"text-embedding-ada-002\"]],\n \"Upstage\": [\"upstageAI\", [\"solar-embedding-1-large\"]],\n \"Voyage AI\": [\n \"voyageAI\",\n [\"voyage-large-2-instruct\", \"voyage-law-2\", \"voyage-code-2\", \"voyage-large-2\", \"voyage-2\"],\n ],\n }\n\n inputs = [\n SecretStrInput(\n name=\"token\",\n display_name=\"Astra DB Application Token\",\n info=\"Authentication token for accessing Astra DB.\",\n value=\"ASTRA_DB_APPLICATION_TOKEN\",\n required=True,\n advanced=os.getenv(\"ASTRA_ENHANCED\", \"false\").lower() == \"true\",\n ),\n SecretStrInput(\n name=\"api_endpoint\",\n display_name=\"Database\" if os.getenv(\"ASTRA_ENHANCED\", \"false\").lower() == \"true\" else \"API Endpoint\",\n info=\"API endpoint URL for the Astra DB service.\",\n value=\"ASTRA_DB_API_ENDPOINT\",\n required=True,\n ),\n StrInput(\n name=\"collection_name\",\n display_name=\"Collection Name\",\n info=\"The name of the collection within Astra DB where the vectors will be stored.\",\n required=True,\n ),\n MultilineInput(\n name=\"search_input\",\n display_name=\"Search Input\",\n ),\n DataInput(\n name=\"ingest_data\",\n display_name=\"Ingest Data\",\n is_list=True,\n ),\n StrInput(\n name=\"namespace\",\n display_name=\"Namespace\",\n info=\"Optional namespace within Astra DB to use for the collection.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"embedding_service\",\n display_name=\"Embedding Model or Astra Vectorize\",\n info=\"Determines whether to use Astra Vectorize for the collection.\",\n options=[\"Embedding Model\", \"Astra Vectorize\"],\n real_time_refresh=True,\n value=\"Embedding Model\",\n ),\n HandleInput(\n name=\"embedding\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Allows an embedding model configuration.\",\n ),\n DropdownInput(\n name=\"metric\",\n display_name=\"Metric\",\n info=\"Optional distance metric for vector comparisons in the vector store.\",\n options=[\"cosine\", \"dot_product\", \"euclidean\"],\n advanced=True,\n ),\n IntInput(\n name=\"batch_size\",\n display_name=\"Batch Size\",\n info=\"Optional number of data to process in a single batch.\",\n advanced=True,\n ),\n IntInput(\n name=\"bulk_insert_batch_concurrency\",\n display_name=\"Bulk Insert Batch Concurrency\",\n info=\"Optional concurrency level for bulk insert operations.\",\n advanced=True,\n ),\n IntInput(\n name=\"bulk_insert_overwrite_concurrency\",\n display_name=\"Bulk Insert Overwrite Concurrency\",\n info=\"Optional concurrency level for bulk insert operations that overwrite existing data.\",\n advanced=True,\n ),\n IntInput(\n name=\"bulk_delete_concurrency\",\n display_name=\"Bulk Delete Concurrency\",\n info=\"Optional concurrency level for bulk delete operations.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"setup_mode\",\n display_name=\"Setup Mode\",\n info=\"Configuration mode for setting up the vector store, with options like 'Sync', 'Async', or 'Off'.\",\n options=[\"Sync\", \"Async\", \"Off\"],\n advanced=True,\n value=\"Sync\",\n ),\n BoolInput(\n name=\"pre_delete_collection\",\n display_name=\"Pre Delete Collection\",\n info=\"Boolean flag to determine whether to delete the collection before creating a new one.\",\n advanced=True,\n 
),\n StrInput(\n name=\"metadata_indexing_include\",\n display_name=\"Metadata Indexing Include\",\n info=\"Optional list of metadata fields to include in the indexing.\",\n advanced=True,\n ),\n StrInput(\n name=\"metadata_indexing_exclude\",\n display_name=\"Metadata Indexing Exclude\",\n info=\"Optional list of metadata fields to exclude from the indexing.\",\n advanced=True,\n ),\n StrInput(\n name=\"collection_indexing_policy\",\n display_name=\"Collection Indexing Policy\",\n info=\"Optional dictionary defining the indexing policy for the collection.\",\n advanced=True,\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Number of Results\",\n info=\"Number of results to return.\",\n advanced=True,\n value=4,\n ),\n DropdownInput(\n name=\"search_type\",\n display_name=\"Search Type\",\n info=\"Search type to use\",\n options=[\"Similarity\", \"Similarity with score threshold\", \"MMR (Max Marginal Relevance)\"],\n value=\"Similarity\",\n advanced=True,\n ),\n FloatInput(\n name=\"search_score_threshold\",\n display_name=\"Search Score Threshold\",\n info=\"Minimum similarity score threshold for search results. \"\n \"(when using 'Similarity with score threshold')\",\n value=0,\n advanced=True,\n ),\n DictInput(\n name=\"search_filter\",\n display_name=\"Search Metadata Filter\",\n info=\"Optional dictionary of filters to apply to the search query.\",\n advanced=True,\n is_list=True,\n ),\n ]\n\n def insert_in_dict(self, build_config, field_name, new_parameters):\n # Insert the new key-value pair after the found key\n for new_field_name, new_parameter in new_parameters.items():\n # Get all the items as a list of tuples (key, value)\n items = list(build_config.items())\n\n # Find the index of the key to insert after\n idx = len(items)\n for i, (key, _value) in enumerate(items):\n if key == field_name:\n idx = i + 1\n break\n\n items.insert(idx, (new_field_name, new_parameter))\n\n # Clear the original dictionary and update with the modified items\n build_config.clear()\n build_config.update(items)\n\n return build_config\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n if field_name == \"embedding_service\":\n if field_value == \"Astra Vectorize\":\n for field in [\"embedding\"]:\n if field in build_config:\n del build_config[field]\n\n new_parameter = DropdownInput(\n name=\"provider\",\n display_name=\"Vectorize Provider\",\n options=self.VECTORIZE_PROVIDERS_MAPPING.keys(),\n value=\"\",\n required=True,\n real_time_refresh=True,\n ).to_dict()\n\n self.insert_in_dict(build_config, \"embedding_service\", {\"provider\": new_parameter})\n else:\n for field in [\n \"provider\",\n \"z_00_model_name\",\n \"z_01_model_parameters\",\n \"z_02_api_key_name\",\n \"z_03_provider_api_key\",\n \"z_04_authentication\",\n ]:\n if field in build_config:\n del build_config[field]\n\n new_parameter = HandleInput(\n name=\"embedding\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Allows an embedding model configuration.\",\n ).to_dict()\n\n self.insert_in_dict(build_config, \"embedding_service\", {\"embedding\": new_parameter})\n\n elif field_name == \"provider\":\n for field in [\n \"z_00_model_name\",\n \"z_01_model_parameters\",\n \"z_02_api_key_name\",\n \"z_03_provider_api_key\",\n \"z_04_authentication\",\n ]:\n if field in build_config:\n del build_config[field]\n\n model_options = self.VECTORIZE_PROVIDERS_MAPPING[field_value][1]\n\n new_parameter_0 = DropdownInput(\n name=\"z_00_model_name\",\n 
display_name=\"Model Name\",\n info=\"The embedding model to use for the selected provider. Each provider has a different set of \"\n \"models available (full list at \"\n \"https://docs.datastax.com/en/astra-db-serverless/databases/embedding-generation.html):\\n\\n\"\n f\"{', '.join(model_options)}\",\n options=model_options,\n required=True,\n ).to_dict()\n\n new_parameter_1 = DictInput(\n name=\"z_01_model_parameters\",\n display_name=\"Model Parameters\",\n is_list=True,\n ).to_dict()\n\n new_parameter_2 = MessageTextInput(\n name=\"z_02_api_key_name\",\n display_name=\"API Key name\",\n info=\"The name of the embeddings provider API key stored on Astra. \"\n \"If set, it will override the 'ProviderKey' in the authentication parameters.\",\n ).to_dict()\n\n new_parameter_3 = SecretStrInput(\n name=\"z_03_provider_api_key\",\n display_name=\"Provider API Key\",\n info=\"An alternative to the Astra Authentication that passes an API key for the provider \"\n \"with each request to Astra DB. \"\n \"This may be used when Vectorize is configured for the collection, \"\n \"but no corresponding provider secret is stored within Astra's key management system.\",\n ).to_dict()\n\n new_parameter_4 = DictInput(\n name=\"z_04_authentication\",\n display_name=\"Authentication parameters\",\n is_list=True,\n ).to_dict()\n\n self.insert_in_dict(\n build_config,\n \"provider\",\n {\n \"z_00_model_name\": new_parameter_0,\n \"z_01_model_parameters\": new_parameter_1,\n \"z_02_api_key_name\": new_parameter_2,\n \"z_03_provider_api_key\": new_parameter_3,\n \"z_04_authentication\": new_parameter_4,\n },\n )\n\n return build_config\n\n def build_vectorize_options(self, **kwargs):\n for attribute in [\n \"provider\",\n \"z_00_model_name\",\n \"z_01_model_parameters\",\n \"z_02_api_key_name\",\n \"z_03_provider_api_key\",\n \"z_04_authentication\",\n ]:\n if not hasattr(self, attribute):\n setattr(self, attribute, None)\n\n # Fetch values from kwargs if any self.* attributes are None\n provider_value = self.VECTORIZE_PROVIDERS_MAPPING.get(self.provider, [None])[0] or kwargs.get(\"provider\")\n authentication = {**(self.z_04_authentication or kwargs.get(\"z_04_authentication\", {}))}\n\n api_key_name = self.z_02_api_key_name or kwargs.get(\"z_02_api_key_name\")\n provider_key = self.z_03_provider_api_key or kwargs.get(\"z_03_provider_api_key\")\n if api_key_name:\n authentication[\"providerKey\"] = api_key_name\n\n return {\n # must match astrapy.info.CollectionVectorServiceOptions\n \"collection_vector_service_options\": {\n \"provider\": provider_value,\n \"modelName\": self.z_00_model_name or kwargs.get(\"z_00_model_name\"),\n \"authentication\": authentication,\n \"parameters\": self.z_01_model_parameters or kwargs.get(\"z_01_model_parameters\", {}),\n },\n \"collection_embedding_api_key\": provider_key,\n }\n\n @check_cached_vector_store\n def build_vector_store(self, vectorize_options=None):\n try:\n from langchain_astradb import AstraDBVectorStore\n from langchain_astradb.utils.astradb import SetupMode\n except ImportError as e:\n msg = (\n \"Could not import langchain Astra DB integration package. 
\"\n \"Please install it with `pip install langchain-astradb`.\"\n )\n raise ImportError(msg) from e\n\n try:\n if not self.setup_mode:\n self.setup_mode = self._inputs[\"setup_mode\"].options[0]\n\n setup_mode_value = SetupMode[self.setup_mode.upper()]\n except KeyError as e:\n msg = f\"Invalid setup mode: {self.setup_mode}\"\n raise ValueError(msg) from e\n\n if self.embedding:\n embedding_dict = {\"embedding\": self.embedding}\n else:\n from astrapy.info import CollectionVectorServiceOptions\n\n dict_options = vectorize_options or self.build_vectorize_options()\n dict_options[\"authentication\"] = {\n k: v for k, v in dict_options.get(\"authentication\", {}).items() if k and v\n }\n dict_options[\"parameters\"] = {k: v for k, v in dict_options.get(\"parameters\", {}).items() if k and v}\n\n embedding_dict = {\n \"collection_vector_service_options\": CollectionVectorServiceOptions.from_dict(\n dict_options.get(\"collection_vector_service_options\", {})\n ),\n }\n\n vector_store_kwargs = {\n **embedding_dict,\n \"collection_name\": self.collection_name,\n \"token\": self.token,\n \"api_endpoint\": self.api_endpoint,\n \"namespace\": self.namespace or None,\n \"environment\": parse_api_endpoint(self.api_endpoint).environment,\n \"metric\": self.metric or None,\n \"batch_size\": self.batch_size or None,\n \"bulk_insert_batch_concurrency\": self.bulk_insert_batch_concurrency or None,\n \"bulk_insert_overwrite_concurrency\": self.bulk_insert_overwrite_concurrency or None,\n \"bulk_delete_concurrency\": self.bulk_delete_concurrency or None,\n \"setup_mode\": setup_mode_value,\n \"pre_delete_collection\": self.pre_delete_collection or False,\n }\n\n if self.metadata_indexing_include:\n vector_store_kwargs[\"metadata_indexing_include\"] = self.metadata_indexing_include\n elif self.metadata_indexing_exclude:\n vector_store_kwargs[\"metadata_indexing_exclude\"] = self.metadata_indexing_exclude\n elif self.collection_indexing_policy:\n vector_store_kwargs[\"collection_indexing_policy\"] = self.collection_indexing_policy\n\n try:\n vector_store = AstraDBVectorStore(**vector_store_kwargs)\n except Exception as e:\n msg = f\"Error initializing AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n self._add_documents_to_vector_store(vector_store)\n\n return vector_store\n\n def _add_documents_to_vector_store(self, vector_store):\n documents = []\n for _input in self.ingest_data or []:\n if isinstance(_input, Data):\n documents.append(_input.to_lc_document())\n else:\n msg = \"Vector Store Inputs must be Data objects.\"\n raise ValueError(msg)\n\n if documents:\n logger.debug(f\"Adding {len(documents)} documents to the Vector Store.\")\n try:\n vector_store.add_documents(documents)\n except Exception as e:\n msg = f\"Error adding documents to AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n else:\n logger.debug(\"No documents to add to the Vector Store.\")\n\n def _map_search_type(self):\n if self.search_type == \"Similarity with score threshold\":\n return \"similarity_score_threshold\"\n if self.search_type == \"MMR (Max Marginal Relevance)\":\n return \"mmr\"\n return \"similarity\"\n\n def _build_search_args(self):\n args = {\n \"k\": self.number_of_results,\n \"score_threshold\": self.search_score_threshold,\n }\n\n if self.search_filter:\n clean_filter = {k: v for k, v in self.search_filter.items() if k and v}\n if len(clean_filter) > 0:\n args[\"filter\"] = clean_filter\n return args\n\n def search_documents(self, vector_store=None) -> list[Data]:\n if not vector_store:\n 
vector_store = self.build_vector_store()\n\n logger.debug(f\"Search input: {self.search_input}\")\n logger.debug(f\"Search type: {self.search_type}\")\n logger.debug(f\"Number of results: {self.number_of_results}\")\n\n if self.search_input and isinstance(self.search_input, str) and self.search_input.strip():\n try:\n search_type = self._map_search_type()\n search_args = self._build_search_args()\n\n docs = vector_store.search(query=self.search_input, search_type=search_type, **search_args)\n except Exception as e:\n msg = f\"Error performing search in AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n logger.debug(f\"Retrieved documents: {len(docs)}\")\n\n data = docs_to_data(docs)\n logger.debug(f\"Converted documents to data: {len(data)}\")\n self.status = data\n return data\n logger.debug(\"No search input provided. Skipping search.\")\n return []\n\n def get_retriever_kwargs(self):\n search_args = self._build_search_args()\n return {\n \"search_type\": self._map_search_type(),\n \"search_kwargs\": search_args,\n }\n" + "value": "import os\n\nfrom astrapy.admin import parse_api_endpoint\nfrom loguru import logger\n\nfrom langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom langflow.helpers import docs_to_data\nfrom langflow.inputs import DictInput, FloatInput, MessageTextInput\nfrom langflow.io import (\n BoolInput,\n DataInput,\n DropdownInput,\n HandleInput,\n IntInput,\n MultilineInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema import Data\n\n\nclass AstraVectorStoreComponent(LCVectorStoreComponent):\n display_name: str = \"Astra DB\"\n description: str = \"Implementation of Vector Store using Astra DB with search capabilities\"\n documentation: str = \"https://docs.langflow.org/starter-projects-vector-store-rag\"\n name = \"AstraDB\"\n icon: str = \"AstraDB\"\n\n VECTORIZE_PROVIDERS_MAPPING = {\n \"Azure OpenAI\": [\"azureOpenAI\", [\"text-embedding-3-small\", \"text-embedding-3-large\", \"text-embedding-ada-002\"]],\n \"Hugging Face - Dedicated\": [\"huggingfaceDedicated\", [\"endpoint-defined-model\"]],\n \"Hugging Face - Serverless\": [\n \"huggingface\",\n [\n \"sentence-transformers/all-MiniLM-L6-v2\",\n \"intfloat/multilingual-e5-large\",\n \"intfloat/multilingual-e5-large-instruct\",\n \"BAAI/bge-small-en-v1.5\",\n \"BAAI/bge-base-en-v1.5\",\n \"BAAI/bge-large-en-v1.5\",\n ],\n ],\n \"Jina AI\": [\n \"jinaAI\",\n [\n \"jina-embeddings-v2-base-en\",\n \"jina-embeddings-v2-base-de\",\n \"jina-embeddings-v2-base-es\",\n \"jina-embeddings-v2-base-code\",\n \"jina-embeddings-v2-base-zh\",\n ],\n ],\n \"Mistral AI\": [\"mistral\", [\"mistral-embed\"]],\n \"NVIDIA\": [\"nvidia\", [\"NV-Embed-QA\"]],\n \"OpenAI\": [\"openai\", [\"text-embedding-3-small\", \"text-embedding-3-large\", \"text-embedding-ada-002\"]],\n \"Upstage\": [\"upstageAI\", [\"solar-embedding-1-large\"]],\n \"Voyage AI\": [\n \"voyageAI\",\n [\"voyage-large-2-instruct\", \"voyage-law-2\", \"voyage-code-2\", \"voyage-large-2\", \"voyage-2\"],\n ],\n }\n\n inputs = [\n SecretStrInput(\n name=\"token\",\n display_name=\"Astra DB Application Token\",\n info=\"Authentication token for accessing Astra DB.\",\n value=\"ASTRA_DB_APPLICATION_TOKEN\",\n required=True,\n advanced=os.getenv(\"ASTRA_ENHANCED\", \"false\").lower() == \"true\",\n ),\n SecretStrInput(\n name=\"api_endpoint\",\n display_name=\"Database\" if os.getenv(\"ASTRA_ENHANCED\", \"false\").lower() == \"true\" else \"API Endpoint\",\n info=\"API endpoint URL for the Astra DB service.\",\n 
value=\"ASTRA_DB_API_ENDPOINT\",\n required=True,\n ),\n StrInput(\n name=\"collection_name\",\n display_name=\"Collection Name\",\n info=\"The name of the collection within Astra DB where the vectors will be stored.\",\n required=True,\n ),\n MultilineInput(\n name=\"search_input\",\n display_name=\"Search Input\",\n ),\n DataInput(\n name=\"ingest_data\",\n display_name=\"Ingest Data\",\n is_list=True,\n ),\n StrInput(\n name=\"namespace\",\n display_name=\"Namespace\",\n info=\"Optional namespace within Astra DB to use for the collection.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"embedding_service\",\n display_name=\"Embedding Model or Astra Vectorize\",\n info=\"Determines whether to use Astra Vectorize for the collection.\",\n options=[\"Embedding Model\", \"Astra Vectorize\"],\n real_time_refresh=True,\n value=\"Embedding Model\",\n ),\n HandleInput(\n name=\"embedding\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Allows an embedding model configuration.\",\n ),\n DropdownInput(\n name=\"metric\",\n display_name=\"Metric\",\n info=\"Optional distance metric for vector comparisons in the vector store.\",\n options=[\"cosine\", \"dot_product\", \"euclidean\"],\n advanced=True,\n ),\n IntInput(\n name=\"batch_size\",\n display_name=\"Batch Size\",\n info=\"Optional number of data to process in a single batch.\",\n advanced=True,\n ),\n IntInput(\n name=\"bulk_insert_batch_concurrency\",\n display_name=\"Bulk Insert Batch Concurrency\",\n info=\"Optional concurrency level for bulk insert operations.\",\n advanced=True,\n ),\n IntInput(\n name=\"bulk_insert_overwrite_concurrency\",\n display_name=\"Bulk Insert Overwrite Concurrency\",\n info=\"Optional concurrency level for bulk insert operations that overwrite existing data.\",\n advanced=True,\n ),\n IntInput(\n name=\"bulk_delete_concurrency\",\n display_name=\"Bulk Delete Concurrency\",\n info=\"Optional concurrency level for bulk delete operations.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"setup_mode\",\n display_name=\"Setup Mode\",\n info=\"Configuration mode for setting up the vector store, with options like 'Sync', 'Async', or 'Off'.\",\n options=[\"Sync\", \"Async\", \"Off\"],\n advanced=True,\n value=\"Sync\",\n ),\n BoolInput(\n name=\"pre_delete_collection\",\n display_name=\"Pre Delete Collection\",\n info=\"Boolean flag to determine whether to delete the collection before creating a new one.\",\n advanced=True,\n ),\n StrInput(\n name=\"metadata_indexing_include\",\n display_name=\"Metadata Indexing Include\",\n info=\"Optional list of metadata fields to include in the indexing.\",\n advanced=True,\n ),\n StrInput(\n name=\"metadata_indexing_exclude\",\n display_name=\"Metadata Indexing Exclude\",\n info=\"Optional list of metadata fields to exclude from the indexing.\",\n advanced=True,\n ),\n StrInput(\n name=\"collection_indexing_policy\",\n display_name=\"Collection Indexing Policy\",\n info=\"Optional dictionary defining the indexing policy for the collection.\",\n advanced=True,\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Number of Results\",\n info=\"Number of results to return.\",\n advanced=True,\n value=4,\n ),\n DropdownInput(\n name=\"search_type\",\n display_name=\"Search Type\",\n info=\"Search type to use\",\n options=[\"Similarity\", \"Similarity with score threshold\", \"MMR (Max Marginal Relevance)\"],\n value=\"Similarity\",\n advanced=True,\n ),\n FloatInput(\n name=\"search_score_threshold\",\n display_name=\"Search Score 
Threshold\",\n info=\"Minimum similarity score threshold for search results. \"\n \"(when using 'Similarity with score threshold')\",\n value=0,\n advanced=True,\n ),\n DictInput(\n name=\"search_filter\",\n display_name=\"Search Metadata Filter\",\n info=\"Optional dictionary of filters to apply to the search query.\",\n advanced=True,\n is_list=True,\n ),\n ]\n\n def insert_in_dict(self, build_config, field_name, new_parameters):\n # Insert the new key-value pair after the found key\n for new_field_name, new_parameter in new_parameters.items():\n # Get all the items as a list of tuples (key, value)\n items = list(build_config.items())\n\n # Find the index of the key to insert after\n idx = len(items)\n for i, (key, _value) in enumerate(items):\n if key == field_name:\n idx = i + 1\n break\n\n items.insert(idx, (new_field_name, new_parameter))\n\n # Clear the original dictionary and update with the modified items\n build_config.clear()\n build_config.update(items)\n\n return build_config\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n if field_name == \"embedding_service\":\n if field_value == \"Astra Vectorize\":\n for field in [\"embedding\"]:\n if field in build_config:\n del build_config[field]\n\n new_parameter = DropdownInput(\n name=\"provider\",\n display_name=\"Vectorize Provider\",\n options=self.VECTORIZE_PROVIDERS_MAPPING.keys(),\n value=\"\",\n required=True,\n real_time_refresh=True,\n ).to_dict()\n\n self.insert_in_dict(build_config, \"embedding_service\", {\"provider\": new_parameter})\n else:\n for field in [\n \"provider\",\n \"z_00_model_name\",\n \"z_01_model_parameters\",\n \"z_02_api_key_name\",\n \"z_03_provider_api_key\",\n \"z_04_authentication\",\n ]:\n if field in build_config:\n del build_config[field]\n\n new_parameter = HandleInput(\n name=\"embedding\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Allows an embedding model configuration.\",\n ).to_dict()\n\n self.insert_in_dict(build_config, \"embedding_service\", {\"embedding\": new_parameter})\n\n elif field_name == \"provider\":\n for field in [\n \"z_00_model_name\",\n \"z_01_model_parameters\",\n \"z_02_api_key_name\",\n \"z_03_provider_api_key\",\n \"z_04_authentication\",\n ]:\n if field in build_config:\n del build_config[field]\n\n model_options = self.VECTORIZE_PROVIDERS_MAPPING[field_value][1]\n\n new_parameter_0 = DropdownInput(\n name=\"z_00_model_name\",\n display_name=\"Model Name\",\n info=\"The embedding model to use for the selected provider. Each provider has a different set of \"\n \"models available (full list at \"\n \"https://docs.datastax.com/en/astra-db-serverless/databases/embedding-generation.html):\\n\\n\"\n f\"{', '.join(model_options)}\",\n options=model_options,\n required=True,\n ).to_dict()\n\n new_parameter_1 = DictInput(\n name=\"z_01_model_parameters\",\n display_name=\"Model Parameters\",\n is_list=True,\n ).to_dict()\n\n new_parameter_2 = MessageTextInput(\n name=\"z_02_api_key_name\",\n display_name=\"API Key name\",\n info=\"The name of the embeddings provider API key stored on Astra. \"\n \"If set, it will override the 'ProviderKey' in the authentication parameters.\",\n ).to_dict()\n\n new_parameter_3 = SecretStrInput(\n name=\"z_03_provider_api_key\",\n display_name=\"Provider API Key\",\n info=\"An alternative to the Astra Authentication that passes an API key for the provider \"\n \"with each request to Astra DB. 
\"\n \"This may be used when Vectorize is configured for the collection, \"\n \"but no corresponding provider secret is stored within Astra's key management system.\",\n ).to_dict()\n\n new_parameter_4 = DictInput(\n name=\"z_04_authentication\",\n display_name=\"Authentication parameters\",\n is_list=True,\n ).to_dict()\n\n self.insert_in_dict(\n build_config,\n \"provider\",\n {\n \"z_00_model_name\": new_parameter_0,\n \"z_01_model_parameters\": new_parameter_1,\n \"z_02_api_key_name\": new_parameter_2,\n \"z_03_provider_api_key\": new_parameter_3,\n \"z_04_authentication\": new_parameter_4,\n },\n )\n\n return build_config\n\n def build_vectorize_options(self, **kwargs):\n for attribute in [\n \"provider\",\n \"z_00_model_name\",\n \"z_01_model_parameters\",\n \"z_02_api_key_name\",\n \"z_03_provider_api_key\",\n \"z_04_authentication\",\n ]:\n if not hasattr(self, attribute):\n setattr(self, attribute, None)\n\n # Fetch values from kwargs if any self.* attributes are None\n provider_value = self.VECTORIZE_PROVIDERS_MAPPING.get(self.provider, [None])[0] or kwargs.get(\"provider\")\n authentication = {**(self.z_04_authentication or kwargs.get(\"z_04_authentication\", {}))}\n\n api_key_name = self.z_02_api_key_name or kwargs.get(\"z_02_api_key_name\")\n provider_key = self.z_03_provider_api_key or kwargs.get(\"z_03_provider_api_key\")\n if api_key_name:\n authentication[\"providerKey\"] = api_key_name\n\n return {\n # must match astrapy.info.CollectionVectorServiceOptions\n \"collection_vector_service_options\": {\n \"provider\": provider_value,\n \"modelName\": self.z_00_model_name or kwargs.get(\"z_00_model_name\"),\n \"authentication\": authentication,\n \"parameters\": self.z_01_model_parameters or kwargs.get(\"z_01_model_parameters\", {}),\n },\n \"collection_embedding_api_key\": provider_key,\n }\n\n @check_cached_vector_store\n def build_vector_store(self, vectorize_options=None):\n try:\n from langchain_astradb import AstraDBVectorStore\n from langchain_astradb.utils.astradb import SetupMode\n except ImportError as e:\n msg = (\n \"Could not import langchain Astra DB integration package. 
\"\n \"Please install it with `pip install langchain-astradb`.\"\n )\n raise ImportError(msg) from e\n\n try:\n if not self.setup_mode:\n self.setup_mode = self._inputs[\"setup_mode\"].options[0]\n\n setup_mode_value = SetupMode[self.setup_mode.upper()]\n except KeyError as e:\n msg = f\"Invalid setup mode: {self.setup_mode}\"\n raise ValueError(msg) from e\n\n if self.embedding:\n embedding_dict = {\"embedding\": self.embedding}\n else:\n from astrapy.info import CollectionVectorServiceOptions\n\n dict_options = vectorize_options or self.build_vectorize_options()\n dict_options[\"authentication\"] = {\n k: v for k, v in dict_options.get(\"authentication\", {}).items() if k and v\n }\n dict_options[\"parameters\"] = {k: v for k, v in dict_options.get(\"parameters\", {}).items() if k and v}\n\n embedding_dict = {\n \"collection_vector_service_options\": CollectionVectorServiceOptions.from_dict(\n dict_options.get(\"collection_vector_service_options\", {})\n ),\n }\n\n vector_store_kwargs = {\n **embedding_dict,\n \"collection_name\": self.collection_name,\n \"token\": self.token,\n \"api_endpoint\": self.api_endpoint,\n \"namespace\": self.namespace or None,\n \"environment\": parse_api_endpoint(self.api_endpoint).environment,\n \"metric\": self.metric or None,\n \"batch_size\": self.batch_size or None,\n \"bulk_insert_batch_concurrency\": self.bulk_insert_batch_concurrency or None,\n \"bulk_insert_overwrite_concurrency\": self.bulk_insert_overwrite_concurrency or None,\n \"bulk_delete_concurrency\": self.bulk_delete_concurrency or None,\n \"setup_mode\": setup_mode_value,\n \"pre_delete_collection\": self.pre_delete_collection or False,\n }\n\n if self.metadata_indexing_include:\n vector_store_kwargs[\"metadata_indexing_include\"] = self.metadata_indexing_include\n elif self.metadata_indexing_exclude:\n vector_store_kwargs[\"metadata_indexing_exclude\"] = self.metadata_indexing_exclude\n elif self.collection_indexing_policy:\n vector_store_kwargs[\"collection_indexing_policy\"] = self.collection_indexing_policy\n\n try:\n vector_store = AstraDBVectorStore(**vector_store_kwargs)\n except Exception as e:\n msg = f\"Error initializing AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n self._add_documents_to_vector_store(vector_store)\n\n return vector_store\n\n def _add_documents_to_vector_store(self, vector_store):\n documents = []\n for _input in self.ingest_data or []:\n if isinstance(_input, Data):\n documents.append(_input.to_lc_document())\n else:\n msg = \"Vector Store Inputs must be Data objects.\"\n raise TypeError(msg)\n\n if documents:\n logger.debug(f\"Adding {len(documents)} documents to the Vector Store.\")\n try:\n vector_store.add_documents(documents)\n except Exception as e:\n msg = f\"Error adding documents to AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n else:\n logger.debug(\"No documents to add to the Vector Store.\")\n\n def _map_search_type(self):\n if self.search_type == \"Similarity with score threshold\":\n return \"similarity_score_threshold\"\n if self.search_type == \"MMR (Max Marginal Relevance)\":\n return \"mmr\"\n return \"similarity\"\n\n def _build_search_args(self):\n args = {\n \"k\": self.number_of_results,\n \"score_threshold\": self.search_score_threshold,\n }\n\n if self.search_filter:\n clean_filter = {k: v for k, v in self.search_filter.items() if k and v}\n if len(clean_filter) > 0:\n args[\"filter\"] = clean_filter\n return args\n\n def search_documents(self, vector_store=None) -> list[Data]:\n if not vector_store:\n 
vector_store = self.build_vector_store()\n\n logger.debug(f\"Search input: {self.search_input}\")\n logger.debug(f\"Search type: {self.search_type}\")\n logger.debug(f\"Number of results: {self.number_of_results}\")\n\n if self.search_input and isinstance(self.search_input, str) and self.search_input.strip():\n try:\n search_type = self._map_search_type()\n search_args = self._build_search_args()\n\n docs = vector_store.search(query=self.search_input, search_type=search_type, **search_args)\n except Exception as e:\n msg = f\"Error performing search in AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n logger.debug(f\"Retrieved documents: {len(docs)}\")\n\n data = docs_to_data(docs)\n logger.debug(f\"Converted documents to data: {len(data)}\")\n self.status = data\n return data\n logger.debug(\"No search input provided. Skipping search.\")\n return []\n\n def get_retriever_kwargs(self):\n search_args = self._build_search_args()\n return {\n \"search_type\": self._map_search_type(),\n \"search_kwargs\": search_args,\n }\n" }, "collection_indexing_policy": { "advanced": true, @@ -1948,7 +1948,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import os\n\nfrom astrapy.admin import parse_api_endpoint\nfrom loguru import logger\n\nfrom langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom langflow.helpers import docs_to_data\nfrom langflow.inputs import DictInput, FloatInput, MessageTextInput\nfrom langflow.io import (\n BoolInput,\n DataInput,\n DropdownInput,\n HandleInput,\n IntInput,\n MultilineInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema import Data\n\n\nclass AstraVectorStoreComponent(LCVectorStoreComponent):\n display_name: str = \"Astra DB\"\n description: str = \"Implementation of Vector Store using Astra DB with search capabilities\"\n documentation: str = \"https://docs.langflow.org/starter-projects-vector-store-rag\"\n name = \"AstraDB\"\n icon: str = \"AstraDB\"\n\n VECTORIZE_PROVIDERS_MAPPING = {\n \"Azure OpenAI\": [\"azureOpenAI\", [\"text-embedding-3-small\", \"text-embedding-3-large\", \"text-embedding-ada-002\"]],\n \"Hugging Face - Dedicated\": [\"huggingfaceDedicated\", [\"endpoint-defined-model\"]],\n \"Hugging Face - Serverless\": [\n \"huggingface\",\n [\n \"sentence-transformers/all-MiniLM-L6-v2\",\n \"intfloat/multilingual-e5-large\",\n \"intfloat/multilingual-e5-large-instruct\",\n \"BAAI/bge-small-en-v1.5\",\n \"BAAI/bge-base-en-v1.5\",\n \"BAAI/bge-large-en-v1.5\",\n ],\n ],\n \"Jina AI\": [\n \"jinaAI\",\n [\n \"jina-embeddings-v2-base-en\",\n \"jina-embeddings-v2-base-de\",\n \"jina-embeddings-v2-base-es\",\n \"jina-embeddings-v2-base-code\",\n \"jina-embeddings-v2-base-zh\",\n ],\n ],\n \"Mistral AI\": [\"mistral\", [\"mistral-embed\"]],\n \"NVIDIA\": [\"nvidia\", [\"NV-Embed-QA\"]],\n \"OpenAI\": [\"openai\", [\"text-embedding-3-small\", \"text-embedding-3-large\", \"text-embedding-ada-002\"]],\n \"Upstage\": [\"upstageAI\", [\"solar-embedding-1-large\"]],\n \"Voyage AI\": [\n \"voyageAI\",\n [\"voyage-large-2-instruct\", \"voyage-law-2\", \"voyage-code-2\", \"voyage-large-2\", \"voyage-2\"],\n ],\n }\n\n inputs = [\n SecretStrInput(\n name=\"token\",\n display_name=\"Astra DB Application Token\",\n info=\"Authentication token for accessing Astra DB.\",\n value=\"ASTRA_DB_APPLICATION_TOKEN\",\n required=True,\n advanced=os.getenv(\"ASTRA_ENHANCED\", \"false\").lower() == \"true\",\n ),\n SecretStrInput(\n name=\"api_endpoint\",\n display_name=\"Database\" if 
os.getenv(\"ASTRA_ENHANCED\", \"false\").lower() == \"true\" else \"API Endpoint\",\n info=\"API endpoint URL for the Astra DB service.\",\n value=\"ASTRA_DB_API_ENDPOINT\",\n required=True,\n ),\n StrInput(\n name=\"collection_name\",\n display_name=\"Collection Name\",\n info=\"The name of the collection within Astra DB where the vectors will be stored.\",\n required=True,\n ),\n MultilineInput(\n name=\"search_input\",\n display_name=\"Search Input\",\n ),\n DataInput(\n name=\"ingest_data\",\n display_name=\"Ingest Data\",\n is_list=True,\n ),\n StrInput(\n name=\"namespace\",\n display_name=\"Namespace\",\n info=\"Optional namespace within Astra DB to use for the collection.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"embedding_service\",\n display_name=\"Embedding Model or Astra Vectorize\",\n info=\"Determines whether to use Astra Vectorize for the collection.\",\n options=[\"Embedding Model\", \"Astra Vectorize\"],\n real_time_refresh=True,\n value=\"Embedding Model\",\n ),\n HandleInput(\n name=\"embedding\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Allows an embedding model configuration.\",\n ),\n DropdownInput(\n name=\"metric\",\n display_name=\"Metric\",\n info=\"Optional distance metric for vector comparisons in the vector store.\",\n options=[\"cosine\", \"dot_product\", \"euclidean\"],\n advanced=True,\n ),\n IntInput(\n name=\"batch_size\",\n display_name=\"Batch Size\",\n info=\"Optional number of data to process in a single batch.\",\n advanced=True,\n ),\n IntInput(\n name=\"bulk_insert_batch_concurrency\",\n display_name=\"Bulk Insert Batch Concurrency\",\n info=\"Optional concurrency level for bulk insert operations.\",\n advanced=True,\n ),\n IntInput(\n name=\"bulk_insert_overwrite_concurrency\",\n display_name=\"Bulk Insert Overwrite Concurrency\",\n info=\"Optional concurrency level for bulk insert operations that overwrite existing data.\",\n advanced=True,\n ),\n IntInput(\n name=\"bulk_delete_concurrency\",\n display_name=\"Bulk Delete Concurrency\",\n info=\"Optional concurrency level for bulk delete operations.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"setup_mode\",\n display_name=\"Setup Mode\",\n info=\"Configuration mode for setting up the vector store, with options like 'Sync', 'Async', or 'Off'.\",\n options=[\"Sync\", \"Async\", \"Off\"],\n advanced=True,\n value=\"Sync\",\n ),\n BoolInput(\n name=\"pre_delete_collection\",\n display_name=\"Pre Delete Collection\",\n info=\"Boolean flag to determine whether to delete the collection before creating a new one.\",\n advanced=True,\n ),\n StrInput(\n name=\"metadata_indexing_include\",\n display_name=\"Metadata Indexing Include\",\n info=\"Optional list of metadata fields to include in the indexing.\",\n advanced=True,\n ),\n StrInput(\n name=\"metadata_indexing_exclude\",\n display_name=\"Metadata Indexing Exclude\",\n info=\"Optional list of metadata fields to exclude from the indexing.\",\n advanced=True,\n ),\n StrInput(\n name=\"collection_indexing_policy\",\n display_name=\"Collection Indexing Policy\",\n info=\"Optional dictionary defining the indexing policy for the collection.\",\n advanced=True,\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Number of Results\",\n info=\"Number of results to return.\",\n advanced=True,\n value=4,\n ),\n DropdownInput(\n name=\"search_type\",\n display_name=\"Search Type\",\n info=\"Search type to use\",\n options=[\"Similarity\", \"Similarity with score threshold\", \"MMR (Max Marginal 
Relevance)\"],\n value=\"Similarity\",\n advanced=True,\n ),\n FloatInput(\n name=\"search_score_threshold\",\n display_name=\"Search Score Threshold\",\n info=\"Minimum similarity score threshold for search results. \"\n \"(when using 'Similarity with score threshold')\",\n value=0,\n advanced=True,\n ),\n DictInput(\n name=\"search_filter\",\n display_name=\"Search Metadata Filter\",\n info=\"Optional dictionary of filters to apply to the search query.\",\n advanced=True,\n is_list=True,\n ),\n ]\n\n def insert_in_dict(self, build_config, field_name, new_parameters):\n # Insert the new key-value pair after the found key\n for new_field_name, new_parameter in new_parameters.items():\n # Get all the items as a list of tuples (key, value)\n items = list(build_config.items())\n\n # Find the index of the key to insert after\n idx = len(items)\n for i, (key, _value) in enumerate(items):\n if key == field_name:\n idx = i + 1\n break\n\n items.insert(idx, (new_field_name, new_parameter))\n\n # Clear the original dictionary and update with the modified items\n build_config.clear()\n build_config.update(items)\n\n return build_config\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n if field_name == \"embedding_service\":\n if field_value == \"Astra Vectorize\":\n for field in [\"embedding\"]:\n if field in build_config:\n del build_config[field]\n\n new_parameter = DropdownInput(\n name=\"provider\",\n display_name=\"Vectorize Provider\",\n options=self.VECTORIZE_PROVIDERS_MAPPING.keys(),\n value=\"\",\n required=True,\n real_time_refresh=True,\n ).to_dict()\n\n self.insert_in_dict(build_config, \"embedding_service\", {\"provider\": new_parameter})\n else:\n for field in [\n \"provider\",\n \"z_00_model_name\",\n \"z_01_model_parameters\",\n \"z_02_api_key_name\",\n \"z_03_provider_api_key\",\n \"z_04_authentication\",\n ]:\n if field in build_config:\n del build_config[field]\n\n new_parameter = HandleInput(\n name=\"embedding\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Allows an embedding model configuration.\",\n ).to_dict()\n\n self.insert_in_dict(build_config, \"embedding_service\", {\"embedding\": new_parameter})\n\n elif field_name == \"provider\":\n for field in [\n \"z_00_model_name\",\n \"z_01_model_parameters\",\n \"z_02_api_key_name\",\n \"z_03_provider_api_key\",\n \"z_04_authentication\",\n ]:\n if field in build_config:\n del build_config[field]\n\n model_options = self.VECTORIZE_PROVIDERS_MAPPING[field_value][1]\n\n new_parameter_0 = DropdownInput(\n name=\"z_00_model_name\",\n display_name=\"Model Name\",\n info=\"The embedding model to use for the selected provider. Each provider has a different set of \"\n \"models available (full list at \"\n \"https://docs.datastax.com/en/astra-db-serverless/databases/embedding-generation.html):\\n\\n\"\n f\"{', '.join(model_options)}\",\n options=model_options,\n required=True,\n ).to_dict()\n\n new_parameter_1 = DictInput(\n name=\"z_01_model_parameters\",\n display_name=\"Model Parameters\",\n is_list=True,\n ).to_dict()\n\n new_parameter_2 = MessageTextInput(\n name=\"z_02_api_key_name\",\n display_name=\"API Key name\",\n info=\"The name of the embeddings provider API key stored on Astra. 
\"\n \"If set, it will override the 'ProviderKey' in the authentication parameters.\",\n ).to_dict()\n\n new_parameter_3 = SecretStrInput(\n name=\"z_03_provider_api_key\",\n display_name=\"Provider API Key\",\n info=\"An alternative to the Astra Authentication that passes an API key for the provider \"\n \"with each request to Astra DB. \"\n \"This may be used when Vectorize is configured for the collection, \"\n \"but no corresponding provider secret is stored within Astra's key management system.\",\n ).to_dict()\n\n new_parameter_4 = DictInput(\n name=\"z_04_authentication\",\n display_name=\"Authentication parameters\",\n is_list=True,\n ).to_dict()\n\n self.insert_in_dict(\n build_config,\n \"provider\",\n {\n \"z_00_model_name\": new_parameter_0,\n \"z_01_model_parameters\": new_parameter_1,\n \"z_02_api_key_name\": new_parameter_2,\n \"z_03_provider_api_key\": new_parameter_3,\n \"z_04_authentication\": new_parameter_4,\n },\n )\n\n return build_config\n\n def build_vectorize_options(self, **kwargs):\n for attribute in [\n \"provider\",\n \"z_00_model_name\",\n \"z_01_model_parameters\",\n \"z_02_api_key_name\",\n \"z_03_provider_api_key\",\n \"z_04_authentication\",\n ]:\n if not hasattr(self, attribute):\n setattr(self, attribute, None)\n\n # Fetch values from kwargs if any self.* attributes are None\n provider_value = self.VECTORIZE_PROVIDERS_MAPPING.get(self.provider, [None])[0] or kwargs.get(\"provider\")\n authentication = {**(self.z_04_authentication or kwargs.get(\"z_04_authentication\", {}))}\n\n api_key_name = self.z_02_api_key_name or kwargs.get(\"z_02_api_key_name\")\n provider_key = self.z_03_provider_api_key or kwargs.get(\"z_03_provider_api_key\")\n if api_key_name:\n authentication[\"providerKey\"] = api_key_name\n\n return {\n # must match astrapy.info.CollectionVectorServiceOptions\n \"collection_vector_service_options\": {\n \"provider\": provider_value,\n \"modelName\": self.z_00_model_name or kwargs.get(\"z_00_model_name\"),\n \"authentication\": authentication,\n \"parameters\": self.z_01_model_parameters or kwargs.get(\"z_01_model_parameters\", {}),\n },\n \"collection_embedding_api_key\": provider_key,\n }\n\n @check_cached_vector_store\n def build_vector_store(self, vectorize_options=None):\n try:\n from langchain_astradb import AstraDBVectorStore\n from langchain_astradb.utils.astradb import SetupMode\n except ImportError as e:\n msg = (\n \"Could not import langchain Astra DB integration package. 
\"\n \"Please install it with `pip install langchain-astradb`.\"\n )\n raise ImportError(msg) from e\n\n try:\n if not self.setup_mode:\n self.setup_mode = self._inputs[\"setup_mode\"].options[0]\n\n setup_mode_value = SetupMode[self.setup_mode.upper()]\n except KeyError as e:\n msg = f\"Invalid setup mode: {self.setup_mode}\"\n raise ValueError(msg) from e\n\n if self.embedding:\n embedding_dict = {\"embedding\": self.embedding}\n else:\n from astrapy.info import CollectionVectorServiceOptions\n\n dict_options = vectorize_options or self.build_vectorize_options()\n dict_options[\"authentication\"] = {\n k: v for k, v in dict_options.get(\"authentication\", {}).items() if k and v\n }\n dict_options[\"parameters\"] = {k: v for k, v in dict_options.get(\"parameters\", {}).items() if k and v}\n\n embedding_dict = {\n \"collection_vector_service_options\": CollectionVectorServiceOptions.from_dict(\n dict_options.get(\"collection_vector_service_options\", {})\n ),\n }\n\n vector_store_kwargs = {\n **embedding_dict,\n \"collection_name\": self.collection_name,\n \"token\": self.token,\n \"api_endpoint\": self.api_endpoint,\n \"namespace\": self.namespace or None,\n \"environment\": parse_api_endpoint(self.api_endpoint).environment,\n \"metric\": self.metric or None,\n \"batch_size\": self.batch_size or None,\n \"bulk_insert_batch_concurrency\": self.bulk_insert_batch_concurrency or None,\n \"bulk_insert_overwrite_concurrency\": self.bulk_insert_overwrite_concurrency or None,\n \"bulk_delete_concurrency\": self.bulk_delete_concurrency or None,\n \"setup_mode\": setup_mode_value,\n \"pre_delete_collection\": self.pre_delete_collection or False,\n }\n\n if self.metadata_indexing_include:\n vector_store_kwargs[\"metadata_indexing_include\"] = self.metadata_indexing_include\n elif self.metadata_indexing_exclude:\n vector_store_kwargs[\"metadata_indexing_exclude\"] = self.metadata_indexing_exclude\n elif self.collection_indexing_policy:\n vector_store_kwargs[\"collection_indexing_policy\"] = self.collection_indexing_policy\n\n try:\n vector_store = AstraDBVectorStore(**vector_store_kwargs)\n except Exception as e:\n msg = f\"Error initializing AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n self._add_documents_to_vector_store(vector_store)\n\n return vector_store\n\n def _add_documents_to_vector_store(self, vector_store):\n documents = []\n for _input in self.ingest_data or []:\n if isinstance(_input, Data):\n documents.append(_input.to_lc_document())\n else:\n msg = \"Vector Store Inputs must be Data objects.\"\n raise ValueError(msg)\n\n if documents:\n logger.debug(f\"Adding {len(documents)} documents to the Vector Store.\")\n try:\n vector_store.add_documents(documents)\n except Exception as e:\n msg = f\"Error adding documents to AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n else:\n logger.debug(\"No documents to add to the Vector Store.\")\n\n def _map_search_type(self):\n if self.search_type == \"Similarity with score threshold\":\n return \"similarity_score_threshold\"\n if self.search_type == \"MMR (Max Marginal Relevance)\":\n return \"mmr\"\n return \"similarity\"\n\n def _build_search_args(self):\n args = {\n \"k\": self.number_of_results,\n \"score_threshold\": self.search_score_threshold,\n }\n\n if self.search_filter:\n clean_filter = {k: v for k, v in self.search_filter.items() if k and v}\n if len(clean_filter) > 0:\n args[\"filter\"] = clean_filter\n return args\n\n def search_documents(self, vector_store=None) -> list[Data]:\n if not vector_store:\n 
vector_store = self.build_vector_store()\n\n logger.debug(f\"Search input: {self.search_input}\")\n logger.debug(f\"Search type: {self.search_type}\")\n logger.debug(f\"Number of results: {self.number_of_results}\")\n\n if self.search_input and isinstance(self.search_input, str) and self.search_input.strip():\n try:\n search_type = self._map_search_type()\n search_args = self._build_search_args()\n\n docs = vector_store.search(query=self.search_input, search_type=search_type, **search_args)\n except Exception as e:\n msg = f\"Error performing search in AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n logger.debug(f\"Retrieved documents: {len(docs)}\")\n\n data = docs_to_data(docs)\n logger.debug(f\"Converted documents to data: {len(data)}\")\n self.status = data\n return data\n logger.debug(\"No search input provided. Skipping search.\")\n return []\n\n def get_retriever_kwargs(self):\n search_args = self._build_search_args()\n return {\n \"search_type\": self._map_search_type(),\n \"search_kwargs\": search_args,\n }\n" + "value": "import os\n\nfrom astrapy.admin import parse_api_endpoint\nfrom loguru import logger\n\nfrom langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom langflow.helpers import docs_to_data\nfrom langflow.inputs import DictInput, FloatInput, MessageTextInput\nfrom langflow.io import (\n BoolInput,\n DataInput,\n DropdownInput,\n HandleInput,\n IntInput,\n MultilineInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema import Data\n\n\nclass AstraVectorStoreComponent(LCVectorStoreComponent):\n display_name: str = \"Astra DB\"\n description: str = \"Implementation of Vector Store using Astra DB with search capabilities\"\n documentation: str = \"https://docs.langflow.org/starter-projects-vector-store-rag\"\n name = \"AstraDB\"\n icon: str = \"AstraDB\"\n\n VECTORIZE_PROVIDERS_MAPPING = {\n \"Azure OpenAI\": [\"azureOpenAI\", [\"text-embedding-3-small\", \"text-embedding-3-large\", \"text-embedding-ada-002\"]],\n \"Hugging Face - Dedicated\": [\"huggingfaceDedicated\", [\"endpoint-defined-model\"]],\n \"Hugging Face - Serverless\": [\n \"huggingface\",\n [\n \"sentence-transformers/all-MiniLM-L6-v2\",\n \"intfloat/multilingual-e5-large\",\n \"intfloat/multilingual-e5-large-instruct\",\n \"BAAI/bge-small-en-v1.5\",\n \"BAAI/bge-base-en-v1.5\",\n \"BAAI/bge-large-en-v1.5\",\n ],\n ],\n \"Jina AI\": [\n \"jinaAI\",\n [\n \"jina-embeddings-v2-base-en\",\n \"jina-embeddings-v2-base-de\",\n \"jina-embeddings-v2-base-es\",\n \"jina-embeddings-v2-base-code\",\n \"jina-embeddings-v2-base-zh\",\n ],\n ],\n \"Mistral AI\": [\"mistral\", [\"mistral-embed\"]],\n \"NVIDIA\": [\"nvidia\", [\"NV-Embed-QA\"]],\n \"OpenAI\": [\"openai\", [\"text-embedding-3-small\", \"text-embedding-3-large\", \"text-embedding-ada-002\"]],\n \"Upstage\": [\"upstageAI\", [\"solar-embedding-1-large\"]],\n \"Voyage AI\": [\n \"voyageAI\",\n [\"voyage-large-2-instruct\", \"voyage-law-2\", \"voyage-code-2\", \"voyage-large-2\", \"voyage-2\"],\n ],\n }\n\n inputs = [\n SecretStrInput(\n name=\"token\",\n display_name=\"Astra DB Application Token\",\n info=\"Authentication token for accessing Astra DB.\",\n value=\"ASTRA_DB_APPLICATION_TOKEN\",\n required=True,\n advanced=os.getenv(\"ASTRA_ENHANCED\", \"false\").lower() == \"true\",\n ),\n SecretStrInput(\n name=\"api_endpoint\",\n display_name=\"Database\" if os.getenv(\"ASTRA_ENHANCED\", \"false\").lower() == \"true\" else \"API Endpoint\",\n info=\"API endpoint URL for the Astra DB service.\",\n 
value=\"ASTRA_DB_API_ENDPOINT\",\n required=True,\n ),\n StrInput(\n name=\"collection_name\",\n display_name=\"Collection Name\",\n info=\"The name of the collection within Astra DB where the vectors will be stored.\",\n required=True,\n ),\n MultilineInput(\n name=\"search_input\",\n display_name=\"Search Input\",\n ),\n DataInput(\n name=\"ingest_data\",\n display_name=\"Ingest Data\",\n is_list=True,\n ),\n StrInput(\n name=\"namespace\",\n display_name=\"Namespace\",\n info=\"Optional namespace within Astra DB to use for the collection.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"embedding_service\",\n display_name=\"Embedding Model or Astra Vectorize\",\n info=\"Determines whether to use Astra Vectorize for the collection.\",\n options=[\"Embedding Model\", \"Astra Vectorize\"],\n real_time_refresh=True,\n value=\"Embedding Model\",\n ),\n HandleInput(\n name=\"embedding\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Allows an embedding model configuration.\",\n ),\n DropdownInput(\n name=\"metric\",\n display_name=\"Metric\",\n info=\"Optional distance metric for vector comparisons in the vector store.\",\n options=[\"cosine\", \"dot_product\", \"euclidean\"],\n advanced=True,\n ),\n IntInput(\n name=\"batch_size\",\n display_name=\"Batch Size\",\n info=\"Optional number of data to process in a single batch.\",\n advanced=True,\n ),\n IntInput(\n name=\"bulk_insert_batch_concurrency\",\n display_name=\"Bulk Insert Batch Concurrency\",\n info=\"Optional concurrency level for bulk insert operations.\",\n advanced=True,\n ),\n IntInput(\n name=\"bulk_insert_overwrite_concurrency\",\n display_name=\"Bulk Insert Overwrite Concurrency\",\n info=\"Optional concurrency level for bulk insert operations that overwrite existing data.\",\n advanced=True,\n ),\n IntInput(\n name=\"bulk_delete_concurrency\",\n display_name=\"Bulk Delete Concurrency\",\n info=\"Optional concurrency level for bulk delete operations.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"setup_mode\",\n display_name=\"Setup Mode\",\n info=\"Configuration mode for setting up the vector store, with options like 'Sync', 'Async', or 'Off'.\",\n options=[\"Sync\", \"Async\", \"Off\"],\n advanced=True,\n value=\"Sync\",\n ),\n BoolInput(\n name=\"pre_delete_collection\",\n display_name=\"Pre Delete Collection\",\n info=\"Boolean flag to determine whether to delete the collection before creating a new one.\",\n advanced=True,\n ),\n StrInput(\n name=\"metadata_indexing_include\",\n display_name=\"Metadata Indexing Include\",\n info=\"Optional list of metadata fields to include in the indexing.\",\n advanced=True,\n ),\n StrInput(\n name=\"metadata_indexing_exclude\",\n display_name=\"Metadata Indexing Exclude\",\n info=\"Optional list of metadata fields to exclude from the indexing.\",\n advanced=True,\n ),\n StrInput(\n name=\"collection_indexing_policy\",\n display_name=\"Collection Indexing Policy\",\n info=\"Optional dictionary defining the indexing policy for the collection.\",\n advanced=True,\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Number of Results\",\n info=\"Number of results to return.\",\n advanced=True,\n value=4,\n ),\n DropdownInput(\n name=\"search_type\",\n display_name=\"Search Type\",\n info=\"Search type to use\",\n options=[\"Similarity\", \"Similarity with score threshold\", \"MMR (Max Marginal Relevance)\"],\n value=\"Similarity\",\n advanced=True,\n ),\n FloatInput(\n name=\"search_score_threshold\",\n display_name=\"Search Score 
Threshold\",\n info=\"Minimum similarity score threshold for search results. \"\n \"(when using 'Similarity with score threshold')\",\n value=0,\n advanced=True,\n ),\n DictInput(\n name=\"search_filter\",\n display_name=\"Search Metadata Filter\",\n info=\"Optional dictionary of filters to apply to the search query.\",\n advanced=True,\n is_list=True,\n ),\n ]\n\n def insert_in_dict(self, build_config, field_name, new_parameters):\n # Insert the new key-value pair after the found key\n for new_field_name, new_parameter in new_parameters.items():\n # Get all the items as a list of tuples (key, value)\n items = list(build_config.items())\n\n # Find the index of the key to insert after\n idx = len(items)\n for i, (key, _value) in enumerate(items):\n if key == field_name:\n idx = i + 1\n break\n\n items.insert(idx, (new_field_name, new_parameter))\n\n # Clear the original dictionary and update with the modified items\n build_config.clear()\n build_config.update(items)\n\n return build_config\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n if field_name == \"embedding_service\":\n if field_value == \"Astra Vectorize\":\n for field in [\"embedding\"]:\n if field in build_config:\n del build_config[field]\n\n new_parameter = DropdownInput(\n name=\"provider\",\n display_name=\"Vectorize Provider\",\n options=self.VECTORIZE_PROVIDERS_MAPPING.keys(),\n value=\"\",\n required=True,\n real_time_refresh=True,\n ).to_dict()\n\n self.insert_in_dict(build_config, \"embedding_service\", {\"provider\": new_parameter})\n else:\n for field in [\n \"provider\",\n \"z_00_model_name\",\n \"z_01_model_parameters\",\n \"z_02_api_key_name\",\n \"z_03_provider_api_key\",\n \"z_04_authentication\",\n ]:\n if field in build_config:\n del build_config[field]\n\n new_parameter = HandleInput(\n name=\"embedding\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Allows an embedding model configuration.\",\n ).to_dict()\n\n self.insert_in_dict(build_config, \"embedding_service\", {\"embedding\": new_parameter})\n\n elif field_name == \"provider\":\n for field in [\n \"z_00_model_name\",\n \"z_01_model_parameters\",\n \"z_02_api_key_name\",\n \"z_03_provider_api_key\",\n \"z_04_authentication\",\n ]:\n if field in build_config:\n del build_config[field]\n\n model_options = self.VECTORIZE_PROVIDERS_MAPPING[field_value][1]\n\n new_parameter_0 = DropdownInput(\n name=\"z_00_model_name\",\n display_name=\"Model Name\",\n info=\"The embedding model to use for the selected provider. Each provider has a different set of \"\n \"models available (full list at \"\n \"https://docs.datastax.com/en/astra-db-serverless/databases/embedding-generation.html):\\n\\n\"\n f\"{', '.join(model_options)}\",\n options=model_options,\n required=True,\n ).to_dict()\n\n new_parameter_1 = DictInput(\n name=\"z_01_model_parameters\",\n display_name=\"Model Parameters\",\n is_list=True,\n ).to_dict()\n\n new_parameter_2 = MessageTextInput(\n name=\"z_02_api_key_name\",\n display_name=\"API Key name\",\n info=\"The name of the embeddings provider API key stored on Astra. \"\n \"If set, it will override the 'ProviderKey' in the authentication parameters.\",\n ).to_dict()\n\n new_parameter_3 = SecretStrInput(\n name=\"z_03_provider_api_key\",\n display_name=\"Provider API Key\",\n info=\"An alternative to the Astra Authentication that passes an API key for the provider \"\n \"with each request to Astra DB. 
\"\n \"This may be used when Vectorize is configured for the collection, \"\n \"but no corresponding provider secret is stored within Astra's key management system.\",\n ).to_dict()\n\n new_parameter_4 = DictInput(\n name=\"z_04_authentication\",\n display_name=\"Authentication parameters\",\n is_list=True,\n ).to_dict()\n\n self.insert_in_dict(\n build_config,\n \"provider\",\n {\n \"z_00_model_name\": new_parameter_0,\n \"z_01_model_parameters\": new_parameter_1,\n \"z_02_api_key_name\": new_parameter_2,\n \"z_03_provider_api_key\": new_parameter_3,\n \"z_04_authentication\": new_parameter_4,\n },\n )\n\n return build_config\n\n def build_vectorize_options(self, **kwargs):\n for attribute in [\n \"provider\",\n \"z_00_model_name\",\n \"z_01_model_parameters\",\n \"z_02_api_key_name\",\n \"z_03_provider_api_key\",\n \"z_04_authentication\",\n ]:\n if not hasattr(self, attribute):\n setattr(self, attribute, None)\n\n # Fetch values from kwargs if any self.* attributes are None\n provider_value = self.VECTORIZE_PROVIDERS_MAPPING.get(self.provider, [None])[0] or kwargs.get(\"provider\")\n authentication = {**(self.z_04_authentication or kwargs.get(\"z_04_authentication\", {}))}\n\n api_key_name = self.z_02_api_key_name or kwargs.get(\"z_02_api_key_name\")\n provider_key = self.z_03_provider_api_key or kwargs.get(\"z_03_provider_api_key\")\n if api_key_name:\n authentication[\"providerKey\"] = api_key_name\n\n return {\n # must match astrapy.info.CollectionVectorServiceOptions\n \"collection_vector_service_options\": {\n \"provider\": provider_value,\n \"modelName\": self.z_00_model_name or kwargs.get(\"z_00_model_name\"),\n \"authentication\": authentication,\n \"parameters\": self.z_01_model_parameters or kwargs.get(\"z_01_model_parameters\", {}),\n },\n \"collection_embedding_api_key\": provider_key,\n }\n\n @check_cached_vector_store\n def build_vector_store(self, vectorize_options=None):\n try:\n from langchain_astradb import AstraDBVectorStore\n from langchain_astradb.utils.astradb import SetupMode\n except ImportError as e:\n msg = (\n \"Could not import langchain Astra DB integration package. 
\"\n \"Please install it with `pip install langchain-astradb`.\"\n )\n raise ImportError(msg) from e\n\n try:\n if not self.setup_mode:\n self.setup_mode = self._inputs[\"setup_mode\"].options[0]\n\n setup_mode_value = SetupMode[self.setup_mode.upper()]\n except KeyError as e:\n msg = f\"Invalid setup mode: {self.setup_mode}\"\n raise ValueError(msg) from e\n\n if self.embedding:\n embedding_dict = {\"embedding\": self.embedding}\n else:\n from astrapy.info import CollectionVectorServiceOptions\n\n dict_options = vectorize_options or self.build_vectorize_options()\n dict_options[\"authentication\"] = {\n k: v for k, v in dict_options.get(\"authentication\", {}).items() if k and v\n }\n dict_options[\"parameters\"] = {k: v for k, v in dict_options.get(\"parameters\", {}).items() if k and v}\n\n embedding_dict = {\n \"collection_vector_service_options\": CollectionVectorServiceOptions.from_dict(\n dict_options.get(\"collection_vector_service_options\", {})\n ),\n }\n\n vector_store_kwargs = {\n **embedding_dict,\n \"collection_name\": self.collection_name,\n \"token\": self.token,\n \"api_endpoint\": self.api_endpoint,\n \"namespace\": self.namespace or None,\n \"environment\": parse_api_endpoint(self.api_endpoint).environment,\n \"metric\": self.metric or None,\n \"batch_size\": self.batch_size or None,\n \"bulk_insert_batch_concurrency\": self.bulk_insert_batch_concurrency or None,\n \"bulk_insert_overwrite_concurrency\": self.bulk_insert_overwrite_concurrency or None,\n \"bulk_delete_concurrency\": self.bulk_delete_concurrency or None,\n \"setup_mode\": setup_mode_value,\n \"pre_delete_collection\": self.pre_delete_collection or False,\n }\n\n if self.metadata_indexing_include:\n vector_store_kwargs[\"metadata_indexing_include\"] = self.metadata_indexing_include\n elif self.metadata_indexing_exclude:\n vector_store_kwargs[\"metadata_indexing_exclude\"] = self.metadata_indexing_exclude\n elif self.collection_indexing_policy:\n vector_store_kwargs[\"collection_indexing_policy\"] = self.collection_indexing_policy\n\n try:\n vector_store = AstraDBVectorStore(**vector_store_kwargs)\n except Exception as e:\n msg = f\"Error initializing AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n self._add_documents_to_vector_store(vector_store)\n\n return vector_store\n\n def _add_documents_to_vector_store(self, vector_store):\n documents = []\n for _input in self.ingest_data or []:\n if isinstance(_input, Data):\n documents.append(_input.to_lc_document())\n else:\n msg = \"Vector Store Inputs must be Data objects.\"\n raise TypeError(msg)\n\n if documents:\n logger.debug(f\"Adding {len(documents)} documents to the Vector Store.\")\n try:\n vector_store.add_documents(documents)\n except Exception as e:\n msg = f\"Error adding documents to AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n else:\n logger.debug(\"No documents to add to the Vector Store.\")\n\n def _map_search_type(self):\n if self.search_type == \"Similarity with score threshold\":\n return \"similarity_score_threshold\"\n if self.search_type == \"MMR (Max Marginal Relevance)\":\n return \"mmr\"\n return \"similarity\"\n\n def _build_search_args(self):\n args = {\n \"k\": self.number_of_results,\n \"score_threshold\": self.search_score_threshold,\n }\n\n if self.search_filter:\n clean_filter = {k: v for k, v in self.search_filter.items() if k and v}\n if len(clean_filter) > 0:\n args[\"filter\"] = clean_filter\n return args\n\n def search_documents(self, vector_store=None) -> list[Data]:\n if not vector_store:\n 
vector_store = self.build_vector_store()\n\n logger.debug(f\"Search input: {self.search_input}\")\n logger.debug(f\"Search type: {self.search_type}\")\n logger.debug(f\"Number of results: {self.number_of_results}\")\n\n if self.search_input and isinstance(self.search_input, str) and self.search_input.strip():\n try:\n search_type = self._map_search_type()\n search_args = self._build_search_args()\n\n docs = vector_store.search(query=self.search_input, search_type=search_type, **search_args)\n except Exception as e:\n msg = f\"Error performing search in AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n logger.debug(f\"Retrieved documents: {len(docs)}\")\n\n data = docs_to_data(docs)\n logger.debug(f\"Converted documents to data: {len(data)}\")\n self.status = data\n return data\n logger.debug(\"No search input provided. Skipping search.\")\n return []\n\n def get_retriever_kwargs(self):\n search_args = self._build_search_args()\n return {\n \"search_type\": self._map_search_type(),\n \"search_kwargs\": search_args,\n }\n" }, "collection_indexing_policy": { "advanced": true, @@ -3327,7 +3327,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. 
\"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\") # type: ignore\n else:\n output = output.bind(response_format={\"type\": \"json_object\"}) # type: ignore\n\n return output # type: ignore\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\") # type: ignore\n if message:\n return message\n return None\n" + "value": "import operator\nfrom functools import reduce\n\nfrom langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.inputs.inputs import HandleInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(name=\"model_kwargs\", display_name=\"Model Kwargs\", advanced=True),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DictInput(\n name=\"output_schema\",\n is_list=True,\n display_name=\"Schema\",\n advanced=True,\n info=\"The schema for the Output of the model. \"\n \"You must pass the word JSON in the prompt. \"\n \"If left blank, JSON mode will be disabled.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n ),\n FloatInput(name=\"temperature\", display_name=\"Temperature\", value=0.1),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n HandleInput(\n name=\"output_parser\",\n display_name=\"Output Parser\",\n info=\"The parser to use to parse the output of the model\",\n advanced=True,\n input_types=[\"OutputParser\"],\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # self.output_schema is a list of dictionaries\n # let's convert it to a dictionary\n output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = bool(output_schema_dict) or self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key) if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if json_mode:\n if output_schema_dict:\n output = output.with_structured_output(schema=output_schema_dict, method=\"json_mode\")\n else:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def _get_exception_message(self, e: Exception):\n \"\"\"\n Get a message from an OpenAI exception.\n\n Args:\n exception (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n" }, "input_value": { "advanced": false, diff --git a/src/backend/base/langflow/utils/validate.py b/src/backend/base/langflow/utils/validate.py index c507ffaa9f5..3898c976c8c 100644 --- a/src/backend/base/langflow/utils/validate.py +++ b/src/backend/base/langflow/utils/validate.py @@ -328,5 +328,5 @@ def extract_class_name(code): for node in module.body: if isinstance(node, ast.ClassDef): return node.name - 
msg = "No class definition found in the code string" + msg = f"No class definition found in the code string. Code snippet: {code[:100]}" raise ValueError(msg)