diff --git a/.github/workflows/python_test.yml b/.github/workflows/python_test.yml index 7128870fad0e..ff76a5bf8836 100644 --- a/.github/workflows/python_test.yml +++ b/.github/workflows/python_test.yml @@ -50,7 +50,7 @@ jobs: with: timeout_minutes: 12 max_attempts: 2 - command: make unit_tests args="--splits ${{ matrix.splitCount }} --group ${{ matrix.group }}" + command: make unit_tests async=false args="--splits ${{ matrix.splitCount }} --group ${{ matrix.group }}" test-cli: name: Test CLI - Python ${{ matrix.python-version }} diff --git a/Makefile b/Makefile index b461ca736e5f..484fc27bdaa4 100644 --- a/Makefile +++ b/Makefile @@ -18,7 +18,7 @@ env ?= .env open_browser ?= true path = src/backend/base/langflow/frontend workers ?= 1 - +async ?= true all: help ###################### @@ -130,14 +130,25 @@ coverage: ## run the tests and generate a coverage report @poetry run coverage erase unit_tests: ## run unit tests +ifeq ($(async), true) + poetry run pytest src/backend/tests \ + --ignore=src/backend/tests/integration \ + --instafail -n auto -ra -m "not api_key_required" \ + --durations-path src/backend/tests/.test_durations \ + --splitting-algorithm least_duration \ + $(args) +else poetry run pytest src/backend/tests \ --ignore=src/backend/tests/integration \ - --instafail -ra -n auto -m "not api_key_required" \ + --instafail -ra -m "not api_key_required" \ + --durations-path src/backend/tests/.test_durations \ + --splitting-algorithm least_duration \ $(args) +endif integration_tests: ## run integration tests poetry run pytest src/backend/tests/integration \ - --instafail -ra -n auto \ + --instafail -ra \ $(args) tests: ## run unit, integration, coverage tests diff --git a/src/backend/base/langflow/base/prompts/api_utils.py b/src/backend/base/langflow/base/prompts/api_utils.py index 29516241a05e..fd5ddd9941e2 100644 --- a/src/backend/base/langflow/base/prompts/api_utils.py +++ b/src/backend/base/langflow/base/prompts/api_utils.py @@ -6,7 +6,7 @@ from loguru 
import logger from langflow.interface.utils import extract_input_variables_from_prompt -from langflow.template.field.prompt import DefaultPromptField +from langflow.inputs.inputs import DefaultPromptField _INVALID_CHARACTERS = { diff --git a/src/backend/base/langflow/graph/edge/base.py b/src/backend/base/langflow/graph/edge/base.py index 549b3f0950f4..f3520e72cf18 100644 --- a/src/backend/base/langflow/graph/edge/base.py +++ b/src/backend/base/langflow/graph/edge/base.py @@ -1,51 +1,31 @@ -from typing import TYPE_CHECKING, Any, List, Optional, cast +from typing import TYPE_CHECKING, Any, cast from loguru import logger -from pydantic import BaseModel, Field, field_validator -from langflow.graph.edge.schema import EdgeData +from langflow.graph.edge.schema import EdgeData, SourceHandle, TargetHandle, TargetHandleDict from langflow.schema.schema import INPUT_FIELD_NAME if TYPE_CHECKING: from langflow.graph.vertex.base import Vertex -class SourceHandle(BaseModel): - baseClasses: list[str] = Field(default_factory=list, description="List of base classes for the source handle.") - dataType: str = Field(..., description="Data type for the source handle.") - id: str = Field(..., description="Unique identifier for the source handle.") - name: Optional[str] = Field(None, description="Name of the source handle.") - output_types: List[str] = Field(default_factory=list, description="List of output types for the source handle.") - - @field_validator("name", mode="before") - @classmethod - def validate_name(cls, v, _info): - if _info.data["dataType"] == "GroupNode": - # 'OpenAIModel-u4iGV_text_output' - splits = v.split("_", 1) - if len(splits) != 2: - raise ValueError(f"Invalid source handle name {v}") - v = splits[1] - return v - - -class TargetHandle(BaseModel): - fieldName: str = Field(..., description="Field name for the target handle.") - id: str = Field(..., description="Unique identifier for the target handle.") - inputTypes: Optional[List[str]] = Field(None, 
description="List of input types for the target handle.") - type: str = Field(..., description="Type of the target handle.") - - class Edge: def __init__(self, source: "Vertex", target: "Vertex", edge: EdgeData): self.source_id: str = source.id if source else "" self.target_id: str = target.id if target else "" + self.valid_handles: bool = False + self.target_param: str | None = None + self._target_handle: TargetHandleDict | str | None = None + self._data = edge.copy() if data := edge.get("data", {}): self._source_handle = data.get("sourceHandle", {}) - self._target_handle = data.get("targetHandle", {}) + self._target_handle = cast(TargetHandleDict, data.get("targetHandle", {})) self.source_handle: SourceHandle = SourceHandle(**self._source_handle) - self.target_handle: TargetHandle = TargetHandle(**self._target_handle) - self.target_param = self.target_handle.fieldName + if isinstance(self._target_handle, dict): + self.target_handle: TargetHandle = TargetHandle(**self._target_handle) + else: + raise ValueError("Target handle is not a dictionary") + self.target_param = self.target_handle.field_name # validate handles self.validate_handles(source, target) else: @@ -55,23 +35,31 @@ def __init__(self, source: "Vertex", target: "Vertex", edge: EdgeData): self._target_handle = edge.get("targetHandle", "") # type: ignore # 'BaseLoader;BaseOutputParser|documents|PromptTemplate-zmTlD' # target_param is documents - self.target_param = cast(str, self._target_handle.split("|")[1]) # type: ignore + if isinstance(self._target_handle, str): + self.target_param = self._target_handle.split("|")[1] + self.source_handle = None + self.target_handle = None + else: + raise ValueError("Target handle is not a string") # Validate in __init__ to fail fast self.validate_edge(source, target) + def to_data(self): + return self._data + def validate_handles(self, source, target) -> None: - if isinstance(self._source_handle, str) or self.source_handle.baseClasses: + if 
isinstance(self._source_handle, str) or self.source_handle.base_classes: self._legacy_validate_handles(source, target) else: self._validate_handles(source, target) def _validate_handles(self, source, target) -> None: - if self.target_handle.inputTypes is None: + if self.target_handle.input_types is None: self.valid_handles = self.target_handle.type in self.source_handle.output_types elif self.source_handle.output_types is not None: self.valid_handles = ( - any(output_type in self.target_handle.inputTypes for output_type in self.source_handle.output_types) + any(output_type in self.target_handle.input_types for output_type in self.source_handle.output_types) or self.target_handle.type in self.source_handle.output_types ) @@ -81,12 +69,12 @@ def _validate_handles(self, source, target) -> None: raise ValueError(f"Edge between {source.vertex_type} and {target.vertex_type} " f"has invalid handles") def _legacy_validate_handles(self, source, target) -> None: - if self.target_handle.inputTypes is None: - self.valid_handles = self.target_handle.type in self.source_handle.baseClasses + if self.target_handle.input_types is None: + self.valid_handles = self.target_handle.type in self.source_handle.base_classes else: self.valid_handles = ( - any(baseClass in self.target_handle.inputTypes for baseClass in self.source_handle.baseClasses) - or self.target_handle.type in self.source_handle.baseClasses + any(baseClass in self.target_handle.input_types for baseClass in self.source_handle.base_classes) + or self.target_handle.type in self.source_handle.base_classes ) if not self.valid_handles: logger.debug(self.source_handle) @@ -101,9 +89,9 @@ def __setstate__(self, state): self.target_handle = state.get("target_handle") def validate_edge(self, source, target) -> None: - # If the self.source_handle has baseClasses, then we are using the legacy + # If the self.source_handle has base_classes, then we are using the legacy # way of defining the source and target handles - if 
isinstance(self._source_handle, str) or self.source_handle.baseClasses: + if isinstance(self._source_handle, str) or self.source_handle.base_classes: self._legacy_validate_edge(source, target) else: self._validate_edge(source, target) @@ -230,5 +218,5 @@ def __repr__(self) -> str: if (hasattr(self, "source_handle") and self.source_handle) and ( hasattr(self, "target_handle") and self.target_handle ): - return f"{self.source_id} -[{self.source_handle.name}->{self.target_handle.fieldName}]-> {self.target_id}" + return f"{self.source_id} -[{self.source_handle.name}->{self.target_handle.field_name}]-> {self.target_id}" return f"{self.source_id} -[{self.target_param}]-> {self.target_id}" diff --git a/src/backend/base/langflow/graph/edge/schema.py b/src/backend/base/langflow/graph/edge/schema.py index d8ae9963c18a..7e0f04108ac1 100644 --- a/src/backend/base/langflow/graph/edge/schema.py +++ b/src/backend/base/langflow/graph/edge/schema.py @@ -1,6 +1,6 @@ from typing import Any, List, Optional -from pydantic import Field, field_validator +from pydantic import ConfigDict, Field, field_validator from typing_extensions import TypedDict from langflow.helpers.base_model import BaseModel @@ -39,7 +39,8 @@ def format(self, sep: str = "\n") -> str: class TargetHandle(BaseModel): - fieldName: str = Field(..., alias="fieldName", description="Field name for the target handle.") + model_config = ConfigDict(populate_by_name=True) + field_name: str = Field(..., alias="fieldName", description="Field name for the target handle.") id: str = Field(..., description="Unique identifier for the target handle.") input_types: List[str] = Field( default_factory=list, alias="inputTypes", description="List of input types for the target handle." 
@@ -48,6 +49,7 @@ class TargetHandle(BaseModel): class SourceHandle(BaseModel): + model_config = ConfigDict(populate_by_name=True) base_classes: list[str] = Field( default_factory=list, alias="baseClasses", description="List of base classes for the source handle." ) diff --git a/src/backend/base/langflow/graph/graph/base.py b/src/backend/base/langflow/graph/graph/base.py index e48519cb4c2e..393f657315ed 100644 --- a/src/backend/base/langflow/graph/graph/base.py +++ b/src/backend/base/langflow/graph/graph/base.py @@ -1,4 +1,5 @@ import asyncio +import json import uuid from collections import defaultdict, deque from datetime import datetime, timezone @@ -14,11 +15,12 @@ from langflow.graph.edge.schema import EdgeData from langflow.graph.graph.constants import Finish, lazy_load_vertex_dict from langflow.graph.graph.runnable_vertices_manager import RunnableVerticesManager -from langflow.graph.graph.schema import VertexBuildResult +from langflow.graph.graph.schema import GraphData, GraphDump, VertexBuildResult from langflow.graph.graph.state_manager import GraphStateManager from langflow.graph.graph.utils import find_start_component_id, process_flow, sort_up_to_vertex from langflow.graph.schema import InterfaceComponentTypes, RunOutputs from langflow.graph.vertex.base import Vertex, VertexStates +from langflow.graph.vertex.schema import NodeData from langflow.graph.vertex.types import ComponentVertex, InterfaceVertex, StateVertex from langflow.schema import Data from langflow.schema.schema import INPUT_FIELD_NAME, InputType @@ -75,7 +77,7 @@ def __init__( self.vertices: List[Vertex] = [] self.run_manager = RunnableVerticesManager() self.state_manager = GraphStateManager() - self._vertices: List[dict] = [] + self._vertices: List[NodeData] = [] self._edges: List[EdgeData] = [] self.top_level_vertices: List[str] = [] self.vertex_map: Dict[str, Vertex] = {} @@ -86,6 +88,7 @@ def __init__( self._run_queue: deque[str] = deque() self._first_layer: List[str] = [] self._lock = 
asyncio.Lock() + self.raw_graph_data: GraphData = {"nodes": [], "edges": []} try: self.tracing_service: "TracingService" | None = get_tracing_service() except Exception as exc: @@ -97,7 +100,39 @@ def __init__( if (start is not None and end is None) or (start is None and end is not None): raise ValueError("You must provide both input and output components") - def add_nodes_and_edges(self, nodes: List[Dict], edges: List[EdgeData]): + def dumps( + self, + name: Optional[str] = None, + description: Optional[str] = None, + endpoint_name: Optional[str] = None, + ) -> str: + graph_dict = self.dump(name, description, endpoint_name) + return json.dumps(graph_dict, indent=4, sort_keys=True) + + def dump( + self, name: Optional[str] = None, description: Optional[str] = None, endpoint_name: Optional[str] = None + ) -> GraphDump: + if self.raw_graph_data != {"nodes": [], "edges": []}: + data_dict = self.raw_graph_data + else: + # we need to convert the vertices and edges to json + nodes = [node.to_data() for node in self.vertices] + edges = [edge.to_data() for edge in self.edges] + self.raw_graph_data = {"nodes": nodes, "edges": edges} + data_dict = self.raw_graph_data + graph_dict: GraphDump = { + "data": data_dict, + "is_component": len(data_dict.get("nodes", [])) == 1 and data_dict["edges"] == [], + } + if name: + graph_dict["name"] = name + if description: + graph_dict["description"] = description + if endpoint_name: + graph_dict["endpoint_name"] = endpoint_name + return graph_dict + + def add_nodes_and_edges(self, nodes: List[NodeData], edges: List[EdgeData]): self._vertices = nodes self._edges = edges self.raw_graph_data = {"nodes": nodes, "edges": edges} @@ -183,7 +218,7 @@ async def async_start(self, inputs: Optional[List[dict]] = None): return def start(self, inputs: Optional[List[dict]] = None) -> Generator: - #! Change this soon + #! 
Change this ASAP nest_asyncio.apply() loop = asyncio.get_event_loop() async_gen = self.async_start(inputs) @@ -208,8 +243,7 @@ def _add_edge(self, edge: EdgeData): self.in_degree_map[target_id] += 1 self.parent_child_map[source_id].append(target_id) - # TODO: Create a TypedDict to represente the node - def add_node(self, node: dict): + def add_node(self, node: NodeData): self._vertices.append(node) def add_edge(self, edge: EdgeData): @@ -1400,7 +1434,7 @@ def _build_vertices(self) -> List[Vertex]: return vertices - def _create_vertex(self, frontend_data: dict): + def _create_vertex(self, frontend_data: NodeData): vertex_data = frontend_data["data"] vertex_type: str = vertex_data["type"] # type: ignore vertex_base_type: str = vertex_data["node"]["template"]["_type"] # type: ignore diff --git a/src/backend/base/langflow/graph/graph/schema.py b/src/backend/base/langflow/graph/graph/schema.py index 30d67255fd96..306ea7ba63c8 100644 --- a/src/backend/base/langflow/graph/graph/schema.py +++ b/src/backend/base/langflow/graph/graph/schema.py @@ -1,10 +1,35 @@ from typing import TYPE_CHECKING, NamedTuple +from typing_extensions import NotRequired, TypedDict + +from langflow.graph.edge.schema import EdgeData +from langflow.graph.vertex.schema import NodeData + if TYPE_CHECKING: from langflow.graph.schema import ResultData from langflow.graph.vertex.base import Vertex +class ViewPort(TypedDict): + x: float + y: float + zoom: float + + +class GraphData(TypedDict): + nodes: list[NodeData] + edges: list[EdgeData] + viewport: NotRequired[ViewPort] + + +class GraphDump(TypedDict, total=False): + data: GraphData + is_component: bool + name: str + description: str + endpoint_name: str + + class VertexBuildResult(NamedTuple): result_dict: "ResultData" params: str diff --git a/src/backend/base/langflow/graph/vertex/base.py b/src/backend/base/langflow/graph/vertex/base.py index 357e9dc2b906..b47dcf86e7c2 100644 --- a/src/backend/base/langflow/graph/vertex/base.py +++ 
b/src/backend/base/langflow/graph/vertex/base.py @@ -13,6 +13,7 @@ from langflow.exceptions.component import ComponentBuildException from langflow.graph.schema import INPUT_COMPONENTS, OUTPUT_COMPONENTS, InterfaceComponentTypes, ResultData from langflow.graph.utils import UnbuiltObject, UnbuiltResult, log_transaction +from langflow.graph.vertex.schema import NodeData from langflow.interface.initialize import loading from langflow.interface.listing import lazy_load_dict from langflow.schema.artifact import ArtifactType @@ -42,7 +43,7 @@ class VertexStates(str, Enum): class Vertex: def __init__( self, - data: Dict, + data: NodeData, graph: "Graph", base_type: Optional[str] = None, is_task: bool = False, @@ -63,7 +64,7 @@ def __init__( self.has_external_input = False self.has_external_output = False self.graph = graph - self._data = data + self._data = data.copy() self.base_type: Optional[str] = base_type self.outputs: List[Dict] = [] self._parse_data() @@ -101,6 +102,9 @@ def set_input_value(self, name: str, value: Any): raise ValueError(f"Vertex {self.id} does not have a component instance.") self._custom_component._set_input_value(name, value) + def to_data(self): + return self._data + def add_component_instance(self, component_instance: "Component"): component_instance.set_vertex(self) self._custom_component = component_instance diff --git a/src/backend/base/langflow/graph/vertex/schema.py b/src/backend/base/langflow/graph/vertex/schema.py new file mode 100644 index 000000000000..98f6ba5ef204 --- /dev/null +++ b/src/backend/base/langflow/graph/vertex/schema.py @@ -0,0 +1,21 @@ +from typing import Dict + +from typing_extensions import NotRequired, TypedDict + + +class Position(TypedDict): + x: float + y: float + + +class NodeData(TypedDict): + id: str + data: Dict + dragging: NotRequired[bool] + height: NotRequired[int] + width: NotRequired[int] + position: NotRequired[Position] + positionAbsolute: NotRequired[Position] + selected: NotRequired[bool] + 
parent_node_id: NotRequired[str] + type: str diff --git a/src/backend/base/langflow/graph/vertex/types.py b/src/backend/base/langflow/graph/vertex/types.py index 2078e341c081..4c033227a3ef 100644 --- a/src/backend/base/langflow/graph/vertex/types.py +++ b/src/backend/base/langflow/graph/vertex/types.py @@ -9,6 +9,7 @@ from langflow.graph.schema import CHAT_COMPONENTS, RECORDS_COMPONENTS, InterfaceComponentTypes, ResultData from langflow.graph.utils import UnbuiltObject, log_transaction, log_vertex_build, serialize_field from langflow.graph.vertex.base import Vertex +from langflow.graph.vertex.schema import NodeData from langflow.inputs.inputs import InputTypes from langflow.schema import Data from langflow.schema.artifact import ArtifactType @@ -23,7 +24,7 @@ class CustomComponentVertex(Vertex): - def __init__(self, data: Dict, graph): + def __init__(self, data: NodeData, graph): super().__init__(data, graph=graph, base_type="custom_components") def _built_object_repr(self): @@ -32,9 +33,19 @@ def _built_object_repr(self): class ComponentVertex(Vertex): - def __init__(self, data: Dict, graph): + def __init__(self, data: NodeData, graph): super().__init__(data, graph=graph, base_type="component") + def get_input(self, name: str) -> InputTypes: + if self._custom_component is None: + raise ValueError(f"Vertex {self.id} does not have a component instance.") + return self._custom_component.get_input(name) + + def get_output(self, name: str) -> Output: + if self._custom_component is None: + raise ValueError(f"Vertex {self.id} does not have a component instance.") + return self._custom_component.get_output(name) + def _built_object_repr(self): if self.artifacts and "repr" in self.artifacts: return self.artifacts["repr"] or super()._built_object_repr() @@ -58,16 +69,6 @@ def _update_built_object_and_artifacts(self, result): for key, value in self._built_object.items(): self.add_result(key, value) - def get_input(self, name: str) -> InputTypes: - if self._custom_component 
is None: - raise ValueError(f"Vertex {self.id} does not have a component instance.") - return self._custom_component.get_input(name) - - def get_output(self, name: str) -> Output: - if self._custom_component is None: - raise ValueError(f"Vertex {self.id} does not have a component instance.") - return self._custom_component.get_output(name) - def get_edge_with_target(self, target_id: str) -> Generator["ContractEdge", None, None]: """ Get the edge with the target id. @@ -174,7 +175,7 @@ def _finalize_build(self): class InterfaceVertex(ComponentVertex): - def __init__(self, data: Dict, graph): + def __init__(self, data: NodeData, graph): super().__init__(data, graph=graph) self.steps = [self._build, self._run] @@ -424,7 +425,7 @@ def _is_chat_input(self): class StateVertex(ComponentVertex): - def __init__(self, data: Dict, graph): + def __init__(self, data: NodeData, graph): super().__init__(data, graph=graph) self.steps = [self._build] self.is_state = False diff --git a/src/backend/base/langflow/initial_setup/setup.py b/src/backend/base/langflow/initial_setup/setup.py index dfcd1011a57c..dfedeec7e2a6 100644 --- a/src/backend/base/langflow/initial_setup/setup.py +++ b/src/backend/base/langflow/initial_setup/setup.py @@ -2,6 +2,7 @@ import json import os import shutil +import time from collections import defaultdict from copy import deepcopy from datetime import datetime, timezone @@ -23,6 +24,7 @@ from langflow.services.database.models.user.crud import get_user_by_username from langflow.services.deps import get_settings_service, get_storage_service, get_variable_service, session_scope from langflow.template.field.prompt import DEFAULT_PROMPT_INTUT_TYPES +from langflow.utils.util import escape_json_dump STARTER_FOLDER_NAME = "Starter Projects" STARTER_FOLDER_DESCRIPTION = "Starter projects to help you get started in Langflow." 
@@ -319,10 +321,6 @@ def update_edges_with_latest_component_versions(project_data): return project_data_copy -def escape_json_dump(edge_dict): - return json.dumps(edge_dict).replace('"', "œ") - - def log_node_changes(node_changes_log): # The idea here is to log the changes that were made to the nodes in debug # Something like: @@ -339,17 +337,23 @@ def log_node_changes(node_changes_log): logger.debug("\n".join(formatted_messages)) -def load_starter_projects() -> list[tuple[Path, dict]]: +def load_starter_projects(retries=3, delay=1) -> list[tuple[Path, dict]]: starter_projects = [] folder = Path(__file__).parent / "starter_projects" for file in folder.glob("*.json"): - with open(file, "r", encoding="utf-8") as f: - try: - project = orjson.loads(f.read()) - starter_projects.append((file, project)) - logger.info(f"Loaded starter project {file}") - except orjson.JSONDecodeError as e: - raise ValueError(f"Error loading starter project {file}: {e}") + attempt = 0 + while attempt < retries: + with open(file, "r", encoding="utf-8") as f: + try: + project = orjson.loads(f.read()) + starter_projects.append((file, project)) + logger.info(f"Loaded starter project {file}") + break # Break if load is successful + except orjson.JSONDecodeError as e: + attempt += 1 + if attempt >= retries: + raise ValueError(f"Error loading starter project {file}: {e}") + time.sleep(delay) # Wait before retrying return starter_projects diff --git a/src/backend/base/langflow/inputs/input_mixin.py b/src/backend/base/langflow/inputs/input_mixin.py index 7b7f7c2b1932..5e885c1a794a 100644 --- a/src/backend/base/langflow/inputs/input_mixin.py +++ b/src/backend/base/langflow/inputs/input_mixin.py @@ -27,9 +27,13 @@ class FieldTypes(str, Enum): # Base mixin for common input field attributes and methods class BaseInputMixin(BaseModel, validate_assignment=True): # type: ignore - model_config = ConfigDict(arbitrary_types_allowed=True, extra="forbid", populate_by_name=True) + model_config = ConfigDict( + 
arbitrary_types_allowed=True, + extra="forbid", + populate_by_name=True, + ) - field_type: SerializableFieldTypes = Field(default=FieldTypes.TEXT) + field_type: SerializableFieldTypes = Field(default=FieldTypes.TEXT, alias="type") required: bool = False """Specifies if the field is required. Defaults to False.""" diff --git a/src/backend/base/langflow/schema/data.py b/src/backend/base/langflow/schema/data.py index 2becf0e6cdaf..4bc34e297f6b 100644 --- a/src/backend/base/langflow/schema/data.py +++ b/src/backend/base/langflow/schema/data.py @@ -24,6 +24,7 @@ class Data(BaseModel): default_value: Optional[str] = "" @model_validator(mode="before") + @classmethod def validate_data(cls, values): if not isinstance(values, dict): raise ValueError("Data must be a dictionary") diff --git a/src/backend/base/langflow/template/field/base.py b/src/backend/base/langflow/template/field/base.py index f3583e466da2..4a09c656befa 100644 --- a/src/backend/base/langflow/template/field/base.py +++ b/src/backend/base/langflow/template/field/base.py @@ -184,12 +184,9 @@ def to_dict(self): return self.model_dump(by_alias=True, exclude_none=True) def add_types(self, _type: list[Any]): - for type_ in _type: - if self.types and type_ in self.types: - continue - if self.types is None: - self.types = [] - self.types.append(type_) + if self.types is None: + self.types = [] + self.types.extend([t for t in _type if t not in self.types]) def set_selected(self): if not self.selected and self.types: diff --git a/src/backend/base/langflow/template/field/prompt.py b/src/backend/base/langflow/template/field/prompt.py index 5ad43946ebbc..aed416cbca04 100644 --- a/src/backend/base/langflow/template/field/prompt.py +++ b/src/backend/base/langflow/template/field/prompt.py @@ -1,3 +1,3 @@ # This file is for backwards compatibility from langflow.inputs.inputs import DEFAULT_PROMPT_INTUT_TYPES # noqa -from langflow.inputs import DefaultPromptField # noqa +from langflow.inputs.inputs import DefaultPromptField # 
noqa diff --git a/src/backend/base/langflow/utils/util.py b/src/backend/base/langflow/utils/util.py index d58fdc4f8705..1c91869041f3 100644 --- a/src/backend/base/langflow/utils/util.py +++ b/src/backend/base/langflow/utils/util.py @@ -1,5 +1,6 @@ import importlib import inspect +import json import re from functools import wraps from pathlib import Path @@ -456,3 +457,7 @@ def is_class_method(func, cls): Check if a function is a class method. """ return inspect.ismethod(func) and func.__self__ is cls.__class__ + + +def escape_json_dump(edge_dict): + return json.dumps(edge_dict).replace('"', "œ") diff --git a/src/backend/tests/.test_durations b/src/backend/tests/.test_durations new file mode 100644 index 000000000000..1c8db054a1f5 --- /dev/null +++ b/src/backend/tests/.test_durations @@ -0,0 +1,354 @@ +{ + "src/backend/tests/test_endpoints.py::test_build_vertex_invalid_flow_id": 3.1494096249807626, + "src/backend/tests/test_endpoints.py::test_build_vertex_invalid_vertex_id": 3.0606157919974066, + "src/backend/tests/test_endpoints.py::test_get_all": 10.10167008501594, + "src/backend/tests/test_endpoints.py::test_get_vertices": 4.5017141660209745, + "src/backend/tests/test_endpoints.py::test_get_vertices_flow_not_found": 3.7886676250200253, + "src/backend/tests/test_endpoints.py::test_invalid_flow_id": 4.073716707964195, + "src/backend/tests/test_endpoints.py::test_invalid_prompt": 2.7002592499775346, + "src/backend/tests/test_endpoints.py::test_invalid_run_with_input_type_chat": 2.987766916019609, + "src/backend/tests/test_endpoints.py::test_post_validate_code": 3.0467621669813525, + "src/backend/tests/test_endpoints.py::test_successful_run_with_input_type_any": 14.8548604179814, + "src/backend/tests/test_endpoints.py::test_successful_run_with_input_type_chat": 6.242352208995726, + "src/backend/tests/test_endpoints.py::test_successful_run_with_input_type_text": 5.7594154170074034, + "src/backend/tests/test_endpoints.py::test_successful_run_with_output_type_any": 
7.347130999987712, + "src/backend/tests/test_endpoints.py::test_successful_run_with_output_type_debug": 6.291947416990297, + "src/backend/tests/test_endpoints.py::test_successful_run_with_output_type_text": 14.872085083043203, + "src/backend/tests/test_endpoints.py::test_valid_prompt": 2.7850471249839757, + "src/backend/tests/test_endpoints.py::test_various_prompts[The weather is {weather} today.-expected_input_variables1]": 2.535564499994507, + "src/backend/tests/test_endpoints.py::test_various_prompts[This prompt has no variables.-expected_input_variables2]": 9.15231529099401, + "src/backend/tests/test_endpoints.py::test_various_prompts[{a}, {b}, and {c} are variables.-expected_input_variables3]": 2.640623040992068, + "src/backend/tests/test_endpoints.py::test_various_prompts[{color} is my favorite color.-expected_input_variables0]": 2.079908042011084, + "src/backend/tests/test_messages_endpoints.py::test_delete_messages": 2.515260499989381, + "src/backend/tests/test_messages_endpoints.py::test_delete_messages_session": 2.3651068749895785, + "src/backend/tests/test_messages_endpoints.py::test_update_message": 2.5627032090269495, + "src/backend/tests/test_messages_endpoints.py::test_update_message_not_found": 3.504595792008331, + "src/backend/tests/test_schema.py::TestInput::test_field_type_str": 0.0005162910092622042, + "src/backend/tests/test_schema.py::TestInput::test_field_type_type": 0.0002682080084923655, + "src/backend/tests/test_schema.py::TestInput::test_input_to_dict": 0.0003857500269077718, + "src/backend/tests/test_schema.py::TestInput::test_invalid_field_type": 0.00031291699269786477, + "src/backend/tests/test_schema.py::TestInput::test_post_process_type_function": 0.0005505419976543635, + "src/backend/tests/test_schema.py::TestInput::test_serialize_field_type": 0.0002683750062715262, + "src/backend/tests/test_schema.py::TestInput::test_validate_type_class": 0.0003414590028114617, + 
"src/backend/tests/test_schema.py::TestInput::test_validate_type_string": 0.0002427089784760028, + "src/backend/tests/test_schema.py::TestOutput::test_output_add_types": 0.000245749979512766, + "src/backend/tests/test_schema.py::TestOutput::test_output_default": 0.00026183397858403623, + "src/backend/tests/test_schema.py::TestOutput::test_output_set_selected": 0.0003107920056208968, + "src/backend/tests/test_schema.py::TestOutput::test_output_to_dict": 0.0004964589898008853, + "src/backend/tests/test_schema.py::TestOutput::test_output_validate_display_name": 0.0005334159650374204, + "src/backend/tests/test_schema.py::TestOutput::test_output_validate_model": 0.00029370796983130276, + "src/backend/tests/test_schema.py::TestPostProcessType::test_custom_type": 0.001362041017273441, + "src/backend/tests/test_schema.py::TestPostProcessType::test_int_type": 0.00023837501066736877, + "src/backend/tests/test_schema.py::TestPostProcessType::test_list_custom_type": 0.004543458024272695, + "src/backend/tests/test_schema.py::TestPostProcessType::test_list_int_type": 0.0002362079976592213, + "src/backend/tests/test_schema.py::TestPostProcessType::test_union_custom_type": 0.0005842499958816916, + "src/backend/tests/test_schema.py::TestPostProcessType::test_union_type": 0.003973040962591767, + "src/backend/tests/test_user.py::test_add_user": 3.298028166987933, + "src/backend/tests/test_user.py::test_data_consistency_after_delete": 10.030325876054121, + "src/backend/tests/test_user.py::test_data_consistency_after_update": 2.9754588740179315, + "src/backend/tests/test_user.py::test_deactivated_user_cannot_access": 3.544328290998237, + "src/backend/tests/test_user.py::test_deactivated_user_cannot_login": 3.9071091239748057, + "src/backend/tests/test_user.py::test_delete_user": 4.161238500004401, + "src/backend/tests/test_user.py::test_delete_user_wrong_id": 2.7632550839625765, + "src/backend/tests/test_user.py::test_inactive_user": 3.334006417018827, + 
"src/backend/tests/test_user.py::test_normal_user_cant_delete_user": 2.9729639159922954, + "src/backend/tests/test_user.py::test_normal_user_cant_read_all_users": 2.6966073329967912, + "src/backend/tests/test_user.py::test_patch_reset_password": 11.245606623997446, + "src/backend/tests/test_user.py::test_patch_user": 3.2588992070232052, + "src/backend/tests/test_user.py::test_patch_user_wrong_id": 3.3168086239602417, + "src/backend/tests/test_user.py::test_read_all_users": 2.440687207999872, + "src/backend/tests/test_user.py::test_user_waiting_for_approval": 9.475323291990208, + "src/backend/tests/test_webhook.py::test_webhook_endpoint": 12.348436542029958, + "src/backend/tests/test_webhook.py::test_webhook_flow_on_run_endpoint": 15.205204916041112, + "src/backend/tests/test_webhook.py::test_webhook_with_random_payload": 8.713364291994367, + "src/backend/tests/unit/components/prompts/test_prompt_component.py::TestPromptComponent::test_post_code_processing": 0.008164541970472783, + "src/backend/tests/unit/custom/custom_component/test_component.py::test_set_invalid_output": 0.000454624998383224, + "src/backend/tests/unit/graph/graph/test_base.py::test_graph": 0.007537708996096626, + "src/backend/tests/unit/graph/graph/test_base.py::test_graph_functional": 0.020996668026782572, + "src/backend/tests/unit/graph/graph/test_base.py::test_graph_functional_async_start": 0.009653333021560684, + "src/backend/tests/unit/graph/graph/test_base.py::test_graph_functional_start": 0.008162209036527202, + "src/backend/tests/unit/graph/graph/test_base.py::test_graph_functional_start_end": 0.06379004201153293, + "src/backend/tests/unit/graph/graph/test_base.py::test_graph_not_prepared": 0.01988037399132736, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_add_to_vertices_being_run": 2.476791499997489, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_are_all_predecessors_fulfilled": 2.3258769580570515, + 
"src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_are_all_predecessors_fulfilled__wrong": 2.4165378749894444, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_build_run_map": 2.5142138760129455, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_from_dict": 2.629594833997544, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_from_dict_without_run_map__bad_case": 8.874073583021527, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_from_dict_without_run_predecessors__bad_case": 2.743527958955383, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_from_dict_without_vertices_being_run__bad_case": 2.8369890000030864, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_from_dict_without_vertices_to_run__bad_case": 2.9151457909611054, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_is_vertex_runnable": 8.908991582982708, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_is_vertex_runnable__wrong_is_active": 2.637443292012904, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_is_vertex_runnable__wrong_run_predecessors": 2.747438082966255, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_is_vertex_runnable__wrong_vertices_to_run": 2.8337462919880636, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_pickle": 2.065233791974606, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_remove_from_predecessors": 8.867784041009145, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_remove_vertex_from_runnables": 2.2803797090018634, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_to_dict": 9.405242958950112, + 
"src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_update_run_state": 2.4422846660017967, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_update_vertex_run_state": 2.9607737089972943, + "src/backend/tests/unit/graph/graph/test_runnable_vertices_manager.py::test_update_vertex_run_state__bad_case": 2.485696541989455, + "src/backend/tests/unit/graph/graph/test_utils.py::test_get_successors_a": 8.826332042983267, + "src/backend/tests/unit/graph/graph/test_utils.py::test_get_successors_z": 2.949440208991291, + "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_a": 2.8428027920017485, + "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_g": 3.315444208041299, + "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_h": 2.983557416999247, + "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_invalid_vertex": 1.9296646670263726, + "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_m": 8.736605707992567, + "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_n_is_start": 2.5265350410190877, + "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_t": 2.3543146679585334, + "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_x": 2.094447916984791, + "src/backend/tests/unit/graph/graph/test_utils.py::test_sort_up_to_vertex_z": 2.7899541249789763, + "src/backend/tests/unit/graph/test_graph.py::test_build_edges": 2.053889958973741, + "src/backend/tests/unit/graph/test_graph.py::test_build_nodes": 3.264545250014635, + "src/backend/tests/unit/graph/test_graph.py::test_build_params": 1.6351483759935945, + "src/backend/tests/unit/graph/test_graph.py::test_circular_dependencies": 4.829830207978375, + "src/backend/tests/unit/graph/test_graph.py::test_find_last_node": 1.8075883749988861, + "src/backend/tests/unit/graph/test_graph.py::test_get_node": 
2.2939607900043484, + "src/backend/tests/unit/graph/test_graph.py::test_get_node_neighbors_basic": 2.5666640420095064, + "src/backend/tests/unit/graph/test_graph.py::test_get_root_vertex": 2.33814408298349, + "src/backend/tests/unit/graph/test_graph.py::test_get_vertices_with_target": 2.0869384160032496, + "src/backend/tests/unit/graph/test_graph.py::test_graph_structure": 3.1825925829762127, + "src/backend/tests/unit/graph/test_graph.py::test_invalid_node_types": 2.1994956269918475, + "src/backend/tests/unit/graph/test_graph.py::test_matched_type": 2.3932184999866877, + "src/backend/tests/unit/graph/test_graph.py::test_pickle_graph": 2.184392209019279, + "src/backend/tests/unit/graph/test_graph.py::test_process_flow": 2.1272420000168495, + "src/backend/tests/unit/graph/test_graph.py::test_process_flow_one_group": 1.9646992909838445, + "src/backend/tests/unit/graph/test_graph.py::test_process_flow_vector_store_grouped": 2.415951082977699, + "src/backend/tests/unit/graph/test_graph.py::test_set_new_target_handle": 1.7951639160164632, + "src/backend/tests/unit/graph/test_graph.py::test_ungroup_node": 2.0279515830043238, + "src/backend/tests/unit/graph/test_graph.py::test_update_source_handle": 2.0684266670432407, + "src/backend/tests/unit/graph/test_graph.py::test_update_target_handle_proxy": 1.5892521249479614, + "src/backend/tests/unit/graph/test_graph.py::test_update_template": 1.7316221670189407, + "src/backend/tests/unit/graph/test_graph.py::test_validate_edges": 1.920275832992047, + "src/backend/tests/unit/initial_setup/starter_projects/test_memory_chatbot.py::test_memory_chatbot": 2.1822710419946816, + "src/backend/tests/unit/initial_setup/starter_projects/test_memory_chatbot.py::test_memory_chatbot_dump_components_and_edges": 1.6609269159962423, + "src/backend/tests/unit/initial_setup/starter_projects/test_memory_chatbot.py::test_memory_chatbot_dump_structure": 2.5239112499984913, + 
"src/backend/tests/unit/initial_setup/starter_projects/test_vector_store_rag.py::test_vector_store_rag": 0.08740387603756972, + "src/backend/tests/unit/initial_setup/starter_projects/test_vector_store_rag.py::test_vector_store_rag_dump": 0.027446749998489395, + "src/backend/tests/unit/initial_setup/starter_projects/test_vector_store_rag.py::test_vector_store_rag_dump_components_and_edges": 0.026998917979653925, + "src/backend/tests/unit/inputs/test_inputs.py::test_bool_input_invalid": 0.0006447900377679616, + "src/backend/tests/unit/inputs/test_inputs.py::test_bool_input_valid": 0.00023025000700727105, + "src/backend/tests/unit/inputs/test_inputs.py::test_data_input_valid": 0.0005409169825725257, + "src/backend/tests/unit/inputs/test_inputs.py::test_dict_input_invalid": 0.00025333400117233396, + "src/backend/tests/unit/inputs/test_inputs.py::test_dict_input_valid": 0.0007362900068983436, + "src/backend/tests/unit/inputs/test_inputs.py::test_dropdown_input_invalid": 0.0009356669906992465, + "src/backend/tests/unit/inputs/test_inputs.py::test_dropdown_input_valid": 0.0004829160461667925, + "src/backend/tests/unit/inputs/test_inputs.py::test_file_input_valid": 0.00024799996754154563, + "src/backend/tests/unit/inputs/test_inputs.py::test_float_input_invalid": 0.00025445802020840347, + "src/backend/tests/unit/inputs/test_inputs.py::test_float_input_valid": 0.00024087497149594128, + "src/backend/tests/unit/inputs/test_inputs.py::test_handle_input_invalid": 0.0003797499812208116, + "src/backend/tests/unit/inputs/test_inputs.py::test_handle_input_valid": 0.0002122489968314767, + "src/backend/tests/unit/inputs/test_inputs.py::test_instantiate_input_comprehensive": 0.001500750018749386, + "src/backend/tests/unit/inputs/test_inputs.py::test_instantiate_input_invalid": 0.00027112496900372207, + "src/backend/tests/unit/inputs/test_inputs.py::test_instantiate_input_valid": 0.0005940830160398036, + "src/backend/tests/unit/inputs/test_inputs.py::test_int_input_invalid": 
0.00029999902471899986, + "src/backend/tests/unit/inputs/test_inputs.py::test_int_input_valid": 0.0004179589741397649, + "src/backend/tests/unit/inputs/test_inputs.py::test_message_text_input_invalid": 0.0004274170205462724, + "src/backend/tests/unit/inputs/test_inputs.py::test_message_text_input_valid": 0.0003730839816853404, + "src/backend/tests/unit/inputs/test_inputs.py::test_multiline_input_invalid": 0.00033208398963324726, + "src/backend/tests/unit/inputs/test_inputs.py::test_multiline_input_valid": 0.0003288750012870878, + "src/backend/tests/unit/inputs/test_inputs.py::test_multiline_secret_input_invalid": 0.00023066697758622468, + "src/backend/tests/unit/inputs/test_inputs.py::test_multiline_secret_input_valid": 0.00022683301358483732, + "src/backend/tests/unit/inputs/test_inputs.py::test_multiselect_input_invalid": 0.0002583339810371399, + "src/backend/tests/unit/inputs/test_inputs.py::test_multiselect_input_valid": 0.00027691599098034203, + "src/backend/tests/unit/inputs/test_inputs.py::test_nested_dict_input_invalid": 0.00041799998143687844, + "src/backend/tests/unit/inputs/test_inputs.py::test_nested_dict_input_valid": 0.004873417055932805, + "src/backend/tests/unit/inputs/test_inputs.py::test_prompt_input_valid": 0.0007432089769281447, + "src/backend/tests/unit/inputs/test_inputs.py::test_secret_str_input_invalid": 0.00033837597584351897, + "src/backend/tests/unit/inputs/test_inputs.py::test_secret_str_input_valid": 0.00022083398653194308, + "src/backend/tests/unit/inputs/test_inputs.py::test_str_input_invalid": 0.0004425400111358613, + "src/backend/tests/unit/inputs/test_inputs.py::test_str_input_valid": 0.0003231659939046949, + "src/backend/tests/unit/inputs/test_inputs.py::test_table_input_invalid": 0.010438332974445075, + "src/backend/tests/unit/inputs/test_inputs.py::test_table_input_valid": 0.0003158329927828163, + "src/backend/tests/unit/schema/test_schema_message.py::test_message_async_prompt_serialization": 0.4811764149926603, + 
"src/backend/tests/unit/schema/test_schema_message.py::test_message_prompt_serialization": 0.0009208739793393761, + "src/backend/tests/unit/test_api_key.py::test_create_api_key": 2.661131207976723, + "src/backend/tests/unit/test_api_key.py::test_delete_api_key": 2.671240749012213, + "src/backend/tests/unit/test_api_key.py::test_get_api_keys": 2.5765499170229305, + "src/backend/tests/unit/test_cache.py::test_build_graph": 3.0347800820018165, + "src/backend/tests/unit/test_chat_endpoint.py::test_build_flow": 8.144330751005327, + "src/backend/tests/unit/test_chat_endpoint.py::test_build_flow_from_request_data": 5.783452290983405, + "src/backend/tests/unit/test_chat_endpoint.py::test_build_flow_with_frozen_path": 7.61259591698763, + "src/backend/tests/unit/test_cli.py::test_components_path": 2.2392006250447594, + "src/backend/tests/unit/test_cli.py::test_superuser": 2.476592207007343, + "src/backend/tests/unit/test_custom_component.py::test_build_config_field_keys": 2.0540905419911724, + "src/backend/tests/unit/test_custom_component.py::test_build_config_field_value_keys": 1.9443145420227665, + "src/backend/tests/unit/test_custom_component.py::test_build_config_field_values_dict": 3.319497832970228, + "src/backend/tests/unit/test_custom_component.py::test_build_config_fields_dict": 1.8987905430258252, + "src/backend/tests/unit/test_custom_component.py::test_build_config_has_fields": 2.7343585419876035, + "src/backend/tests/unit/test_custom_component.py::test_build_config_no_code": 1.792709333007224, + "src/backend/tests/unit/test_custom_component.py::test_build_config_return_type": 2.3822355829761364, + "src/backend/tests/unit/test_custom_component.py::test_code_parser_get_tree": 1.952277623990085, + "src/backend/tests/unit/test_custom_component.py::test_code_parser_init": 2.0454807080095634, + "src/backend/tests/unit/test_custom_component.py::test_code_parser_parse_ann_assign": 2.451507583988132, + 
"src/backend/tests/unit/test_custom_component.py::test_code_parser_parse_arg_no_annotation": 2.353039957990404, + "src/backend/tests/unit/test_custom_component.py::test_code_parser_parse_arg_with_annotation": 1.7640878760430496, + "src/backend/tests/unit/test_custom_component.py::test_code_parser_parse_assign": 2.0938420000020415, + "src/backend/tests/unit/test_custom_component.py::test_code_parser_parse_callable_details_no_args": 1.7373172499937937, + "src/backend/tests/unit/test_custom_component.py::test_code_parser_parse_classes": 1.7848410429724026, + "src/backend/tests/unit/test_custom_component.py::test_code_parser_parse_function_def_init": 2.2325071259983815, + "src/backend/tests/unit/test_custom_component.py::test_code_parser_parse_function_def_not_init": 2.4196665409835987, + "src/backend/tests/unit/test_custom_component.py::test_code_parser_parse_functions": 2.178845665999688, + "src/backend/tests/unit/test_custom_component.py::test_code_parser_parse_global_vars": 2.4417894990183413, + "src/backend/tests/unit/test_custom_component.py::test_code_parser_parse_imports_import": 1.707990164984949, + "src/backend/tests/unit/test_custom_component.py::test_code_parser_parse_imports_importfrom": 2.1835803739959374, + "src/backend/tests/unit/test_custom_component.py::test_code_parser_syntax_error": 2.277529457001947, + "src/backend/tests/unit/test_custom_component.py::test_component_code_null_error": 1.9170069170068018, + "src/backend/tests/unit/test_custom_component.py::test_component_get_code_tree": 1.8569252080051228, + "src/backend/tests/unit/test_custom_component.py::test_component_get_code_tree_syntax_error": 2.174606874003075, + "src/backend/tests/unit/test_custom_component.py::test_component_get_function_valid": 1.9588903749827296, + "src/backend/tests/unit/test_custom_component.py::test_component_init": 2.5707569160149433, + "src/backend/tests/unit/test_custom_component.py::test_custom_component_build_not_implemented": 2.3832541239680722, + 
"src/backend/tests/unit/test_custom_component.py::test_custom_component_build_template_config": 1.767908540990902, + "src/backend/tests/unit/test_custom_component.py::test_custom_component_class_template_validation_no_code": 1.9784962920530234, + "src/backend/tests/unit/test_custom_component.py::test_custom_component_get_code_tree_syntax_error": 2.0324612080003135, + "src/backend/tests/unit/test_custom_component.py::test_custom_component_get_function": 1.6494328340049833, + "src/backend/tests/unit/test_custom_component.py::test_custom_component_get_function_entrypoint_args": 1.9188839579874184, + "src/backend/tests/unit/test_custom_component.py::test_custom_component_get_function_entrypoint_args_no_args": 2.0736736260005273, + "src/backend/tests/unit/test_custom_component.py::test_custom_component_get_function_entrypoint_return_type": 2.199567042000126, + "src/backend/tests/unit/test_custom_component.py::test_custom_component_get_function_entrypoint_return_type_no_return_type": 1.7597685409709811, + "src/backend/tests/unit/test_custom_component.py::test_custom_component_get_function_valid": 2.1681128749914933, + "src/backend/tests/unit/test_custom_component.py::test_custom_component_get_main_class_name": 1.658911792008439, + "src/backend/tests/unit/test_custom_component.py::test_custom_component_get_main_class_name_no_main_class": 1.4603167499881238, + "src/backend/tests/unit/test_custom_component.py::test_custom_component_init": 2.329375083994819, + "src/backend/tests/unit/test_custom_component.py::test_custom_component_multiple_outputs": 1.8187729580095038, + "src/backend/tests/unit/test_custom_component.py::test_list_flows_flow_objects": 1.981454541994026, + "src/backend/tests/unit/test_custom_component.py::test_list_flows_return_type": 1.8733046670095064, + "src/backend/tests/unit/test_data_class.py::test_add_method_for_integers": 1.7596916669863276, + "src/backend/tests/unit/test_data_class.py::test_add_method_for_strings": 2.099744750012178, + 
"src/backend/tests/unit/test_data_class.py::test_add_method_with_non_overlapping_keys": 2.0075557490054052, + "src/backend/tests/unit/test_data_class.py::test_conversion_from_document": 1.9583345409773756, + "src/backend/tests/unit/test_data_class.py::test_conversion_to_document": 1.953191417036578, + "src/backend/tests/unit/test_data_class.py::test_custom_attribute_get_set_del": 2.8074895000027027, + "src/backend/tests/unit/test_data_class.py::test_custom_attribute_setting_and_getting": 1.744376168033341, + "src/backend/tests/unit/test_data_class.py::test_data_initialization": 2.3415857510408387, + "src/backend/tests/unit/test_data_class.py::test_deep_copy": 1.5598407920042519, + "src/backend/tests/unit/test_data_class.py::test_dir_includes_data_keys": 2.4137807070219424, + "src/backend/tests/unit/test_data_class.py::test_dir_reflects_attribute_deletion": 1.8897194170276634, + "src/backend/tests/unit/test_data_class.py::test_get_text_with_empty_data": 2.669506582984468, + "src/backend/tests/unit/test_data_class.py::test_get_text_with_none_data": 1.8896955420204904, + "src/backend/tests/unit/test_data_class.py::test_get_text_with_text_key": 1.953111374983564, + "src/backend/tests/unit/test_data_class.py::test_get_text_without_text_key": 1.9460047910106368, + "src/backend/tests/unit/test_data_class.py::test_str_and_dir_methods": 2.6938894579943735, + "src/backend/tests/unit/test_data_class.py::test_validate_data_with_extra_keys": 1.9336464170191903, + "src/backend/tests/unit/test_data_components.py::test_build_with_multiple_urls": 0.026251333008985966, + "src/backend/tests/unit/test_data_components.py::test_directory_component_build_with_multithreading": 0.0020231239905115217, + "src/backend/tests/unit/test_data_components.py::test_directory_without_mocks": 0.38643029099330306, + "src/backend/tests/unit/test_data_components.py::test_failed_request": 0.011844915978144854, + "src/backend/tests/unit/test_data_components.py::test_parse_curl": 0.0004114170151297003, + 
"src/backend/tests/unit/test_data_components.py::test_successful_get_request": 0.015994457993656397, + "src/backend/tests/unit/test_data_components.py::test_timeout": 0.01364304099115543, + "src/backend/tests/unit/test_data_components.py::test_url_component": 0.5631265829724725, + "src/backend/tests/unit/test_database.py::test_create_flow": 2.5350006250082515, + "src/backend/tests/unit/test_database.py::test_create_flow_with_invalid_data": 2.6853410840267316, + "src/backend/tests/unit/test_database.py::test_create_flows": 3.4552309999999125, + "src/backend/tests/unit/test_database.py::test_delete_flow": 4.201302792993374, + "src/backend/tests/unit/test_database.py::test_delete_flows": 2.73355954195722, + "src/backend/tests/unit/test_database.py::test_delete_flows_with_transaction_and_build": 3.3879740410193335, + "src/backend/tests/unit/test_database.py::test_delete_nonexistent_flow": 2.906195083982311, + "src/backend/tests/unit/test_database.py::test_download_file": 2.6433084169693757, + "src/backend/tests/unit/test_database.py::test_get_nonexistent_flow": 2.9141747919784393, + "src/backend/tests/unit/test_database.py::test_load_flows": 2.3472657920210622, + "src/backend/tests/unit/test_database.py::test_migrate_transactions": 2.4188965820358135, + "src/backend/tests/unit/test_database.py::test_migrate_transactions_no_duckdb": 2.4176759159890935, + "src/backend/tests/unit/test_database.py::test_read_flow": 2.524181623972254, + "src/backend/tests/unit/test_database.py::test_read_flows": 3.3437811249750666, + "src/backend/tests/unit/test_database.py::test_read_only_starter_projects": 2.8177391680073924, + "src/backend/tests/unit/test_database.py::test_sqlite_pragmas": 2.2383368749869987, + "src/backend/tests/unit/test_database.py::test_update_flow": 3.1579460009816103, + "src/backend/tests/unit/test_database.py::test_update_flow_idempotency": 2.9125417500035837, + "src/backend/tests/unit/test_database.py::test_update_nonexistent_flow": 2.838372750993585, + 
"src/backend/tests/unit/test_database.py::test_upload_file": 2.6103912079997826, + "src/backend/tests/unit/test_experimental_components.py::test_python_function_component": 2.076999415992759, + "src/backend/tests/unit/test_files.py::test_delete_file": 2.799217874009628, + "src/backend/tests/unit/test_files.py::test_download_file": 2.51829199999338, + "src/backend/tests/unit/test_files.py::test_file_operations": 3.3802113739948254, + "src/backend/tests/unit/test_files.py::test_list_files": 2.7689662509947084, + "src/backend/tests/unit/test_files.py::test_upload_file": 3.3243832079751883, + "src/backend/tests/unit/test_frontend_nodes.py::test_frontend_node_to_dict": 2.1432127919979393, + "src/backend/tests/unit/test_frontend_nodes.py::test_template_field_defaults": 2.2584460410289466, + "src/backend/tests/unit/test_frontend_nodes.py::test_template_to_dict": 2.8126436240272596, + "src/backend/tests/unit/test_helper_components.py::test_data_as_text_component": 2.214979208976729, + "src/backend/tests/unit/test_helper_components.py::test_uuid_generator_component": 3.2323666680022143, + "src/backend/tests/unit/test_initial_setup.py::test_create_or_update_starter_projects": 2.2390285000146832, + "src/backend/tests/unit/test_initial_setup.py::test_get_project_data": 2.6407637080119457, + "src/backend/tests/unit/test_initial_setup.py::test_load_starter_projects": 2.2846807509777136, + "src/backend/tests/unit/test_initial_setup.py::test_refresh_starter_projects": 5.397776041994803, + "src/backend/tests/unit/test_kubernetes_secrets.py::test_create_secret": 2.1846052090113517, + "src/backend/tests/unit/test_kubernetes_secrets.py::test_delete_secret": 2.4931142500427086, + "src/backend/tests/unit/test_kubernetes_secrets.py::test_email_address": 3.9143964989925735, + "src/backend/tests/unit/test_kubernetes_secrets.py::test_encode_string": 2.658771957969293, + "src/backend/tests/unit/test_kubernetes_secrets.py::test_encode_uuid": 3.536810541001614, + 
"src/backend/tests/unit/test_kubernetes_secrets.py::test_ends_with_non_alphanumeric": 2.6400313759804703, + "src/backend/tests/unit/test_kubernetes_secrets.py::test_get_secret": 2.2426519600267056, + "src/backend/tests/unit/test_kubernetes_secrets.py::test_long_string": 2.5769125409715343, + "src/backend/tests/unit/test_kubernetes_secrets.py::test_starts_with_non_alphanumeric": 2.9591099170211237, + "src/backend/tests/unit/test_kubernetes_secrets.py::test_uuid_case_insensitivity": 2.8027220430085436, + "src/backend/tests/unit/test_loading.py::test_load_flow_from_json": 2.0592095839965623, + "src/backend/tests/unit/test_loading.py::test_load_flow_from_json_object": 0.05121591599890962, + "src/backend/tests/unit/test_loading.py::test_load_flow_from_json_with_tweaks": 2.3594004999904428, + "src/backend/tests/unit/test_logger.py::test_enabled": 2.366979167010868, + "src/backend/tests/unit/test_logger.py::test_get_after_timestamp": 2.843343543005176, + "src/backend/tests/unit/test_logger.py::test_get_before_timestamp": 2.0495081240078434, + "src/backend/tests/unit/test_logger.py::test_get_last_n": 3.3493437920114957, + "src/backend/tests/unit/test_logger.py::test_init_default": 4.3632337910239585, + "src/backend/tests/unit/test_logger.py::test_init_with_env_variable": 2.747672124998644, + "src/backend/tests/unit/test_logger.py::test_len": 3.0128796670178417, + "src/backend/tests/unit/test_logger.py::test_max_size": 2.5830446239560843, + "src/backend/tests/unit/test_logger.py::test_write": 3.656159915990429, + "src/backend/tests/unit/test_logger.py::test_write_overflow": 4.817402709042653, + "src/backend/tests/unit/test_login.py::test_login_successful": 3.081307165994076, + "src/backend/tests/unit/test_login.py::test_login_unsuccessful_wrong_password": 3.23898391702096, + "src/backend/tests/unit/test_login.py::test_login_unsuccessful_wrong_username": 2.315841832984006, + "src/backend/tests/unit/test_messages.py::test_add_messages": 2.261571748997085, + 
"src/backend/tests/unit/test_messages.py::test_add_messagetables": 2.3893967490294017, + "src/backend/tests/unit/test_messages.py::test_convert_to_langchain[convert_to_langchain_type]": 3.2416470000171103, + "src/backend/tests/unit/test_messages.py::test_convert_to_langchain[message]": 2.004590749013005, + "src/backend/tests/unit/test_messages.py::test_delete_messages": 2.3871561660198495, + "src/backend/tests/unit/test_messages.py::test_get_messages": 2.0157879999896977, + "src/backend/tests/unit/test_messages.py::test_store_message": 1.9027003319642972, + "src/backend/tests/unit/test_process.py::test_load_langchain_object_with_cached_session": 2.2040907500195317, + "src/backend/tests/unit/test_process.py::test_load_langchain_object_with_no_cached_session": 2.883888084004866, + "src/backend/tests/unit/test_process.py::test_load_langchain_object_without_session_id": 2.329415375017561, + "src/backend/tests/unit/test_process.py::test_multiple_tweaks": 2.047054874972673, + "src/backend/tests/unit/test_process.py::test_no_tweaks": 1.952550498972414, + "src/backend/tests/unit/test_process.py::test_single_tweak": 2.148759791016346, + "src/backend/tests/unit/test_process.py::test_tweak_no_node_id": 3.1278445839998312, + "src/backend/tests/unit/test_process.py::test_tweak_not_in_template": 2.113566374988295, + "src/backend/tests/unit/test_setup_superuser.py::test_teardown_superuser_default_superuser": 2.256663625012152, + "src/backend/tests/unit/test_setup_superuser.py::test_teardown_superuser_no_default_superuser": 2.211974083009409, + "src/backend/tests/unit/test_telemetry.py::test_gauge": 2.4124685839633457, + "src/backend/tests/unit/test_telemetry.py::test_gauge_with_counter_method": 2.186464792001061, + "src/backend/tests/unit/test_telemetry.py::test_gauge_with_historgram_method": 2.6113254159863573, + "src/backend/tests/unit/test_telemetry.py::test_gauge_with_up_down_counter_method": 2.225708084035432, + 
"src/backend/tests/unit/test_telemetry.py::test_increment_counter": 2.130592000001343, + "src/backend/tests/unit/test_telemetry.py::test_increment_counter_empty_label": 2.2976541249954607, + "src/backend/tests/unit/test_telemetry.py::test_increment_counter_missing_mandatory_label": 2.443581625993829, + "src/backend/tests/unit/test_telemetry.py::test_increment_counter_unregisted_metric": 2.630037208989961, + "src/backend/tests/unit/test_telemetry.py::test_init": 2.1476573330292013, + "src/backend/tests/unit/test_telemetry.py::test_missing_labels": 2.3570764580217656, + "src/backend/tests/unit/test_telemetry.py::test_multithreaded_singleton": 2.390594750002492, + "src/backend/tests/unit/test_telemetry.py::test_multithreaded_singleton_race_condition": 2.437567832006607, + "src/backend/tests/unit/test_telemetry.py::test_opentelementry_singleton": 3.0913529580284376, + "src/backend/tests/unit/test_template.py::test_build_template_from_function": 2.9347434579976834, + "src/backend/tests/unit/test_template.py::test_get_base_classes": 2.118878333014436, + "src/backend/tests/unit/test_template.py::test_get_default_factory": 2.143760042003123, + "src/backend/tests/unit/test_validate_code.py::test_create_function": 2.2330028339929413, + "src/backend/tests/unit/test_validate_code.py::test_execute_function_missing_function": 1.7509510009840596, + "src/backend/tests/unit/test_validate_code.py::test_execute_function_missing_module": 1.850503541965736, + "src/backend/tests/unit/test_validate_code.py::test_execute_function_missing_schema": 1.81301379200886, + "src/backend/tests/unit/test_validate_code.py::test_execute_function_success": 2.074162457982311, + "src/backend/tests/unit/test_validate_code.py::test_validate_code": 2.2336132919881493, + "src/backend/tests/unit/test_version.py::test_compute_main": 2.139949625969166, + "src/backend/tests/unit/test_version.py::test_version": 1.6260940420324914, + 
"src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol::password@host-protocol::password@host]": 0.0012022080190945417, + "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol:user:pa:ss:word@host-protocol:user:pa:ss:word@host]": 0.000848833005875349, + "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol:user:pa@ss@word@host-protocol:user:pa%40ss%40word@host]": 0.001619456976186484, + "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol:user:pass@word@host-protocol:user:pass%40word@host]": 0.000586748996283859, + "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol:user:password@-protocol:user:password@]": 0.0025060399784706533, + "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol:user:password@host-protocol:user:password@host]": 0.0007859579636715353, + "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[protocol:user@host-protocol:user@host]": 0.0004099990183021873, + "src/backend/tests/unit/utils/test_connection_string_parser.py::test_transform_connection_string[user:password@host-user:password@host]": 0.0005190849769860506 +} \ No newline at end of file diff --git a/src/backend/tests/unit/initial_setup/starter_projects/test_memory_chatbot.py b/src/backend/tests/unit/initial_setup/starter_projects/test_memory_chatbot.py index 3fd135de288b..afe24441cef2 100644 --- a/src/backend/tests/unit/initial_setup/starter_projects/test_memory_chatbot.py +++ b/src/backend/tests/unit/initial_setup/starter_projects/test_memory_chatbot.py @@ -1,5 +1,7 @@ from collections import deque +import pytest + from langflow.components.helpers.Memory import MemoryComponent from langflow.components.inputs.ChatInput import ChatInput 
from langflow.components.models.OpenAIModel import OpenAIModelComponent @@ -7,9 +9,11 @@ from langflow.components.prompts.Prompt import PromptComponent from langflow.graph import Graph from langflow.graph.graph.constants import Finish +from langflow.graph.graph.schema import GraphDump -def test_memory_chatbot(): +@pytest.fixture +def memory_chatbot_graph(): session_id = "test_session_id" template = """{context} User: {user_message} AI: """ @@ -32,10 +36,87 @@ def test_memory_chatbot(): chat_output.set(input_value=openai_component.text_response) graph = Graph(chat_input, chat_output) + return graph + + +def test_memory_chatbot(memory_chatbot_graph): # Now we run step by step expected_order = deque(["chat_input", "chat_memory", "prompt", "openai", "chat_output"]) for step in expected_order: - result = graph.step() + result = memory_chatbot_graph.step() if isinstance(result, Finish): break assert step == result.vertex.id + + +def test_memory_chatbot_dump_structure(memory_chatbot_graph: Graph): + # Dump the graph and verify the serialized structure + graph_dict = memory_chatbot_graph.dump( + name="Memory Chatbot", description="A memory chatbot", endpoint_name="membot" + ) + assert isinstance(graph_dict, dict) + # Test structure + assert "data" in graph_dict + assert "is_component" in graph_dict + + data_dict = graph_dict["data"] + assert "nodes" in data_dict + assert "edges" in data_dict + assert "description" in graph_dict + assert "endpoint_name" in graph_dict + + # Test data + nodes = data_dict["nodes"] + edges = data_dict["edges"] + description = graph_dict["description"] + endpoint_name = graph_dict["endpoint_name"] + + assert len(nodes) == 5 + assert len(edges) == 4 + assert description is not None + assert endpoint_name is not None + + +def test_memory_chatbot_dump_components_and_edges(memory_chatbot_graph: Graph): + # Check all components and edges were dumped correctly + graph_dict: GraphDump = memory_chatbot_graph.dump( + name="Memory Chatbot", description="A memory chatbot", endpoint_name="membot" + ) + + 
data_dict = graph_dict["data"] + nodes = data_dict["nodes"] + edges = data_dict["edges"] + + # sort the nodes by id + nodes = sorted(nodes, key=lambda x: x["id"]) + + # Check each node + assert nodes[0]["data"]["type"] == "ChatInput" + assert nodes[0]["id"] == "chat_input" + + assert nodes[1]["data"]["type"] == "MemoryComponent" + assert nodes[1]["id"] == "chat_memory" + + assert nodes[2]["data"]["type"] == "ChatOutput" + assert nodes[2]["id"] == "chat_output" + + assert nodes[3]["data"]["type"] == "OpenAIModelComponent" + assert nodes[3]["id"] == "openai" + + assert nodes[4]["data"]["type"] == "PromptComponent" + assert nodes[4]["id"] == "prompt" + + # Check edges + expected_edges = [ + ("chat_input", "prompt"), + ("chat_memory", "prompt"), + ("prompt", "openai"), + ("openai", "chat_output"), + ] + + assert len(edges) == len(expected_edges) + + for edge in edges: + source = edge["source"] + target = edge["target"] + assert (source, target) in expected_edges, edge diff --git a/src/backend/tests/unit/initial_setup/starter_projects/test_vector_store_rag.py b/src/backend/tests/unit/initial_setup/starter_projects/test_vector_store_rag.py index 1482ed556212..95fc5a4e74d6 100644 --- a/src/backend/tests/unit/initial_setup/starter_projects/test_vector_store_rag.py +++ b/src/backend/tests/unit/initial_setup/starter_projects/test_vector_store_rag.py @@ -1,5 +1,7 @@ from textwrap import dedent +import pytest + from langflow.components.data.File import FileComponent from langflow.components.embeddings.OpenAIEmbeddings import OpenAIEmbeddingsComponent from langflow.components.helpers.ParseData import ParseDataComponent @@ -14,10 +16,17 @@ from langflow.schema.data import Data -def test_vector_store_rag(): +@pytest.fixture +def client(): + pass + + +@pytest.fixture +def ingestion_graph(): # Ingestion Graph file_component = FileComponent(_id="file-123") file_component.set(path="test.txt") + file_component.set_output_value("data", Data(text="This is a test file.")) text_splitter = 
SplitTextComponent(_id="text-splitter-123") text_splitter.set(data_inputs=file_component.load_file) openai_embeddings = OpenAIEmbeddingsComponent(_id="openai-embeddings-123") @@ -31,8 +40,18 @@ def test_vector_store_rag(): api_endpoint="https://astra.example.com", token="token", ) + vector_store.set_output_value("vector_store", "mock_vector_store") + vector_store.set_output_value("base_retriever", "mock_retriever") + vector_store.set_output_value("search_results", [Data(text="This is a test file.")]) + ingestion_graph = Graph(file_component, vector_store) + return ingestion_graph + + +@pytest.fixture +def rag_graph(): # RAG Graph + openai_embeddings = OpenAIEmbeddingsComponent(_id="openai-embeddings-124") chat_input = ChatInput(_id="chatinput-123") chat_input.get_output("message").value = "What is the meaning of life?" rag_vector_store = AstraVectorStoreComponent(_id="rag-vector-store-123") @@ -69,21 +88,160 @@ def test_vector_store_rag(): chat_output.set(input_value=openai_component.text_response) graph = Graph(start=chat_input, end=chat_output) - assert graph is not None - ids = [ + return graph + + +def test_vector_store_rag(ingestion_graph, rag_graph): + assert ingestion_graph is not None + ingestion_ids = [ + "file-123", + "text-splitter-123", + "openai-embeddings-123", + "vector-store-123", + ] + assert rag_graph is not None + rag_ids = [ "chatinput-123", "chatoutput-123", "openai-123", "parse-data-123", "prompt-123", "rag-vector-store-123", - "openai-embeddings-123", + "openai-embeddings-124", + ] + for ids, graph, len_results in zip([ingestion_ids, rag_ids], [ingestion_graph, rag_graph], [5, 8]): + results = [] + for result in graph.start(): + results.append(result) + + assert len(results) == len_results + vids = [result.vertex.id for result in results if hasattr(result, "vertex")] + assert all(vid in ids for vid in vids), f"Diff: {set(vids) - set(ids)}" + assert results[-1] == Finish() + + +def 
test_vector_store_rag_dump_components_and_edges(ingestion_graph, rag_graph): + # Test ingestion graph components and edges + ingestion_graph_dump = ingestion_graph.dump( + name="Ingestion Graph", description="Graph for data ingestion", endpoint_name="ingestion" + ) + + ingestion_data = ingestion_graph_dump["data"] + ingestion_nodes = ingestion_data["nodes"] + ingestion_edges = ingestion_data["edges"] + + # Sort nodes by id to check components + ingestion_nodes = sorted(ingestion_nodes, key=lambda x: x["id"]) + + # Check components in the ingestion graph + assert ingestion_nodes[0]["data"]["type"] == "FileComponent" + assert ingestion_nodes[0]["id"] == "file-123" + + assert ingestion_nodes[1]["data"]["type"] == "OpenAIEmbeddingsComponent" + assert ingestion_nodes[1]["id"] == "openai-embeddings-123" + + assert ingestion_nodes[2]["data"]["type"] == "SplitTextComponent" + assert ingestion_nodes[2]["id"] == "text-splitter-123" + + assert ingestion_nodes[3]["data"]["type"] == "AstraVectorStoreComponent" + assert ingestion_nodes[3]["id"] == "vector-store-123" + + # Check edges in the ingestion graph + expected_ingestion_edges = [ + ("file-123", "text-splitter-123"), + ("text-splitter-123", "vector-store-123"), + ("openai-embeddings-123", "vector-store-123"), + ] + assert len(ingestion_edges) == len(expected_ingestion_edges) + + for edge in ingestion_edges: + source = edge["source"] + target = edge["target"] + assert (source, target) in expected_ingestion_edges, edge + + # Test RAG graph components and edges + rag_graph_dump = rag_graph.dump( + name="RAG Graph", description="Graph for Retrieval-Augmented Generation", endpoint_name="rag" + ) + + rag_data = rag_graph_dump["data"] + rag_nodes = rag_data["nodes"] + rag_edges = rag_data["edges"] + + # Sort nodes by id to check components + rag_nodes = sorted(rag_nodes, key=lambda x: x["id"]) + + # Check components in the RAG graph + assert rag_nodes[0]["data"]["type"] == "ChatInput" + assert rag_nodes[0]["id"] == 
"chatinput-123" + + assert rag_nodes[1]["data"]["type"] == "ChatOutput" + assert rag_nodes[1]["id"] == "chatoutput-123" + + assert rag_nodes[2]["data"]["type"] == "OpenAIModelComponent" + assert rag_nodes[2]["id"] == "openai-123" + + assert rag_nodes[3]["data"]["type"] == "OpenAIEmbeddingsComponent" + assert rag_nodes[3]["id"] == "openai-embeddings-124" + + assert rag_nodes[4]["data"]["type"] == "ParseDataComponent" + assert rag_nodes[4]["id"] == "parse-data-123" + + assert rag_nodes[5]["data"]["type"] == "PromptComponent" + assert rag_nodes[5]["id"] == "prompt-123" + + assert rag_nodes[6]["data"]["type"] == "AstraVectorStoreComponent" + assert rag_nodes[6]["id"] == "rag-vector-store-123" + + # Check edges in the RAG graph + expected_rag_edges = [ + ("chatinput-123", "rag-vector-store-123"), + ("openai-embeddings-124", "rag-vector-store-123"), + ("chatinput-123", "prompt-123"), + ("rag-vector-store-123", "parse-data-123"), + ("parse-data-123", "prompt-123"), + ("prompt-123", "openai-123"), + ("openai-123", "chatoutput-123"), ] - results = [] - for result in graph.start(): - results.append(result) - - assert len(results) == 8 - vids = [result.vertex.id for result in results if hasattr(result, "vertex")] - assert all(vid in ids for vid in vids), f"Diff: {set(vids) - set(ids)}" - assert results[-1] == Finish() + assert len(rag_edges) == len(expected_rag_edges), rag_edges + + for edge in rag_edges: + source = edge["source"] + target = edge["target"] + assert (source, target) in expected_rag_edges, f"Edge {source} -> {target} not found" + + +def test_vector_store_rag_dump(ingestion_graph, rag_graph): + # Test ingestion graph dump + ingestion_graph_dump = ingestion_graph.dump( + name="Ingestion Graph", description="Graph for data ingestion", endpoint_name="ingestion" + ) + assert isinstance(ingestion_graph_dump, dict) + + ingestion_data = ingestion_graph_dump["data"] + assert "nodes" in ingestion_data + assert "edges" in ingestion_data + assert "description" in 
ingestion_graph_dump + assert "endpoint_name" in ingestion_graph_dump + + ingestion_nodes = ingestion_data["nodes"] + ingestion_edges = ingestion_data["edges"] + assert len(ingestion_nodes) == 4 # There are 4 components in the ingestion graph + assert len(ingestion_edges) == 3 # There are 3 connections between components + + # Test RAG graph dump + rag_graph_dump = rag_graph.dump( + name="RAG Graph", description="Graph for Retrieval-Augmented Generation", endpoint_name="rag" + ) + assert isinstance(rag_graph_dump, dict) + + rag_data = rag_graph_dump["data"] + assert "nodes" in rag_data + assert "edges" in rag_data + assert "description" in rag_graph_dump + assert "endpoint_name" in rag_graph_dump + + rag_nodes = rag_data["nodes"] + rag_edges = rag_data["edges"] + assert len(rag_nodes) == 7 # There are 7 components in the RAG graph + assert len(rag_edges) == 7 # There are 7 connections between components