From 93408fb5e457c1a9e5977240373642b801906c43 Mon Sep 17 00:00:00 2001
From: Yuqi Tang
Date: Thu, 26 Sep 2024 11:12:15 -0700
Subject: [PATCH 01/11] fix notes style

---
 .../components/NodeDescription/index.tsx      |  2 +-
 .../src/CustomNodes/NoteNode/index.tsx        | 19 +---
 src/frontend/src/constants/constants.ts      |  1 +
 .../components/PageComponent/index.tsx       | 86 +++++++++++--------
 src/frontend/src/style/index.css             |  2 +
 src/frontend/tailwind.config.mjs             | 12 +++
 6 files changed, 65 insertions(+), 57 deletions(-)

diff --git a/src/frontend/src/CustomNodes/GenericNode/components/NodeDescription/index.tsx b/src/frontend/src/CustomNodes/GenericNode/components/NodeDescription/index.tsx
index fef0ac14666..b74e7955689 100644
--- a/src/frontend/src/CustomNodes/GenericNode/components/NodeDescription/index.tsx
+++ b/src/frontend/src/CustomNodes/GenericNode/components/NodeDescription/index.tsx
@@ -146,7 +146,7 @@ export default function NodeDescription({
   ) : (
diff --git a/src/frontend/src/CustomNodes/NoteNode/index.tsx b/src/frontend/src/CustomNodes/NoteNode/index.tsx
index 64c63272b3c..fcd368875d4 100644
--- a/src/frontend/src/CustomNodes/NoteNode/index.tsx
+++ b/src/frontend/src/CustomNodes/NoteNode/index.tsx
@@ -9,9 +9,7 @@ import { noteDataType } from "@/types/flow";
 import { cn } from "@/utils/utils";
 import { useEffect, useMemo, useRef, useState } from "react";
 import { NodeResizer, NodeToolbar } from "reactflow";
-import IconComponent from "../../components/genericIconComponent";
 import NodeDescription from "../GenericNode/components/NodeDescription";
-import NodeName from "../GenericNode/components/NodeName";
 import NoteToolbarComponent from "./NoteToolbarComponent";
 function NoteNode({
   data,
@@ -67,25 +65,10 @@ function NoteNode({
       }}
       ref={nodeDiv}
       className={cn(
-        "flex h-full w-full flex-col gap-3 rounded-md border border-b p-5 transition-all",
+        "flex h-full w-full flex-col gap-3 border border-b p-5 transition-all",
         selected ? "" : "-z-50 shadow-sm",
       )}
     >
-
-        [removed lines: note header block using IconComponent and NodeName]
(null);
   const currentFlowId = useFlowsManagerStore((state) => state.currentFlowId);
+  const [isAddingNote, setIsAddingNote] = useState(false);
+
   function handleGroupNode() {
     takeSnapshot();
     if (validateSelection(lastSelection!, edges).length === 0) {
@@ -439,9 +441,37 @@ export default function Page({ view }: { view?: boolean }): JSX.Element {
     [],
   );

-  const onPaneClick = useCallback(() => {
+  const onPaneClick = useCallback((event: React.MouseEvent) => {
     setFilterEdge([]);
-  }, []);
+    if (isAddingNote) {
+      const position = reactFlowInstance?.screenToFlowPosition({
+        x: event.clientX,
+        y: event.clientY,
+      });
+      const data = {
+        node: {
+          description: "",
+          display_name: "",
+          documentation: "",
+          template: {},
+        },
+        type: "note",
+      };
+      const newId = getNodeId(data.type);
+
+      const newNode: NodeType = {
+        id: newId,
+        type: "noteNode",
+        position: position || { x: 0, y: 0 },
+        data: {
+          ...data,
+          id: newId,
+        },
+      };
+      setNodes((nds) => nds.concat(newNode));
+      setIsAddingNote(false);
+    }
+  }, [isAddingNote, setNodes, reactFlowInstance, getNodeId, setFilterEdge]);

   return (
@@ -487,42 +532,7 @@ export default function Page({ view }: { view?: boolean }): JSX.Element { { - const wrapper = reactFlowWrapper.current!; - const viewport = reactFlowInstance?.getViewport(); - const x = wrapper.getBoundingClientRect().width / 2; - const y = wrapper.getBoundingClientRect().height / 2; - const nodePosition = - reactFlowInstance?.screenToFlowPosition({ x, y })!; - - const data = { - node: { - description: "", - display_name: "", - documentation: "", - template: {}, - }, - type: "note", - }; - const newId = getNodeId(data.type); - - const newNode: NodeType = { - id: newId, - type: "noteNode", - position: { x: 0, y: 0 }, - data: { - ...data, - id: newId, - }, - }; - paste( - { nodes: [newNode], edges: [] }, - { - x: nodePosition.x, - y: nodePosition?.y, - paneX: wrapper.getBoundingClientRect().x, - paneY: wrapper.getBoundingClientRect().y, - }, - ); + setIsAddingNote(true) }} className="postion react-flow__controls absolute -top-10" > diff --git a/src/frontend/src/style/index.css b/src/frontend/src/style/index.css index 287afead6c9..697c3ac4b86 100644 --- a/src/frontend/src/style/index.css +++ b/src/frontend/src/style/index.css @@ -71,6 +71,7 @@ --status-blue: #2563eb; --status-gray: #6b7280; --connection: #555; + --note-default: #f1f5f9; --note-indigo: #e0e7ff; --note-emerald: #d1fae5; --note-amber: #fef3c7; @@ -78,6 +79,7 @@ } .dark { + --note-default: #0f172a; --note-indigo: #312e81; --note-emerald: #064e3b; --note-amber: #78350f; diff --git a/src/frontend/tailwind.config.mjs b/src/frontend/tailwind.config.mjs index 201f55b20ea..d16237b4b74 100644 --- a/src/frontend/tailwind.config.mjs +++ b/src/frontend/tailwind.config.mjs @@ -255,6 +255,18 @@ const config = { outline: "none !important", outlineOffset: "0px !important", }, + ".note-node-markdown": { + lineHeight: "1", + "& ul li::marker": { + color: "black", + }, + "& ol li::marker": { + color: "black", + }, + "& h1, & h2, & h3, & h4, & h5, & h6, & p, & ul, & ol": { + marginBottom: "0.25rem", + }, + }, }); }), tailwindcssTypography, From 5e10bb8a8c00a052f71ffbd1018ffc097a3830a9 Mon Sep 17 00:00:00 2001 From: Yuqi Tang Date: Thu, 26 Sep 2024 11:13:05 -0700 Subject: [PATCH 02/11] fix notes style --- .../starter_projects/Vector Store RAG.json | 4 ++-- .../FlowPage/components/PageComponent/index.tsx | 15 --------------- 2 files changed, 2 insertions(+), 17 deletions(-) diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json b/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json index 55ca7a71701..2d7821088ea 100644 --- a/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json +++ b/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json @@ -656,7 +656,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from loguru import logger\n\nfrom langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom langflow.helpers import docs_to_data\nfrom langflow.inputs import DictInput, FloatInput, MessageTextInput\nfrom langflow.io import (\n BoolInput,\n DataInput,\n DropdownInput,\n HandleInput,\n IntInput,\n MultilineInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema import Data\n\n\nclass AstraVectorStoreComponent(LCVectorStoreComponent):\n display_name: str = \"Astra DB\"\n description: str = \"Implementation of Vector Store using Astra DB with search capabilities\"\n documentation: str = \"https://python.langchain.com/docs/integrations/vectorstores/astradb\"\n name 
= \"AstraDB\"\n icon: str = \"AstraDB\"\n\n VECTORIZE_PROVIDERS_MAPPING = {\n \"Azure OpenAI\": [\"azureOpenAI\", [\"text-embedding-3-small\", \"text-embedding-3-large\", \"text-embedding-ada-002\"]],\n \"Hugging Face - Dedicated\": [\"huggingfaceDedicated\", [\"endpoint-defined-model\"]],\n \"Hugging Face - Serverless\": [\n \"huggingface\",\n [\n \"sentence-transformers/all-MiniLM-L6-v2\",\n \"intfloat/multilingual-e5-large\",\n \"intfloat/multilingual-e5-large-instruct\",\n \"BAAI/bge-small-en-v1.5\",\n \"BAAI/bge-base-en-v1.5\",\n \"BAAI/bge-large-en-v1.5\",\n ],\n ],\n \"Jina AI\": [\n \"jinaAI\",\n [\n \"jina-embeddings-v2-base-en\",\n \"jina-embeddings-v2-base-de\",\n \"jina-embeddings-v2-base-es\",\n \"jina-embeddings-v2-base-code\",\n \"jina-embeddings-v2-base-zh\",\n ],\n ],\n \"Mistral AI\": [\"mistral\", [\"mistral-embed\"]],\n \"NVIDIA\": [\"nvidia\", [\"NV-Embed-QA\"]],\n \"OpenAI\": [\"openai\", [\"text-embedding-3-small\", \"text-embedding-3-large\", \"text-embedding-ada-002\"]],\n \"Upstage\": [\"upstageAI\", [\"solar-embedding-1-large\"]],\n \"Voyage AI\": [\n \"voyageAI\",\n [\"voyage-large-2-instruct\", \"voyage-law-2\", \"voyage-code-2\", \"voyage-large-2\", \"voyage-2\"],\n ],\n }\n\n inputs = [\n StrInput(\n name=\"collection_name\",\n display_name=\"Collection Name\",\n info=\"The name of the collection within Astra DB where the vectors will be stored.\",\n required=True,\n ),\n SecretStrInput(\n name=\"token\",\n display_name=\"Astra DB Application Token\",\n info=\"Authentication token for accessing Astra DB.\",\n value=\"ASTRA_DB_APPLICATION_TOKEN\",\n required=True,\n ),\n SecretStrInput(\n name=\"api_endpoint\",\n display_name=\"API Endpoint\",\n info=\"API endpoint URL for the Astra DB service.\",\n value=\"ASTRA_DB_API_ENDPOINT\",\n required=True,\n ),\n MultilineInput(\n name=\"search_input\",\n display_name=\"Search Input\",\n ),\n DataInput(\n name=\"ingest_data\",\n display_name=\"Ingest Data\",\n is_list=True,\n ),\n StrInput(\n name=\"namespace\",\n display_name=\"Namespace\",\n info=\"Optional namespace within Astra DB to use for the collection.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"embedding_service\",\n display_name=\"Embedding Model or Astra Vectorize\",\n info=\"Determines whether to use Astra Vectorize for the collection.\",\n options=[\"Embedding Model\", \"Astra Vectorize\"],\n real_time_refresh=True,\n value=\"Embedding Model\",\n ),\n HandleInput(\n name=\"embedding\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Allows an embedding model configuration.\",\n ),\n DropdownInput(\n name=\"metric\",\n display_name=\"Metric\",\n info=\"Optional distance metric for vector comparisons in the vector store.\",\n options=[\"cosine\", \"dot_product\", \"euclidean\"],\n advanced=True,\n ),\n IntInput(\n name=\"batch_size\",\n display_name=\"Batch Size\",\n info=\"Optional number of data to process in a single batch.\",\n advanced=True,\n ),\n IntInput(\n name=\"bulk_insert_batch_concurrency\",\n display_name=\"Bulk Insert Batch Concurrency\",\n info=\"Optional concurrency level for bulk insert operations.\",\n advanced=True,\n ),\n IntInput(\n name=\"bulk_insert_overwrite_concurrency\",\n display_name=\"Bulk Insert Overwrite Concurrency\",\n info=\"Optional concurrency level for bulk insert operations that overwrite existing data.\",\n advanced=True,\n ),\n IntInput(\n name=\"bulk_delete_concurrency\",\n display_name=\"Bulk Delete Concurrency\",\n info=\"Optional concurrency level for bulk delete 
operations.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"setup_mode\",\n display_name=\"Setup Mode\",\n info=\"Configuration mode for setting up the vector store, with options like 'Sync', 'Async', or 'Off'.\",\n options=[\"Sync\", \"Async\", \"Off\"],\n advanced=True,\n value=\"Sync\",\n ),\n BoolInput(\n name=\"pre_delete_collection\",\n display_name=\"Pre Delete Collection\",\n info=\"Boolean flag to determine whether to delete the collection before creating a new one.\",\n advanced=True,\n ),\n StrInput(\n name=\"metadata_indexing_include\",\n display_name=\"Metadata Indexing Include\",\n info=\"Optional list of metadata fields to include in the indexing.\",\n advanced=True,\n ),\n StrInput(\n name=\"metadata_indexing_exclude\",\n display_name=\"Metadata Indexing Exclude\",\n info=\"Optional list of metadata fields to exclude from the indexing.\",\n advanced=True,\n ),\n StrInput(\n name=\"collection_indexing_policy\",\n display_name=\"Collection Indexing Policy\",\n info=\"Optional dictionary defining the indexing policy for the collection.\",\n advanced=True,\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Number of Results\",\n info=\"Number of results to return.\",\n advanced=True,\n value=4,\n ),\n DropdownInput(\n name=\"search_type\",\n display_name=\"Search Type\",\n info=\"Search type to use\",\n options=[\"Similarity\", \"Similarity with score threshold\", \"MMR (Max Marginal Relevance)\"],\n value=\"Similarity\",\n advanced=True,\n ),\n FloatInput(\n name=\"search_score_threshold\",\n display_name=\"Search Score Threshold\",\n info=\"Minimum similarity score threshold for search results. (when using 'Similarity with score threshold')\",\n value=0,\n advanced=True,\n ),\n DictInput(\n name=\"search_filter\",\n display_name=\"Search Metadata Filter\",\n info=\"Optional dictionary of filters to apply to the search query.\",\n advanced=True,\n is_list=True,\n ),\n ]\n\n @check_cached_vector_store\n def insert_in_dict(self, build_config, field_name, new_parameters):\n # Insert the new key-value pair after the found key\n for new_field_name, new_parameter in new_parameters.items():\n # Get all the items as a list of tuples (key, value)\n items = list(build_config.items())\n\n # Find the index of the key to insert after\n for i, (key, value) in enumerate(items):\n if key == field_name:\n break\n\n items.insert(i + 1, (new_field_name, new_parameter))\n\n # Clear the original dictionary and update with the modified items\n build_config.clear()\n build_config.update(items)\n\n return build_config\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n if field_name == \"embedding_service\":\n if field_value == \"Astra Vectorize\":\n for field in [\"embedding\"]:\n if field in build_config:\n del build_config[field]\n\n new_parameter = DropdownInput(\n name=\"provider\",\n display_name=\"Vectorize Provider\",\n options=self.VECTORIZE_PROVIDERS_MAPPING.keys(),\n value=\"\",\n required=True,\n real_time_refresh=True,\n ).to_dict()\n\n self.insert_in_dict(build_config, \"embedding_service\", {\"provider\": new_parameter})\n else:\n for field in [\n \"provider\",\n \"z_00_model_name\",\n \"z_01_model_parameters\",\n \"z_02_api_key_name\",\n \"z_03_provider_api_key\",\n \"z_04_authentication\",\n ]:\n if field in build_config:\n del build_config[field]\n\n new_parameter = HandleInput(\n name=\"embedding\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Allows an embedding model 
configuration.\",\n ).to_dict()\n\n self.insert_in_dict(build_config, \"embedding_service\", {\"embedding\": new_parameter})\n\n elif field_name == \"provider\":\n for field in [\n \"z_00_model_name\",\n \"z_01_model_parameters\",\n \"z_02_api_key_name\",\n \"z_03_provider_api_key\",\n \"z_04_authentication\",\n ]:\n if field in build_config:\n del build_config[field]\n\n model_options = self.VECTORIZE_PROVIDERS_MAPPING[field_value][1]\n\n new_parameter_0 = DropdownInput(\n name=\"z_00_model_name\",\n display_name=\"Model Name\",\n info=f\"The embedding model to use for the selected provider. Each provider has a different set of models \"\n f\"available (full list at https://docs.datastax.com/en/astra-db-serverless/databases/embedding-generation.html):\\n\\n{', '.join(model_options)}\",\n options=model_options,\n required=True,\n ).to_dict()\n\n new_parameter_1 = DictInput(\n name=\"z_01_model_parameters\",\n display_name=\"Model Parameters\",\n is_list=True,\n ).to_dict()\n\n new_parameter_2 = MessageTextInput(\n name=\"z_02_api_key_name\",\n display_name=\"API Key name\",\n info=\"The name of the embeddings provider API key stored on Astra. If set, it will override the 'ProviderKey' in the authentication parameters.\",\n ).to_dict()\n\n new_parameter_3 = SecretStrInput(\n name=\"z_03_provider_api_key\",\n display_name=\"Provider API Key\",\n info=\"An alternative to the Astra Authentication that passes an API key for the provider with each request to Astra DB. This may be used when Vectorize is configured for the collection, but no corresponding provider secret is stored within Astra's key management system.\",\n ).to_dict()\n\n new_parameter_4 = DictInput(\n name=\"z_04_authentication\",\n display_name=\"Authentication parameters\",\n is_list=True,\n ).to_dict()\n\n self.insert_in_dict(\n build_config,\n \"provider\",\n {\n \"z_00_model_name\": new_parameter_0,\n \"z_01_model_parameters\": new_parameter_1,\n \"z_02_api_key_name\": new_parameter_2,\n \"z_03_provider_api_key\": new_parameter_3,\n \"z_04_authentication\": new_parameter_4,\n },\n )\n\n return build_config\n\n def build_vectorize_options(self, **kwargs):\n for attribute in [\n \"provider\",\n \"z_00_api_key_name\",\n \"z_01_model_name\",\n \"z_02_authentication\",\n \"z_03_provider_api_key\",\n \"z_04_model_parameters\",\n ]:\n if not hasattr(self, attribute):\n setattr(self, attribute, None)\n\n # Fetch values from kwargs if any self.* attributes are None\n provider_value = self.VECTORIZE_PROVIDERS_MAPPING.get(self.provider, [None])[0] or kwargs.get(\"provider\")\n authentication = {**(self.z_02_authentication or kwargs.get(\"z_02_authentication\", {}))}\n\n api_key_name = self.z_00_api_key_name or kwargs.get(\"z_00_api_key_name\")\n provider_key_name = self.z_03_provider_api_key or kwargs.get(\"z_03_provider_api_key\")\n if provider_key_name:\n authentication[\"providerKey\"] = provider_key_name\n if api_key_name:\n authentication[\"providerKey\"] = api_key_name\n\n return {\n # must match astrapy.info.CollectionVectorServiceOptions\n \"collection_vector_service_options\": {\n \"provider\": provider_value,\n \"modelName\": self.z_01_model_name or kwargs.get(\"z_01_model_name\"),\n \"authentication\": authentication,\n \"parameters\": self.z_04_model_parameters or kwargs.get(\"z_04_model_parameters\", {}),\n },\n \"collection_embedding_api_key\": self.z_03_provider_api_key or kwargs.get(\"z_03_provider_api_key\"),\n }\n\n @check_cached_vector_store\n def build_vector_store(self, vectorize_options=None):\n try:\n from 
langchain_astradb import AstraDBVectorStore\n from langchain_astradb.utils.astradb import SetupMode\n except ImportError:\n raise ImportError(\n \"Could not import langchain Astra DB integration package. \"\n \"Please install it with `pip install langchain-astradb`.\"\n )\n\n try:\n if not self.setup_mode:\n self.setup_mode = self._inputs[\"setup_mode\"].options[0]\n\n setup_mode_value = SetupMode[self.setup_mode.upper()]\n except KeyError:\n raise ValueError(f\"Invalid setup mode: {self.setup_mode}\")\n\n if self.embedding:\n embedding_dict = {\"embedding\": self.embedding}\n else:\n from astrapy.info import CollectionVectorServiceOptions\n\n dict_options = vectorize_options or self.build_vectorize_options()\n dict_options[\"authentication\"] = {\n k: v for k, v in dict_options.get(\"authentication\", {}).items() if k and v\n }\n dict_options[\"parameters\"] = {k: v for k, v in dict_options.get(\"parameters\", {}).items() if k and v}\n\n embedding_dict = {\n \"collection_vector_service_options\": CollectionVectorServiceOptions.from_dict(\n dict_options.get(\"collection_vector_service_options\", {})\n ),\n }\n\n vector_store_kwargs = {\n **embedding_dict,\n \"collection_name\": self.collection_name,\n \"token\": self.token,\n \"api_endpoint\": self.api_endpoint,\n \"namespace\": self.namespace or None,\n \"metric\": self.metric or None,\n \"batch_size\": self.batch_size or None,\n \"bulk_insert_batch_concurrency\": self.bulk_insert_batch_concurrency or None,\n \"bulk_insert_overwrite_concurrency\": self.bulk_insert_overwrite_concurrency or None,\n \"bulk_delete_concurrency\": self.bulk_delete_concurrency or None,\n \"setup_mode\": setup_mode_value,\n \"pre_delete_collection\": self.pre_delete_collection or False,\n }\n\n if self.metadata_indexing_include:\n vector_store_kwargs[\"metadata_indexing_include\"] = self.metadata_indexing_include\n elif self.metadata_indexing_exclude:\n vector_store_kwargs[\"metadata_indexing_exclude\"] = self.metadata_indexing_exclude\n elif self.collection_indexing_policy:\n vector_store_kwargs[\"collection_indexing_policy\"] = self.collection_indexing_policy\n\n try:\n vector_store = AstraDBVectorStore(**vector_store_kwargs)\n except Exception as e:\n raise ValueError(f\"Error initializing AstraDBVectorStore: {str(e)}\") from e\n\n self._add_documents_to_vector_store(vector_store)\n\n return vector_store\n\n def _add_documents_to_vector_store(self, vector_store):\n documents = []\n for _input in self.ingest_data or []:\n if isinstance(_input, Data):\n documents.append(_input.to_lc_document())\n else:\n raise ValueError(\"Vector Store Inputs must be Data objects.\")\n\n if documents:\n logger.debug(f\"Adding {len(documents)} documents to the Vector Store.\")\n try:\n vector_store.add_documents(documents)\n except Exception as e:\n raise ValueError(f\"Error adding documents to AstraDBVectorStore: {str(e)}\") from e\n else:\n logger.debug(\"No documents to add to the Vector Store.\")\n\n def _map_search_type(self):\n if self.search_type == \"Similarity with score threshold\":\n return \"similarity_score_threshold\"\n elif self.search_type == \"MMR (Max Marginal Relevance)\":\n return \"mmr\"\n else:\n return \"similarity\"\n\n def _build_search_args(self):\n args = {\n \"k\": self.number_of_results,\n \"score_threshold\": self.search_score_threshold,\n }\n\n if self.search_filter:\n clean_filter = {k: v for k, v in self.search_filter.items() if k and v}\n if len(clean_filter) > 0:\n args[\"filter\"] = clean_filter\n return args\n\n def search_documents(self, 
vector_store=None) -> list[Data]:\n if not vector_store:\n vector_store = self.build_vector_store()\n\n logger.debug(f\"Search input: {self.search_input}\")\n logger.debug(f\"Search type: {self.search_type}\")\n logger.debug(f\"Number of results: {self.number_of_results}\")\n\n if self.search_input and isinstance(self.search_input, str) and self.search_input.strip():\n try:\n search_type = self._map_search_type()\n search_args = self._build_search_args()\n\n docs = vector_store.search(query=self.search_input, search_type=search_type, **search_args)\n except Exception as e:\n raise ValueError(f\"Error performing search in AstraDBVectorStore: {str(e)}\") from e\n\n logger.debug(f\"Retrieved documents: {len(docs)}\")\n\n data = docs_to_data(docs)\n logger.debug(f\"Converted documents to data: {len(data)}\")\n self.status = data\n return data\n else:\n logger.debug(\"No search input provided. Skipping search.\")\n return []\n\n def get_retriever_kwargs(self):\n search_args = self._build_search_args()\n return {\n \"search_type\": self._map_search_type(),\n \"search_kwargs\": search_args,\n }\n" + "value": "from loguru import logger\n\nfrom langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom langflow.helpers import docs_to_data\nfrom langflow.inputs import DictInput, FloatInput, MessageTextInput\nfrom langflow.io import (\n BoolInput,\n DataInput,\n DropdownInput,\n HandleInput,\n IntInput,\n MultilineInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema import Data\n\n\nclass AstraVectorStoreComponent(LCVectorStoreComponent):\n display_name: str = \"Astra DB\"\n description: str = \"Implementation of Vector Store using Astra DB with search capabilities\"\n documentation: str = \"https://python.langchain.com/docs/integrations/vectorstores/astradb\"\n name = \"AstraDB\"\n icon: str = \"AstraDB\"\n\n VECTORIZE_PROVIDERS_MAPPING = {\n \"Azure OpenAI\": [\"azureOpenAI\", [\"text-embedding-3-small\", \"text-embedding-3-large\", \"text-embedding-ada-002\"]],\n \"Hugging Face - Dedicated\": [\"huggingfaceDedicated\", [\"endpoint-defined-model\"]],\n \"Hugging Face - Serverless\": [\n \"huggingface\",\n [\n \"sentence-transformers/all-MiniLM-L6-v2\",\n \"intfloat/multilingual-e5-large\",\n \"intfloat/multilingual-e5-large-instruct\",\n \"BAAI/bge-small-en-v1.5\",\n \"BAAI/bge-base-en-v1.5\",\n \"BAAI/bge-large-en-v1.5\",\n ],\n ],\n \"Jina AI\": [\n \"jinaAI\",\n [\n \"jina-embeddings-v2-base-en\",\n \"jina-embeddings-v2-base-de\",\n \"jina-embeddings-v2-base-es\",\n \"jina-embeddings-v2-base-code\",\n \"jina-embeddings-v2-base-zh\",\n ],\n ],\n \"Mistral AI\": [\"mistral\", [\"mistral-embed\"]],\n \"NVIDIA\": [\"nvidia\", [\"NV-Embed-QA\"]],\n \"OpenAI\": [\"openai\", [\"text-embedding-3-small\", \"text-embedding-3-large\", \"text-embedding-ada-002\"]],\n \"Upstage\": [\"upstageAI\", [\"solar-embedding-1-large\"]],\n \"Voyage AI\": [\n \"voyageAI\",\n [\"voyage-large-2-instruct\", \"voyage-law-2\", \"voyage-code-2\", \"voyage-large-2\", \"voyage-2\"],\n ],\n }\n\n inputs = [\n StrInput(\n name=\"collection_name\",\n display_name=\"Collection Name\",\n info=\"The name of the collection within Astra DB where the vectors will be stored.\",\n required=True,\n ),\n SecretStrInput(\n name=\"token\",\n display_name=\"Astra DB Application Token\",\n info=\"Authentication token for accessing Astra DB.\",\n value=\"ASTRA_DB_APPLICATION_TOKEN\",\n required=True,\n ),\n SecretStrInput(\n name=\"api_endpoint\",\n display_name=\"API Endpoint\",\n info=\"API 
endpoint URL for the Astra DB service.\",\n value=\"ASTRA_DB_API_ENDPOINT\",\n required=True,\n ),\n MultilineInput(\n name=\"search_input\",\n display_name=\"Search Input\",\n ),\n DataInput(\n name=\"ingest_data\",\n display_name=\"Ingest Data\",\n is_list=True,\n ),\n StrInput(\n name=\"namespace\",\n display_name=\"Namespace\",\n info=\"Optional namespace within Astra DB to use for the collection.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"embedding_service\",\n display_name=\"Embedding Model or Astra Vectorize\",\n info=\"Determines whether to use Astra Vectorize for the collection.\",\n options=[\"Embedding Model\", \"Astra Vectorize\"],\n real_time_refresh=True,\n value=\"Embedding Model\",\n ),\n HandleInput(\n name=\"embedding\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Allows an embedding model configuration.\",\n ),\n DropdownInput(\n name=\"metric\",\n display_name=\"Metric\",\n info=\"Optional distance metric for vector comparisons in the vector store.\",\n options=[\"cosine\", \"dot_product\", \"euclidean\"],\n advanced=True,\n ),\n IntInput(\n name=\"batch_size\",\n display_name=\"Batch Size\",\n info=\"Optional number of data to process in a single batch.\",\n advanced=True,\n ),\n IntInput(\n name=\"bulk_insert_batch_concurrency\",\n display_name=\"Bulk Insert Batch Concurrency\",\n info=\"Optional concurrency level for bulk insert operations.\",\n advanced=True,\n ),\n IntInput(\n name=\"bulk_insert_overwrite_concurrency\",\n display_name=\"Bulk Insert Overwrite Concurrency\",\n info=\"Optional concurrency level for bulk insert operations that overwrite existing data.\",\n advanced=True,\n ),\n IntInput(\n name=\"bulk_delete_concurrency\",\n display_name=\"Bulk Delete Concurrency\",\n info=\"Optional concurrency level for bulk delete operations.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"setup_mode\",\n display_name=\"Setup Mode\",\n info=\"Configuration mode for setting up the vector store, with options like 'Sync', 'Async', or 'Off'.\",\n options=[\"Sync\", \"Async\", \"Off\"],\n advanced=True,\n value=\"Sync\",\n ),\n BoolInput(\n name=\"pre_delete_collection\",\n display_name=\"Pre Delete Collection\",\n info=\"Boolean flag to determine whether to delete the collection before creating a new one.\",\n advanced=True,\n ),\n StrInput(\n name=\"metadata_indexing_include\",\n display_name=\"Metadata Indexing Include\",\n info=\"Optional list of metadata fields to include in the indexing.\",\n advanced=True,\n ),\n StrInput(\n name=\"metadata_indexing_exclude\",\n display_name=\"Metadata Indexing Exclude\",\n info=\"Optional list of metadata fields to exclude from the indexing.\",\n advanced=True,\n ),\n StrInput(\n name=\"collection_indexing_policy\",\n display_name=\"Collection Indexing Policy\",\n info=\"Optional dictionary defining the indexing policy for the collection.\",\n advanced=True,\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Number of Results\",\n info=\"Number of results to return.\",\n advanced=True,\n value=4,\n ),\n DropdownInput(\n name=\"search_type\",\n display_name=\"Search Type\",\n info=\"Search type to use\",\n options=[\"Similarity\", \"Similarity with score threshold\", \"MMR (Max Marginal Relevance)\"],\n value=\"Similarity\",\n advanced=True,\n ),\n FloatInput(\n name=\"search_score_threshold\",\n display_name=\"Search Score Threshold\",\n info=\"Minimum similarity score threshold for search results. 
(when using 'Similarity with score threshold')\",\n value=0,\n advanced=True,\n ),\n DictInput(\n name=\"search_filter\",\n display_name=\"Search Metadata Filter\",\n info=\"Optional dictionary of filters to apply to the search query.\",\n advanced=True,\n is_list=True,\n ),\n ]\n\n @check_cached_vector_store\n def insert_in_dict(self, build_config, field_name, new_parameters):\n # Insert the new key-value pair after the found key\n for new_field_name, new_parameter in new_parameters.items():\n # Get all the items as a list of tuples (key, value)\n items = list(build_config.items())\n\n # Find the index of the key to insert after\n for i, (key, value) in enumerate(items):\n if key == field_name:\n break\n\n items.insert(i + 1, (new_field_name, new_parameter))\n\n # Clear the original dictionary and update with the modified items\n build_config.clear()\n build_config.update(items)\n\n return build_config\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n if field_name == \"embedding_service\":\n if field_value == \"Astra Vectorize\":\n for field in [\"embedding\"]:\n if field in build_config:\n del build_config[field]\n\n new_parameter = DropdownInput(\n name=\"provider\",\n display_name=\"Vectorize Provider\",\n options=self.VECTORIZE_PROVIDERS_MAPPING.keys(),\n value=\"\",\n required=True,\n real_time_refresh=True,\n ).to_dict()\n\n self.insert_in_dict(build_config, \"embedding_service\", {\"provider\": new_parameter})\n else:\n for field in [\n \"provider\",\n \"z_00_model_name\",\n \"z_01_model_parameters\",\n \"z_02_api_key_name\",\n \"z_03_provider_api_key\",\n \"z_04_authentication\",\n ]:\n if field in build_config:\n del build_config[field]\n\n new_parameter = HandleInput(\n name=\"embedding\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Allows an embedding model configuration.\",\n ).to_dict()\n\n self.insert_in_dict(build_config, \"embedding_service\", {\"embedding\": new_parameter})\n\n elif field_name == \"provider\":\n for field in [\n \"z_00_model_name\",\n \"z_01_model_parameters\",\n \"z_02_api_key_name\",\n \"z_03_provider_api_key\",\n \"z_04_authentication\",\n ]:\n if field in build_config:\n del build_config[field]\n\n model_options = self.VECTORIZE_PROVIDERS_MAPPING[field_value][1]\n\n new_parameter_0 = DropdownInput(\n name=\"z_00_model_name\",\n display_name=\"Model Name\",\n info=f\"The embedding model to use for the selected provider. Each provider has a different set of models \"\n f\"available (full list at https://docs.datastax.com/en/astra-db-serverless/databases/embedding-generation.html):\\n\\n{', '.join(model_options)}\",\n options=model_options,\n required=True,\n ).to_dict()\n\n new_parameter_1 = DictInput(\n name=\"z_01_model_parameters\",\n display_name=\"Model Parameters\",\n is_list=True,\n ).to_dict()\n\n new_parameter_2 = MessageTextInput(\n name=\"z_02_api_key_name\",\n display_name=\"API Key name\",\n info=\"The name of the embeddings provider API key stored on Astra. If set, it will override the 'ProviderKey' in the authentication parameters.\",\n ).to_dict()\n\n new_parameter_3 = SecretStrInput(\n name=\"z_03_provider_api_key\",\n display_name=\"Provider API Key\",\n info=\"An alternative to the Astra Authentication that passes an API key for the provider with each request to Astra DB. 
This may be used when Vectorize is configured for the collection, but no corresponding provider secret is stored within Astra's key management system.\",\n ).to_dict()\n\n new_parameter_4 = DictInput(\n name=\"z_04_authentication\",\n display_name=\"Authentication parameters\",\n is_list=True,\n ).to_dict()\n\n self.insert_in_dict(\n build_config,\n \"provider\",\n {\n \"z_00_model_name\": new_parameter_0,\n \"z_01_model_parameters\": new_parameter_1,\n \"z_02_api_key_name\": new_parameter_2,\n \"z_03_provider_api_key\": new_parameter_3,\n \"z_04_authentication\": new_parameter_4,\n },\n )\n\n return build_config\n\n def build_vectorize_options(self, **kwargs):\n for attribute in [\n \"provider\",\n \"z_00_model_name\",\n \"z_01_model_parameters\",\n \"z_02_api_key_name\",\n \"z_03_provider_api_key\",\n \"z_04_authentication\",\n ]:\n if not hasattr(self, attribute):\n setattr(self, attribute, None)\n\n # Fetch values from kwargs if any self.* attributes are None\n provider_value = self.VECTORIZE_PROVIDERS_MAPPING.get(self.provider, [None])[0] or kwargs.get(\"provider\")\n authentication = {**(self.z_04_authentication or kwargs.get(\"z_04_authentication\", {}))}\n\n api_key_name = self.z_02_api_key_name or kwargs.get(\"z_02_api_key_name\")\n provider_key_name = self.z_03_provider_api_key or kwargs.get(\"z_03_provider_api_key\")\n if provider_key_name:\n authentication[\"providerKey\"] = provider_key_name\n if api_key_name:\n authentication[\"providerKey\"] = api_key_name\n\n return {\n # must match astrapy.info.CollectionVectorServiceOptions\n \"collection_vector_service_options\": {\n \"provider\": provider_value,\n \"modelName\": self.z_00_model_name or kwargs.get(\"z_00_model_name\"),\n \"authentication\": authentication,\n \"parameters\": self.z_01_model_parameters or kwargs.get(\"z_01_model_parameters\", {}),\n },\n \"collection_embedding_api_key\": self.z_03_provider_api_key or kwargs.get(\"z_03_provider_api_key\"),\n }\n\n @check_cached_vector_store\n def build_vector_store(self, vectorize_options=None):\n try:\n from langchain_astradb import AstraDBVectorStore\n from langchain_astradb.utils.astradb import SetupMode\n except ImportError:\n raise ImportError(\n \"Could not import langchain Astra DB integration package. 
\"\n \"Please install it with `pip install langchain-astradb`.\"\n )\n\n try:\n if not self.setup_mode:\n self.setup_mode = self._inputs[\"setup_mode\"].options[0]\n\n setup_mode_value = SetupMode[self.setup_mode.upper()]\n except KeyError:\n raise ValueError(f\"Invalid setup mode: {self.setup_mode}\")\n\n if self.embedding:\n embedding_dict = {\"embedding\": self.embedding}\n else:\n from astrapy.info import CollectionVectorServiceOptions\n\n dict_options = vectorize_options or self.build_vectorize_options()\n dict_options[\"authentication\"] = {\n k: v for k, v in dict_options.get(\"authentication\", {}).items() if k and v\n }\n dict_options[\"parameters\"] = {k: v for k, v in dict_options.get(\"parameters\", {}).items() if k and v}\n\n embedding_dict = {\n \"collection_vector_service_options\": CollectionVectorServiceOptions.from_dict(\n dict_options.get(\"collection_vector_service_options\", {})\n ),\n }\n\n vector_store_kwargs = {\n **embedding_dict,\n \"collection_name\": self.collection_name,\n \"token\": self.token,\n \"api_endpoint\": self.api_endpoint,\n \"namespace\": self.namespace or None,\n \"metric\": self.metric or None,\n \"batch_size\": self.batch_size or None,\n \"bulk_insert_batch_concurrency\": self.bulk_insert_batch_concurrency or None,\n \"bulk_insert_overwrite_concurrency\": self.bulk_insert_overwrite_concurrency or None,\n \"bulk_delete_concurrency\": self.bulk_delete_concurrency or None,\n \"setup_mode\": setup_mode_value,\n \"pre_delete_collection\": self.pre_delete_collection or False,\n }\n\n if self.metadata_indexing_include:\n vector_store_kwargs[\"metadata_indexing_include\"] = self.metadata_indexing_include\n elif self.metadata_indexing_exclude:\n vector_store_kwargs[\"metadata_indexing_exclude\"] = self.metadata_indexing_exclude\n elif self.collection_indexing_policy:\n vector_store_kwargs[\"collection_indexing_policy\"] = self.collection_indexing_policy\n\n try:\n vector_store = AstraDBVectorStore(**vector_store_kwargs)\n except Exception as e:\n raise ValueError(f\"Error initializing AstraDBVectorStore: {str(e)}\") from e\n\n self._add_documents_to_vector_store(vector_store)\n\n return vector_store\n\n def _add_documents_to_vector_store(self, vector_store):\n documents = []\n for _input in self.ingest_data or []:\n if isinstance(_input, Data):\n documents.append(_input.to_lc_document())\n else:\n raise ValueError(\"Vector Store Inputs must be Data objects.\")\n\n if documents:\n logger.debug(f\"Adding {len(documents)} documents to the Vector Store.\")\n try:\n vector_store.add_documents(documents)\n except Exception as e:\n raise ValueError(f\"Error adding documents to AstraDBVectorStore: {str(e)}\") from e\n else:\n logger.debug(\"No documents to add to the Vector Store.\")\n\n def _map_search_type(self):\n if self.search_type == \"Similarity with score threshold\":\n return \"similarity_score_threshold\"\n elif self.search_type == \"MMR (Max Marginal Relevance)\":\n return \"mmr\"\n else:\n return \"similarity\"\n\n def _build_search_args(self):\n args = {\n \"k\": self.number_of_results,\n \"score_threshold\": self.search_score_threshold,\n }\n\n if self.search_filter:\n clean_filter = {k: v for k, v in self.search_filter.items() if k and v}\n if len(clean_filter) > 0:\n args[\"filter\"] = clean_filter\n return args\n\n def search_documents(self, vector_store=None) -> list[Data]:\n if not vector_store:\n vector_store = self.build_vector_store()\n\n logger.debug(f\"Search input: {self.search_input}\")\n logger.debug(f\"Search type: 
{self.search_type}\")\n logger.debug(f\"Number of results: {self.number_of_results}\")\n\n if self.search_input and isinstance(self.search_input, str) and self.search_input.strip():\n try:\n search_type = self._map_search_type()\n search_args = self._build_search_args()\n\n docs = vector_store.search(query=self.search_input, search_type=search_type, **search_args)\n except Exception as e:\n raise ValueError(f\"Error performing search in AstraDBVectorStore: {str(e)}\") from e\n\n logger.debug(f\"Retrieved documents: {len(docs)}\")\n\n data = docs_to_data(docs)\n logger.debug(f\"Converted documents to data: {len(data)}\")\n self.status = data\n return data\n else:\n logger.debug(\"No search input provided. Skipping search.\")\n return []\n\n def get_retriever_kwargs(self):\n search_args = self._build_search_args()\n return {\n \"search_type\": self._map_search_type(),\n \"search_kwargs\": search_args,\n }\n" }, "collection_indexing_policy": { "advanced": true, @@ -1890,7 +1890,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from loguru import logger\n\nfrom langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom langflow.helpers import docs_to_data\nfrom langflow.inputs import DictInput, FloatInput, MessageTextInput\nfrom langflow.io import (\n BoolInput,\n DataInput,\n DropdownInput,\n HandleInput,\n IntInput,\n MultilineInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema import Data\n\n\nclass AstraVectorStoreComponent(LCVectorStoreComponent):\n display_name: str = \"Astra DB\"\n description: str = \"Implementation of Vector Store using Astra DB with search capabilities\"\n documentation: str = \"https://python.langchain.com/docs/integrations/vectorstores/astradb\"\n name = \"AstraDB\"\n icon: str = \"AstraDB\"\n\n VECTORIZE_PROVIDERS_MAPPING = {\n \"Azure OpenAI\": [\"azureOpenAI\", [\"text-embedding-3-small\", \"text-embedding-3-large\", \"text-embedding-ada-002\"]],\n \"Hugging Face - Dedicated\": [\"huggingfaceDedicated\", [\"endpoint-defined-model\"]],\n \"Hugging Face - Serverless\": [\n \"huggingface\",\n [\n \"sentence-transformers/all-MiniLM-L6-v2\",\n \"intfloat/multilingual-e5-large\",\n \"intfloat/multilingual-e5-large-instruct\",\n \"BAAI/bge-small-en-v1.5\",\n \"BAAI/bge-base-en-v1.5\",\n \"BAAI/bge-large-en-v1.5\",\n ],\n ],\n \"Jina AI\": [\n \"jinaAI\",\n [\n \"jina-embeddings-v2-base-en\",\n \"jina-embeddings-v2-base-de\",\n \"jina-embeddings-v2-base-es\",\n \"jina-embeddings-v2-base-code\",\n \"jina-embeddings-v2-base-zh\",\n ],\n ],\n \"Mistral AI\": [\"mistral\", [\"mistral-embed\"]],\n \"NVIDIA\": [\"nvidia\", [\"NV-Embed-QA\"]],\n \"OpenAI\": [\"openai\", [\"text-embedding-3-small\", \"text-embedding-3-large\", \"text-embedding-ada-002\"]],\n \"Upstage\": [\"upstageAI\", [\"solar-embedding-1-large\"]],\n \"Voyage AI\": [\n \"voyageAI\",\n [\"voyage-large-2-instruct\", \"voyage-law-2\", \"voyage-code-2\", \"voyage-large-2\", \"voyage-2\"],\n ],\n }\n\n inputs = [\n StrInput(\n name=\"collection_name\",\n display_name=\"Collection Name\",\n info=\"The name of the collection within Astra DB where the vectors will be stored.\",\n required=True,\n ),\n SecretStrInput(\n name=\"token\",\n display_name=\"Astra DB Application Token\",\n info=\"Authentication token for accessing Astra DB.\",\n value=\"ASTRA_DB_APPLICATION_TOKEN\",\n required=True,\n ),\n SecretStrInput(\n name=\"api_endpoint\",\n display_name=\"API Endpoint\",\n info=\"API endpoint URL for the Astra DB service.\",\n 
value=\"ASTRA_DB_API_ENDPOINT\",\n required=True,\n ),\n MultilineInput(\n name=\"search_input\",\n display_name=\"Search Input\",\n ),\n DataInput(\n name=\"ingest_data\",\n display_name=\"Ingest Data\",\n is_list=True,\n ),\n StrInput(\n name=\"namespace\",\n display_name=\"Namespace\",\n info=\"Optional namespace within Astra DB to use for the collection.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"embedding_service\",\n display_name=\"Embedding Model or Astra Vectorize\",\n info=\"Determines whether to use Astra Vectorize for the collection.\",\n options=[\"Embedding Model\", \"Astra Vectorize\"],\n real_time_refresh=True,\n value=\"Embedding Model\",\n ),\n HandleInput(\n name=\"embedding\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Allows an embedding model configuration.\",\n ),\n DropdownInput(\n name=\"metric\",\n display_name=\"Metric\",\n info=\"Optional distance metric for vector comparisons in the vector store.\",\n options=[\"cosine\", \"dot_product\", \"euclidean\"],\n advanced=True,\n ),\n IntInput(\n name=\"batch_size\",\n display_name=\"Batch Size\",\n info=\"Optional number of data to process in a single batch.\",\n advanced=True,\n ),\n IntInput(\n name=\"bulk_insert_batch_concurrency\",\n display_name=\"Bulk Insert Batch Concurrency\",\n info=\"Optional concurrency level for bulk insert operations.\",\n advanced=True,\n ),\n IntInput(\n name=\"bulk_insert_overwrite_concurrency\",\n display_name=\"Bulk Insert Overwrite Concurrency\",\n info=\"Optional concurrency level for bulk insert operations that overwrite existing data.\",\n advanced=True,\n ),\n IntInput(\n name=\"bulk_delete_concurrency\",\n display_name=\"Bulk Delete Concurrency\",\n info=\"Optional concurrency level for bulk delete operations.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"setup_mode\",\n display_name=\"Setup Mode\",\n info=\"Configuration mode for setting up the vector store, with options like 'Sync', 'Async', or 'Off'.\",\n options=[\"Sync\", \"Async\", \"Off\"],\n advanced=True,\n value=\"Sync\",\n ),\n BoolInput(\n name=\"pre_delete_collection\",\n display_name=\"Pre Delete Collection\",\n info=\"Boolean flag to determine whether to delete the collection before creating a new one.\",\n advanced=True,\n ),\n StrInput(\n name=\"metadata_indexing_include\",\n display_name=\"Metadata Indexing Include\",\n info=\"Optional list of metadata fields to include in the indexing.\",\n advanced=True,\n ),\n StrInput(\n name=\"metadata_indexing_exclude\",\n display_name=\"Metadata Indexing Exclude\",\n info=\"Optional list of metadata fields to exclude from the indexing.\",\n advanced=True,\n ),\n StrInput(\n name=\"collection_indexing_policy\",\n display_name=\"Collection Indexing Policy\",\n info=\"Optional dictionary defining the indexing policy for the collection.\",\n advanced=True,\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Number of Results\",\n info=\"Number of results to return.\",\n advanced=True,\n value=4,\n ),\n DropdownInput(\n name=\"search_type\",\n display_name=\"Search Type\",\n info=\"Search type to use\",\n options=[\"Similarity\", \"Similarity with score threshold\", \"MMR (Max Marginal Relevance)\"],\n value=\"Similarity\",\n advanced=True,\n ),\n FloatInput(\n name=\"search_score_threshold\",\n display_name=\"Search Score Threshold\",\n info=\"Minimum similarity score threshold for search results. 
(when using 'Similarity with score threshold')\",\n value=0,\n advanced=True,\n ),\n DictInput(\n name=\"search_filter\",\n display_name=\"Search Metadata Filter\",\n info=\"Optional dictionary of filters to apply to the search query.\",\n advanced=True,\n is_list=True,\n ),\n ]\n\n @check_cached_vector_store\n def insert_in_dict(self, build_config, field_name, new_parameters):\n # Insert the new key-value pair after the found key\n for new_field_name, new_parameter in new_parameters.items():\n # Get all the items as a list of tuples (key, value)\n items = list(build_config.items())\n\n # Find the index of the key to insert after\n for i, (key, value) in enumerate(items):\n if key == field_name:\n break\n\n items.insert(i + 1, (new_field_name, new_parameter))\n\n # Clear the original dictionary and update with the modified items\n build_config.clear()\n build_config.update(items)\n\n return build_config\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n if field_name == \"embedding_service\":\n if field_value == \"Astra Vectorize\":\n for field in [\"embedding\"]:\n if field in build_config:\n del build_config[field]\n\n new_parameter = DropdownInput(\n name=\"provider\",\n display_name=\"Vectorize Provider\",\n options=self.VECTORIZE_PROVIDERS_MAPPING.keys(),\n value=\"\",\n required=True,\n real_time_refresh=True,\n ).to_dict()\n\n self.insert_in_dict(build_config, \"embedding_service\", {\"provider\": new_parameter})\n else:\n for field in [\n \"provider\",\n \"z_00_model_name\",\n \"z_01_model_parameters\",\n \"z_02_api_key_name\",\n \"z_03_provider_api_key\",\n \"z_04_authentication\",\n ]:\n if field in build_config:\n del build_config[field]\n\n new_parameter = HandleInput(\n name=\"embedding\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Allows an embedding model configuration.\",\n ).to_dict()\n\n self.insert_in_dict(build_config, \"embedding_service\", {\"embedding\": new_parameter})\n\n elif field_name == \"provider\":\n for field in [\n \"z_00_model_name\",\n \"z_01_model_parameters\",\n \"z_02_api_key_name\",\n \"z_03_provider_api_key\",\n \"z_04_authentication\",\n ]:\n if field in build_config:\n del build_config[field]\n\n model_options = self.VECTORIZE_PROVIDERS_MAPPING[field_value][1]\n\n new_parameter_0 = DropdownInput(\n name=\"z_00_model_name\",\n display_name=\"Model Name\",\n info=f\"The embedding model to use for the selected provider. Each provider has a different set of models \"\n f\"available (full list at https://docs.datastax.com/en/astra-db-serverless/databases/embedding-generation.html):\\n\\n{', '.join(model_options)}\",\n options=model_options,\n required=True,\n ).to_dict()\n\n new_parameter_1 = DictInput(\n name=\"z_01_model_parameters\",\n display_name=\"Model Parameters\",\n is_list=True,\n ).to_dict()\n\n new_parameter_2 = MessageTextInput(\n name=\"z_02_api_key_name\",\n display_name=\"API Key name\",\n info=\"The name of the embeddings provider API key stored on Astra. If set, it will override the 'ProviderKey' in the authentication parameters.\",\n ).to_dict()\n\n new_parameter_3 = SecretStrInput(\n name=\"z_03_provider_api_key\",\n display_name=\"Provider API Key\",\n info=\"An alternative to the Astra Authentication that passes an API key for the provider with each request to Astra DB. 
This may be used when Vectorize is configured for the collection, but no corresponding provider secret is stored within Astra's key management system.\",\n ).to_dict()\n\n new_parameter_4 = DictInput(\n name=\"z_04_authentication\",\n display_name=\"Authentication parameters\",\n is_list=True,\n ).to_dict()\n\n self.insert_in_dict(\n build_config,\n \"provider\",\n {\n \"z_00_model_name\": new_parameter_0,\n \"z_01_model_parameters\": new_parameter_1,\n \"z_02_api_key_name\": new_parameter_2,\n \"z_03_provider_api_key\": new_parameter_3,\n \"z_04_authentication\": new_parameter_4,\n },\n )\n\n return build_config\n\n def build_vectorize_options(self, **kwargs):\n for attribute in [\n \"provider\",\n \"z_00_api_key_name\",\n \"z_01_model_name\",\n \"z_02_authentication\",\n \"z_03_provider_api_key\",\n \"z_04_model_parameters\",\n ]:\n if not hasattr(self, attribute):\n setattr(self, attribute, None)\n\n # Fetch values from kwargs if any self.* attributes are None\n provider_value = self.VECTORIZE_PROVIDERS_MAPPING.get(self.provider, [None])[0] or kwargs.get(\"provider\")\n authentication = {**(self.z_02_authentication or kwargs.get(\"z_02_authentication\", {}))}\n\n api_key_name = self.z_00_api_key_name or kwargs.get(\"z_00_api_key_name\")\n provider_key_name = self.z_03_provider_api_key or kwargs.get(\"z_03_provider_api_key\")\n if provider_key_name:\n authentication[\"providerKey\"] = provider_key_name\n if api_key_name:\n authentication[\"providerKey\"] = api_key_name\n\n return {\n # must match astrapy.info.CollectionVectorServiceOptions\n \"collection_vector_service_options\": {\n \"provider\": provider_value,\n \"modelName\": self.z_01_model_name or kwargs.get(\"z_01_model_name\"),\n \"authentication\": authentication,\n \"parameters\": self.z_04_model_parameters or kwargs.get(\"z_04_model_parameters\", {}),\n },\n \"collection_embedding_api_key\": self.z_03_provider_api_key or kwargs.get(\"z_03_provider_api_key\"),\n }\n\n @check_cached_vector_store\n def build_vector_store(self, vectorize_options=None):\n try:\n from langchain_astradb import AstraDBVectorStore\n from langchain_astradb.utils.astradb import SetupMode\n except ImportError:\n raise ImportError(\n \"Could not import langchain Astra DB integration package. 
\"\n \"Please install it with `pip install langchain-astradb`.\"\n )\n\n try:\n if not self.setup_mode:\n self.setup_mode = self._inputs[\"setup_mode\"].options[0]\n\n setup_mode_value = SetupMode[self.setup_mode.upper()]\n except KeyError:\n raise ValueError(f\"Invalid setup mode: {self.setup_mode}\")\n\n if self.embedding:\n embedding_dict = {\"embedding\": self.embedding}\n else:\n from astrapy.info import CollectionVectorServiceOptions\n\n dict_options = vectorize_options or self.build_vectorize_options()\n dict_options[\"authentication\"] = {\n k: v for k, v in dict_options.get(\"authentication\", {}).items() if k and v\n }\n dict_options[\"parameters\"] = {k: v for k, v in dict_options.get(\"parameters\", {}).items() if k and v}\n\n embedding_dict = {\n \"collection_vector_service_options\": CollectionVectorServiceOptions.from_dict(\n dict_options.get(\"collection_vector_service_options\", {})\n ),\n }\n\n vector_store_kwargs = {\n **embedding_dict,\n \"collection_name\": self.collection_name,\n \"token\": self.token,\n \"api_endpoint\": self.api_endpoint,\n \"namespace\": self.namespace or None,\n \"metric\": self.metric or None,\n \"batch_size\": self.batch_size or None,\n \"bulk_insert_batch_concurrency\": self.bulk_insert_batch_concurrency or None,\n \"bulk_insert_overwrite_concurrency\": self.bulk_insert_overwrite_concurrency or None,\n \"bulk_delete_concurrency\": self.bulk_delete_concurrency or None,\n \"setup_mode\": setup_mode_value,\n \"pre_delete_collection\": self.pre_delete_collection or False,\n }\n\n if self.metadata_indexing_include:\n vector_store_kwargs[\"metadata_indexing_include\"] = self.metadata_indexing_include\n elif self.metadata_indexing_exclude:\n vector_store_kwargs[\"metadata_indexing_exclude\"] = self.metadata_indexing_exclude\n elif self.collection_indexing_policy:\n vector_store_kwargs[\"collection_indexing_policy\"] = self.collection_indexing_policy\n\n try:\n vector_store = AstraDBVectorStore(**vector_store_kwargs)\n except Exception as e:\n raise ValueError(f\"Error initializing AstraDBVectorStore: {str(e)}\") from e\n\n self._add_documents_to_vector_store(vector_store)\n\n return vector_store\n\n def _add_documents_to_vector_store(self, vector_store):\n documents = []\n for _input in self.ingest_data or []:\n if isinstance(_input, Data):\n documents.append(_input.to_lc_document())\n else:\n raise ValueError(\"Vector Store Inputs must be Data objects.\")\n\n if documents:\n logger.debug(f\"Adding {len(documents)} documents to the Vector Store.\")\n try:\n vector_store.add_documents(documents)\n except Exception as e:\n raise ValueError(f\"Error adding documents to AstraDBVectorStore: {str(e)}\") from e\n else:\n logger.debug(\"No documents to add to the Vector Store.\")\n\n def _map_search_type(self):\n if self.search_type == \"Similarity with score threshold\":\n return \"similarity_score_threshold\"\n elif self.search_type == \"MMR (Max Marginal Relevance)\":\n return \"mmr\"\n else:\n return \"similarity\"\n\n def _build_search_args(self):\n args = {\n \"k\": self.number_of_results,\n \"score_threshold\": self.search_score_threshold,\n }\n\n if self.search_filter:\n clean_filter = {k: v for k, v in self.search_filter.items() if k and v}\n if len(clean_filter) > 0:\n args[\"filter\"] = clean_filter\n return args\n\n def search_documents(self, vector_store=None) -> list[Data]:\n if not vector_store:\n vector_store = self.build_vector_store()\n\n logger.debug(f\"Search input: {self.search_input}\")\n logger.debug(f\"Search type: 
{self.search_type}\")\n logger.debug(f\"Number of results: {self.number_of_results}\")\n\n if self.search_input and isinstance(self.search_input, str) and self.search_input.strip():\n try:\n search_type = self._map_search_type()\n search_args = self._build_search_args()\n\n docs = vector_store.search(query=self.search_input, search_type=search_type, **search_args)\n except Exception as e:\n raise ValueError(f\"Error performing search in AstraDBVectorStore: {str(e)}\") from e\n\n logger.debug(f\"Retrieved documents: {len(docs)}\")\n\n data = docs_to_data(docs)\n logger.debug(f\"Converted documents to data: {len(data)}\")\n self.status = data\n return data\n else:\n logger.debug(\"No search input provided. Skipping search.\")\n return []\n\n def get_retriever_kwargs(self):\n search_args = self._build_search_args()\n return {\n \"search_type\": self._map_search_type(),\n \"search_kwargs\": search_args,\n }\n" + "value": "from loguru import logger\n\nfrom langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom langflow.helpers import docs_to_data\nfrom langflow.inputs import DictInput, FloatInput, MessageTextInput\nfrom langflow.io import (\n BoolInput,\n DataInput,\n DropdownInput,\n HandleInput,\n IntInput,\n MultilineInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema import Data\n\n\nclass AstraVectorStoreComponent(LCVectorStoreComponent):\n display_name: str = \"Astra DB\"\n description: str = \"Implementation of Vector Store using Astra DB with search capabilities\"\n documentation: str = \"https://python.langchain.com/docs/integrations/vectorstores/astradb\"\n name = \"AstraDB\"\n icon: str = \"AstraDB\"\n\n VECTORIZE_PROVIDERS_MAPPING = {\n \"Azure OpenAI\": [\"azureOpenAI\", [\"text-embedding-3-small\", \"text-embedding-3-large\", \"text-embedding-ada-002\"]],\n \"Hugging Face - Dedicated\": [\"huggingfaceDedicated\", [\"endpoint-defined-model\"]],\n \"Hugging Face - Serverless\": [\n \"huggingface\",\n [\n \"sentence-transformers/all-MiniLM-L6-v2\",\n \"intfloat/multilingual-e5-large\",\n \"intfloat/multilingual-e5-large-instruct\",\n \"BAAI/bge-small-en-v1.5\",\n \"BAAI/bge-base-en-v1.5\",\n \"BAAI/bge-large-en-v1.5\",\n ],\n ],\n \"Jina AI\": [\n \"jinaAI\",\n [\n \"jina-embeddings-v2-base-en\",\n \"jina-embeddings-v2-base-de\",\n \"jina-embeddings-v2-base-es\",\n \"jina-embeddings-v2-base-code\",\n \"jina-embeddings-v2-base-zh\",\n ],\n ],\n \"Mistral AI\": [\"mistral\", [\"mistral-embed\"]],\n \"NVIDIA\": [\"nvidia\", [\"NV-Embed-QA\"]],\n \"OpenAI\": [\"openai\", [\"text-embedding-3-small\", \"text-embedding-3-large\", \"text-embedding-ada-002\"]],\n \"Upstage\": [\"upstageAI\", [\"solar-embedding-1-large\"]],\n \"Voyage AI\": [\n \"voyageAI\",\n [\"voyage-large-2-instruct\", \"voyage-law-2\", \"voyage-code-2\", \"voyage-large-2\", \"voyage-2\"],\n ],\n }\n\n inputs = [\n StrInput(\n name=\"collection_name\",\n display_name=\"Collection Name\",\n info=\"The name of the collection within Astra DB where the vectors will be stored.\",\n required=True,\n ),\n SecretStrInput(\n name=\"token\",\n display_name=\"Astra DB Application Token\",\n info=\"Authentication token for accessing Astra DB.\",\n value=\"ASTRA_DB_APPLICATION_TOKEN\",\n required=True,\n ),\n SecretStrInput(\n name=\"api_endpoint\",\n display_name=\"API Endpoint\",\n info=\"API endpoint URL for the Astra DB service.\",\n value=\"ASTRA_DB_API_ENDPOINT\",\n required=True,\n ),\n MultilineInput(\n name=\"search_input\",\n display_name=\"Search Input\",\n ),\n DataInput(\n 
name=\"ingest_data\",\n display_name=\"Ingest Data\",\n is_list=True,\n ),\n StrInput(\n name=\"namespace\",\n display_name=\"Namespace\",\n info=\"Optional namespace within Astra DB to use for the collection.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"embedding_service\",\n display_name=\"Embedding Model or Astra Vectorize\",\n info=\"Determines whether to use Astra Vectorize for the collection.\",\n options=[\"Embedding Model\", \"Astra Vectorize\"],\n real_time_refresh=True,\n value=\"Embedding Model\",\n ),\n HandleInput(\n name=\"embedding\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Allows an embedding model configuration.\",\n ),\n DropdownInput(\n name=\"metric\",\n display_name=\"Metric\",\n info=\"Optional distance metric for vector comparisons in the vector store.\",\n options=[\"cosine\", \"dot_product\", \"euclidean\"],\n advanced=True,\n ),\n IntInput(\n name=\"batch_size\",\n display_name=\"Batch Size\",\n info=\"Optional number of data to process in a single batch.\",\n advanced=True,\n ),\n IntInput(\n name=\"bulk_insert_batch_concurrency\",\n display_name=\"Bulk Insert Batch Concurrency\",\n info=\"Optional concurrency level for bulk insert operations.\",\n advanced=True,\n ),\n IntInput(\n name=\"bulk_insert_overwrite_concurrency\",\n display_name=\"Bulk Insert Overwrite Concurrency\",\n info=\"Optional concurrency level for bulk insert operations that overwrite existing data.\",\n advanced=True,\n ),\n IntInput(\n name=\"bulk_delete_concurrency\",\n display_name=\"Bulk Delete Concurrency\",\n info=\"Optional concurrency level for bulk delete operations.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"setup_mode\",\n display_name=\"Setup Mode\",\n info=\"Configuration mode for setting up the vector store, with options like 'Sync', 'Async', or 'Off'.\",\n options=[\"Sync\", \"Async\", \"Off\"],\n advanced=True,\n value=\"Sync\",\n ),\n BoolInput(\n name=\"pre_delete_collection\",\n display_name=\"Pre Delete Collection\",\n info=\"Boolean flag to determine whether to delete the collection before creating a new one.\",\n advanced=True,\n ),\n StrInput(\n name=\"metadata_indexing_include\",\n display_name=\"Metadata Indexing Include\",\n info=\"Optional list of metadata fields to include in the indexing.\",\n advanced=True,\n ),\n StrInput(\n name=\"metadata_indexing_exclude\",\n display_name=\"Metadata Indexing Exclude\",\n info=\"Optional list of metadata fields to exclude from the indexing.\",\n advanced=True,\n ),\n StrInput(\n name=\"collection_indexing_policy\",\n display_name=\"Collection Indexing Policy\",\n info=\"Optional dictionary defining the indexing policy for the collection.\",\n advanced=True,\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Number of Results\",\n info=\"Number of results to return.\",\n advanced=True,\n value=4,\n ),\n DropdownInput(\n name=\"search_type\",\n display_name=\"Search Type\",\n info=\"Search type to use\",\n options=[\"Similarity\", \"Similarity with score threshold\", \"MMR (Max Marginal Relevance)\"],\n value=\"Similarity\",\n advanced=True,\n ),\n FloatInput(\n name=\"search_score_threshold\",\n display_name=\"Search Score Threshold\",\n info=\"Minimum similarity score threshold for search results. 
(when using 'Similarity with score threshold')\",\n value=0,\n advanced=True,\n ),\n DictInput(\n name=\"search_filter\",\n display_name=\"Search Metadata Filter\",\n info=\"Optional dictionary of filters to apply to the search query.\",\n advanced=True,\n is_list=True,\n ),\n ]\n\n @check_cached_vector_store\n def insert_in_dict(self, build_config, field_name, new_parameters):\n # Insert the new key-value pair after the found key\n for new_field_name, new_parameter in new_parameters.items():\n # Get all the items as a list of tuples (key, value)\n items = list(build_config.items())\n\n # Find the index of the key to insert after\n for i, (key, value) in enumerate(items):\n if key == field_name:\n break\n\n items.insert(i + 1, (new_field_name, new_parameter))\n\n # Clear the original dictionary and update with the modified items\n build_config.clear()\n build_config.update(items)\n\n return build_config\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n if field_name == \"embedding_service\":\n if field_value == \"Astra Vectorize\":\n for field in [\"embedding\"]:\n if field in build_config:\n del build_config[field]\n\n new_parameter = DropdownInput(\n name=\"provider\",\n display_name=\"Vectorize Provider\",\n options=self.VECTORIZE_PROVIDERS_MAPPING.keys(),\n value=\"\",\n required=True,\n real_time_refresh=True,\n ).to_dict()\n\n self.insert_in_dict(build_config, \"embedding_service\", {\"provider\": new_parameter})\n else:\n for field in [\n \"provider\",\n \"z_00_model_name\",\n \"z_01_model_parameters\",\n \"z_02_api_key_name\",\n \"z_03_provider_api_key\",\n \"z_04_authentication\",\n ]:\n if field in build_config:\n del build_config[field]\n\n new_parameter = HandleInput(\n name=\"embedding\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Allows an embedding model configuration.\",\n ).to_dict()\n\n self.insert_in_dict(build_config, \"embedding_service\", {\"embedding\": new_parameter})\n\n elif field_name == \"provider\":\n for field in [\n \"z_00_model_name\",\n \"z_01_model_parameters\",\n \"z_02_api_key_name\",\n \"z_03_provider_api_key\",\n \"z_04_authentication\",\n ]:\n if field in build_config:\n del build_config[field]\n\n model_options = self.VECTORIZE_PROVIDERS_MAPPING[field_value][1]\n\n new_parameter_0 = DropdownInput(\n name=\"z_00_model_name\",\n display_name=\"Model Name\",\n info=f\"The embedding model to use for the selected provider. Each provider has a different set of models \"\n f\"available (full list at https://docs.datastax.com/en/astra-db-serverless/databases/embedding-generation.html):\\n\\n{', '.join(model_options)}\",\n options=model_options,\n required=True,\n ).to_dict()\n\n new_parameter_1 = DictInput(\n name=\"z_01_model_parameters\",\n display_name=\"Model Parameters\",\n is_list=True,\n ).to_dict()\n\n new_parameter_2 = MessageTextInput(\n name=\"z_02_api_key_name\",\n display_name=\"API Key name\",\n info=\"The name of the embeddings provider API key stored on Astra. If set, it will override the 'ProviderKey' in the authentication parameters.\",\n ).to_dict()\n\n new_parameter_3 = SecretStrInput(\n name=\"z_03_provider_api_key\",\n display_name=\"Provider API Key\",\n info=\"An alternative to the Astra Authentication that passes an API key for the provider with each request to Astra DB. 
This may be used when Vectorize is configured for the collection, but no corresponding provider secret is stored within Astra's key management system.\",\n ).to_dict()\n\n new_parameter_4 = DictInput(\n name=\"z_04_authentication\",\n display_name=\"Authentication parameters\",\n is_list=True,\n ).to_dict()\n\n self.insert_in_dict(\n build_config,\n \"provider\",\n {\n \"z_00_model_name\": new_parameter_0,\n \"z_01_model_parameters\": new_parameter_1,\n \"z_02_api_key_name\": new_parameter_2,\n \"z_03_provider_api_key\": new_parameter_3,\n \"z_04_authentication\": new_parameter_4,\n },\n )\n\n return build_config\n\n def build_vectorize_options(self, **kwargs):\n for attribute in [\n \"provider\",\n \"z_00_model_name\",\n \"z_01_model_parameters\",\n \"z_02_api_key_name\",\n \"z_03_provider_api_key\",\n \"z_04_authentication\",\n ]:\n if not hasattr(self, attribute):\n setattr(self, attribute, None)\n\n # Fetch values from kwargs if any self.* attributes are None\n provider_value = self.VECTORIZE_PROVIDERS_MAPPING.get(self.provider, [None])[0] or kwargs.get(\"provider\")\n authentication = {**(self.z_04_authentication or kwargs.get(\"z_04_authentication\", {}))}\n\n api_key_name = self.z_02_api_key_name or kwargs.get(\"z_02_api_key_name\")\n provider_key_name = self.z_03_provider_api_key or kwargs.get(\"z_03_provider_api_key\")\n if provider_key_name:\n authentication[\"providerKey\"] = provider_key_name\n if api_key_name:\n authentication[\"providerKey\"] = api_key_name\n\n return {\n # must match astrapy.info.CollectionVectorServiceOptions\n \"collection_vector_service_options\": {\n \"provider\": provider_value,\n \"modelName\": self.z_00_model_name or kwargs.get(\"z_00_model_name\"),\n \"authentication\": authentication,\n \"parameters\": self.z_01_model_parameters or kwargs.get(\"z_01_model_parameters\", {}),\n },\n \"collection_embedding_api_key\": self.z_03_provider_api_key or kwargs.get(\"z_03_provider_api_key\"),\n }\n\n @check_cached_vector_store\n def build_vector_store(self, vectorize_options=None):\n try:\n from langchain_astradb import AstraDBVectorStore\n from langchain_astradb.utils.astradb import SetupMode\n except ImportError:\n raise ImportError(\n \"Could not import langchain Astra DB integration package. 
\"\n \"Please install it with `pip install langchain-astradb`.\"\n )\n\n try:\n if not self.setup_mode:\n self.setup_mode = self._inputs[\"setup_mode\"].options[0]\n\n setup_mode_value = SetupMode[self.setup_mode.upper()]\n except KeyError:\n raise ValueError(f\"Invalid setup mode: {self.setup_mode}\")\n\n if self.embedding:\n embedding_dict = {\"embedding\": self.embedding}\n else:\n from astrapy.info import CollectionVectorServiceOptions\n\n dict_options = vectorize_options or self.build_vectorize_options()\n dict_options[\"authentication\"] = {\n k: v for k, v in dict_options.get(\"authentication\", {}).items() if k and v\n }\n dict_options[\"parameters\"] = {k: v for k, v in dict_options.get(\"parameters\", {}).items() if k and v}\n\n embedding_dict = {\n \"collection_vector_service_options\": CollectionVectorServiceOptions.from_dict(\n dict_options.get(\"collection_vector_service_options\", {})\n ),\n }\n\n vector_store_kwargs = {\n **embedding_dict,\n \"collection_name\": self.collection_name,\n \"token\": self.token,\n \"api_endpoint\": self.api_endpoint,\n \"namespace\": self.namespace or None,\n \"metric\": self.metric or None,\n \"batch_size\": self.batch_size or None,\n \"bulk_insert_batch_concurrency\": self.bulk_insert_batch_concurrency or None,\n \"bulk_insert_overwrite_concurrency\": self.bulk_insert_overwrite_concurrency or None,\n \"bulk_delete_concurrency\": self.bulk_delete_concurrency or None,\n \"setup_mode\": setup_mode_value,\n \"pre_delete_collection\": self.pre_delete_collection or False,\n }\n\n if self.metadata_indexing_include:\n vector_store_kwargs[\"metadata_indexing_include\"] = self.metadata_indexing_include\n elif self.metadata_indexing_exclude:\n vector_store_kwargs[\"metadata_indexing_exclude\"] = self.metadata_indexing_exclude\n elif self.collection_indexing_policy:\n vector_store_kwargs[\"collection_indexing_policy\"] = self.collection_indexing_policy\n\n try:\n vector_store = AstraDBVectorStore(**vector_store_kwargs)\n except Exception as e:\n raise ValueError(f\"Error initializing AstraDBVectorStore: {str(e)}\") from e\n\n self._add_documents_to_vector_store(vector_store)\n\n return vector_store\n\n def _add_documents_to_vector_store(self, vector_store):\n documents = []\n for _input in self.ingest_data or []:\n if isinstance(_input, Data):\n documents.append(_input.to_lc_document())\n else:\n raise ValueError(\"Vector Store Inputs must be Data objects.\")\n\n if documents:\n logger.debug(f\"Adding {len(documents)} documents to the Vector Store.\")\n try:\n vector_store.add_documents(documents)\n except Exception as e:\n raise ValueError(f\"Error adding documents to AstraDBVectorStore: {str(e)}\") from e\n else:\n logger.debug(\"No documents to add to the Vector Store.\")\n\n def _map_search_type(self):\n if self.search_type == \"Similarity with score threshold\":\n return \"similarity_score_threshold\"\n elif self.search_type == \"MMR (Max Marginal Relevance)\":\n return \"mmr\"\n else:\n return \"similarity\"\n\n def _build_search_args(self):\n args = {\n \"k\": self.number_of_results,\n \"score_threshold\": self.search_score_threshold,\n }\n\n if self.search_filter:\n clean_filter = {k: v for k, v in self.search_filter.items() if k and v}\n if len(clean_filter) > 0:\n args[\"filter\"] = clean_filter\n return args\n\n def search_documents(self, vector_store=None) -> list[Data]:\n if not vector_store:\n vector_store = self.build_vector_store()\n\n logger.debug(f\"Search input: {self.search_input}\")\n logger.debug(f\"Search type: 
{self.search_type}\")\n logger.debug(f\"Number of results: {self.number_of_results}\")\n\n if self.search_input and isinstance(self.search_input, str) and self.search_input.strip():\n try:\n search_type = self._map_search_type()\n search_args = self._build_search_args()\n\n docs = vector_store.search(query=self.search_input, search_type=search_type, **search_args)\n except Exception as e:\n raise ValueError(f\"Error performing search in AstraDBVectorStore: {str(e)}\") from e\n\n logger.debug(f\"Retrieved documents: {len(docs)}\")\n\n data = docs_to_data(docs)\n logger.debug(f\"Converted documents to data: {len(data)}\")\n self.status = data\n return data\n else:\n logger.debug(\"No search input provided. Skipping search.\")\n return []\n\n def get_retriever_kwargs(self):\n search_args = self._build_search_args()\n return {\n \"search_type\": self._map_search_type(),\n \"search_kwargs\": search_args,\n }\n" }, "collection_indexing_policy": { "advanced": true, diff --git a/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx b/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx index c8363f11b3b..75c7bb7d23c 100644 --- a/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx +++ b/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx @@ -468,21 +468,6 @@ export default function Page({ view }: { view?: boolean }): JSX.Element { id: newId, }, }; - // const newNode: NodeType = { - // id: getNodeId("noteNode"), - // type: "noteNode", - // position: position || { x: 0, y: 0 }, - // data: { - // id: getNodeId("noteNode"), - // type: "note", - // node: { - // description: "", - // display_name: "", - // documentation: "", - // template: {}, - // }, - // }, - // }; setNodes((nds) => nds.concat(newNode)); setIsAddingNote(false); } From efe5997c079148ec213de7009e4bbb584babd26f Mon Sep 17 00:00:00 2001 From: Yuqi Tang Date: Thu, 26 Sep 2024 12:07:17 -0700 Subject: [PATCH 03/11] adding ghost node --- src/frontend/src/constants/constants.ts | 8 ++++++ .../components/PageComponent/index.tsx | 28 +++++++++++++++++++ src/frontend/src/style/index.css | 12 ++++++++ 3 files changed, 48 insertions(+) diff --git a/src/frontend/src/constants/constants.ts b/src/frontend/src/constants/constants.ts index dac7bea1562..282642646d0 100644 --- a/src/frontend/src/constants/constants.ts +++ b/src/frontend/src/constants/constants.ts @@ -917,4 +917,12 @@ export const COLOR_OPTIONS = { red: "var(--note-red)", }; +export const SHADOW_COLOR_OPTIONS = { + default: "var(--note-default-opacity)", + indigo: "var(--note-indigo-opacity)", + emerald: "var(--note-emerald-opacity)", + amber: "var(--note-amber-opacity)", + red: "var(--note-red-opacity)", +} + export const maxSizeFilesInBytes = 10 * 1024 * 1024; // 10MB in bytes diff --git a/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx b/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx index 75c7bb7d23c..00c919f162a 100644 --- a/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx +++ b/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx @@ -1,3 +1,8 @@ +import { + SHADOW_COLOR_OPTIONS, + NOTE_NODE_MIN_HEIGHT, + NOTE_NODE_MIN_WIDTH, +} from "@/constants/constants"; import { DefaultEdge } from "@/CustomEdges"; import NoteNode from "@/CustomNodes/NoteNode"; import IconComponent from "@/components/genericIconComponent"; @@ -444,6 +449,10 @@ export default function Page({ view }: { view?: boolean }): JSX.Element { const onPaneClick = useCallback((event: React.MouseEvent) 
=> { setFilterEdge([]); if (isAddingNote) { + const shadowBox = document.getElementById('shadow-box'); + if (shadowBox) { + shadowBox.style.display = 'none'; + } const position = reactFlowInstance?.screenToFlowPosition({ x: event.clientX, y: event.clientY, @@ -473,6 +482,17 @@ export default function Page({ view }: { view?: boolean }): JSX.Element { } }, [isAddingNote, setNodes, reactFlowInstance, getNodeId, setFilterEdge]); + const onPaneMouseMove = useCallback((event: React.MouseEvent) => { + if (isAddingNote) { + const shadowBox = document.getElementById('shadow-box'); + if (shadowBox) { + shadowBox.style.display = 'block'; + shadowBox.style.left = `${event.clientX + 10}px`; + shadowBox.style.top = `${event.clientY + 10}px`; + } + } + }, [isAddingNote]); + return (
{showCanvas ? ( @@ -510,6 +530,7 @@ export default function Page({ view }: { view?: boolean }): JSX.Element { panActivationKeyCode={""} proOptions={{ hideAttribution: true }} onPaneClick={onPaneClick} + onPaneMouseMove={onPaneMouseMove} > {!view && ( @@ -542,6 +563,13 @@ export default function Page({ view }: { view?: boolean }): JSX.Element { }} /> +
+
) : (
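Taken together, the PageComponent changes in this patch reduce to one small pattern: while a note is being placed, a pane click is converted from screen coordinates to flow coordinates and a `noteNode` is appended there. A minimal sketch of that pattern, assuming the `reactFlowInstance`, `getNodeId`, `setNodes`, and `NodeType` names already in scope in this component; `addNoteAt` itself is just an illustrative name:

    // Convert the click point to flow coordinates and append a note node there.
    const addNoteAt = (event: React.MouseEvent) => {
      const position = reactFlowInstance?.screenToFlowPosition({
        x: event.clientX,
        y: event.clientY,
      });
      const id = getNodeId("note");
      const newNode: NodeType = {
        id,
        type: "noteNode",
        position: position ?? { x: 0, y: 0 },
        data: {
          id,
          type: "note",
          node: { description: "", display_name: "", documentation: "", template: {} },
        },
      };
      setNodes((nds) => nds.concat(newNode));
    };

The `shadow-box` div added in the hunk above is the "ghost" preview: `onPaneMouseMove` keeps it next to the cursor while `isAddingNote` is true, and `onPaneClick` hides it again once the note has been dropped.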
diff --git a/src/frontend/src/style/index.css b/src/frontend/src/style/index.css index 697c3ac4b86..98459efdaca 100644 --- a/src/frontend/src/style/index.css +++ b/src/frontend/src/style/index.css @@ -76,6 +76,12 @@ --note-emerald: #d1fae5; --note-amber: #fef3c7; --note-red: #fee2e2; + + --note-default-opacity: #f1f5f980; + --note-indigo-opacity: #312e8180; + --note-emerald-opacity: #064e3b80; + --note-amber-opacity: #78350f80; + --note-red-opacity: #7f1d1d80; } .dark { @@ -86,6 +92,12 @@ --note-red: #7f1d1d; --note-placeholder: 216 12% 84%; /* hsl(216 12% 84%) */ + --note-default-opacity: #0f172a80; + --note-indigo-opacity: #312e8180; + --note-emerald-opacity: #064e3b80; + --note-amber-opacity: #78350f80; + --note-red-opacity: #7f1d1d80; + --node-selected: 234 89% 74%; --background: 224 28% 7.5%; /* hsl(224 10% 7.5%) */ --foreground: 213 31% 80%; /* hsl(213 31% 91%) */ From 0405c91c4fa4213dd8c95337cbc6a1fb75dfb248 Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Thu, 26 Sep 2024 19:08:55 +0000 Subject: [PATCH 04/11] [autofix.ci] apply automated fixes --- .../components/NodeDescription/index.tsx | 2 +- src/frontend/src/constants/constants.ts | 2 +- .../components/PageComponent/index.tsx | 122 ++++++++++-------- 3 files changed, 67 insertions(+), 59 deletions(-) diff --git a/src/frontend/src/CustomNodes/GenericNode/components/NodeDescription/index.tsx b/src/frontend/src/CustomNodes/GenericNode/components/NodeDescription/index.tsx index b74e7955689..d723da40c9a 100644 --- a/src/frontend/src/CustomNodes/GenericNode/components/NodeDescription/index.tsx +++ b/src/frontend/src/CustomNodes/GenericNode/components/NodeDescription/index.tsx @@ -146,7 +146,7 @@ export default function NodeDescription({ ) : ( diff --git a/src/frontend/src/constants/constants.ts b/src/frontend/src/constants/constants.ts index 282642646d0..b0551a3c3e4 100644 --- a/src/frontend/src/constants/constants.ts +++ b/src/frontend/src/constants/constants.ts @@ -923,6 +923,6 @@ export const SHADOW_COLOR_OPTIONS = { emerald: "var(--note-emerald-opacity)", amber: "var(--note-amber-opacity)", red: "var(--note-red-opacity)", -} +}; export const maxSizeFilesInBytes = 10 * 1024 * 1024; // 10MB in bytes diff --git a/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx b/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx index 00c919f162a..cb59130dd4c 100644 --- a/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx +++ b/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx @@ -1,13 +1,13 @@ -import { - SHADOW_COLOR_OPTIONS, - NOTE_NODE_MIN_HEIGHT, - NOTE_NODE_MIN_WIDTH, -} from "@/constants/constants"; import { DefaultEdge } from "@/CustomEdges"; import NoteNode from "@/CustomNodes/NoteNode"; import IconComponent from "@/components/genericIconComponent"; import LoadingComponent from "@/components/loadingComponent"; import ShadTooltip from "@/components/shadTooltipComponent"; +import { + NOTE_NODE_MIN_HEIGHT, + NOTE_NODE_MIN_WIDTH, + SHADOW_COLOR_OPTIONS, +} from "@/constants/constants"; import { useGetBuildsQuery } from "@/controllers/API/queries/_builds"; import { track } from "@/customization/utils/analytics"; import useAutoSaveFlow from "@/hooks/flows/use-autosave-flow"; @@ -446,52 +446,58 @@ export default function Page({ view }: { view?: boolean }): JSX.Element { [], ); - const onPaneClick = useCallback((event: React.MouseEvent) => { - setFilterEdge([]); - if (isAddingNote) { - const shadowBox = 
document.getElementById('shadow-box'); - if (shadowBox) { - shadowBox.style.display = 'none'; - } - const position = reactFlowInstance?.screenToFlowPosition({ - x: event.clientX, - y: event.clientY, - }); - const data = { - node: { - description: "", - display_name: "", - documentation: "", - template: {}, - }, - type: "note", - }; - const newId = getNodeId(data.type); - - const newNode: NodeType = { - id: newId, - type: "noteNode", - position: position || { x: 0, y: 0 }, - data: { - ...data, + const onPaneClick = useCallback( + (event: React.MouseEvent) => { + setFilterEdge([]); + if (isAddingNote) { + const shadowBox = document.getElementById("shadow-box"); + if (shadowBox) { + shadowBox.style.display = "none"; + } + const position = reactFlowInstance?.screenToFlowPosition({ + x: event.clientX, + y: event.clientY, + }); + const data = { + node: { + description: "", + display_name: "", + documentation: "", + template: {}, + }, + type: "note", + }; + const newId = getNodeId(data.type); + + const newNode: NodeType = { id: newId, - }, - }; - setNodes((nds) => nds.concat(newNode)); - setIsAddingNote(false); - } - }, [isAddingNote, setNodes, reactFlowInstance, getNodeId, setFilterEdge]); - - const onPaneMouseMove = useCallback((event: React.MouseEvent) => { - if (isAddingNote) { - const shadowBox = document.getElementById('shadow-box'); - if (shadowBox) { - shadowBox.style.display = 'block'; - shadowBox.style.left = `${event.clientX + 10}px`; - shadowBox.style.top = `${event.clientY + 10}px`; + type: "noteNode", + position: position || { x: 0, y: 0 }, + data: { + ...data, + id: newId, + }, + }; + setNodes((nds) => nds.concat(newNode)); + setIsAddingNote(false); } - } - }, [isAddingNote]); + }, + [isAddingNote, setNodes, reactFlowInstance, getNodeId, setFilterEdge], + ); + + const onPaneMouseMove = useCallback( + (event: React.MouseEvent) => { + if (isAddingNote) { + const shadowBox = document.getElementById("shadow-box"); + if (shadowBox) { + shadowBox.style.display = "block"; + shadowBox.style.left = `${event.clientX + 10}px`; + shadowBox.style.top = `${event.clientY + 10}px`; + } + } + }, + [isAddingNote], + ); return (
@@ -538,7 +544,7 @@ export default function Page({ view }: { view?: boolean }): JSX.Element { { - setIsAddingNote(true) + setIsAddingNote(true); }} className="postion react-flow__controls absolute -top-10" > @@ -563,13 +569,15 @@ export default function Page({ view }: { view?: boolean }): JSX.Element { }} /> -
-
+
) : (
From b56ffa46f1d14a1bc59fd14e1020cb8565fd75a9 Mon Sep 17 00:00:00 2001 From: Yuqi Tang Date: Tue, 1 Oct 2024 08:18:18 -0700 Subject: [PATCH 05/11] change cursor position --- .../src/pages/FlowPage/components/PageComponent/index.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx b/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx index e720b5a405f..46d9a1e64ee 100644 --- a/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx +++ b/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx @@ -494,8 +494,8 @@ export default function Page({ view }: { view?: boolean }): JSX.Element { const shadowBox = document.getElementById("shadow-box"); if (shadowBox) { shadowBox.style.display = "block"; - shadowBox.style.left = `${event.clientX + 10}px`; - shadowBox.style.top = `${event.clientY + 10}px`; + shadowBox.style.left = `${event.clientX + 1}px`; + shadowBox.style.top = `${event.clientY + 1}px`; } } }, From 37f4f00659024e1e107b8fb2e3290098cd9b8848 Mon Sep 17 00:00:00 2001 From: Yuqi Tang Date: Tue, 1 Oct 2024 11:07:20 -0700 Subject: [PATCH 06/11] update notes related test --- .../extended/features/sticky-notes.spec.ts | 22 ++++++++----------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/src/frontend/tests/extended/features/sticky-notes.spec.ts b/src/frontend/tests/extended/features/sticky-notes.spec.ts index a3358ebf50c..2e4e3b502ba 100644 --- a/src/frontend/tests/extended/features/sticky-notes.spec.ts +++ b/src/frontend/tests/extended/features/sticky-notes.spec.ts @@ -30,8 +30,11 @@ test("user should be able to interact with sticky notes", async ({ page }) => { control = "Meta"; } - const noteText = ` - Artificial Intelligence (AI) has rapidly evolved from a speculative concept in science fiction to a transformative force reshaping industries and everyday life. The term AI encompasses a broad range of technologies, from simple algorithms designed to perform specific tasks to complex systems capable of learning and adapting independently. As AI continues to advance, its applications are becoming increasingly diverse, impacting everything from healthcare to finance, entertainment, and beyond. + const randomTitle = Math.random().toString(36).substring(7).padEnd(8, 'x').substring(0, 8); + + const noteText = `# ${randomTitle} + +Artificial Intelligence (AI) has rapidly evolved from a speculative concept in science fiction to a transformative force reshaping industries and everyday life. The term AI encompasses a broad range of technologies, from simple algorithms designed to perform specific tasks to complex systems capable of learning and adapting independently. As AI continues to advance, its applications are becoming increasingly diverse, impacting everything from healthcare to finance, entertainment, and beyond. At its core, AI is about creating systems that can perform tasks that would typically require human intelligence. This includes abilities such as visual perception, speech recognition, decision-making, and even language translation. The development of AI can be traced back to the mid-20th century, when pioneers like Alan Turing began exploring the idea of machines that could think. Turing's famous "Turing Test" proposed a benchmark for AI, where a machine would be considered intelligent if it could engage in a conversation with a human without being detected as a machine. 
@@ -50,8 +53,6 @@ Despite its many benefits, AI also raises important ethical and societal questio The future of AI is both exciting and uncertain. As the technology continues to advance, it will undoubtedly bring about profound changes in society. The challenge will be to harness AI's potential for good while addressing the ethical and societal issues that arise. Whether it's through smarter healthcare, more efficient transportation, or enhanced creativity, AI has the potential to reshape the world in ways we are only beginning to imagine. The journey of AI is far from over, and its impact will be felt for generations to come. `; - const randomTitle = Math.random().toString(36).substring(7); - while (modalCount === 0) { await page.getByText("New Project", { exact: true }).click(); await page.waitForTimeout(3000); @@ -70,6 +71,7 @@ The future of AI is both exciting and uncertain. As the technology continues to await page.waitForTimeout(1000); const targetElement = await page.locator('//*[@id="react-flow-id"]'); + await targetElement.click(); await page.mouse.up(); await page.mouse.down(); @@ -83,20 +85,14 @@ The future of AI is both exciting and uncertain. As the technology continues to await page.getByTitle("zoom out").click(); await page.getByTestId("note_node").click(); - - await page.getByTestId("title-Note").dblclick(); - await page.waitForTimeout(1000); - await page.getByTestId("popover-anchor-input-title-Note").fill(randomTitle); - - await page.getByTestId("note_icon").first().dblclick(); + await page.locator(".generic-node-desc").last().dblclick(); await page.getByTestId("textarea").fill(noteText); expect(await page.getByText("2500/2500")).toBeVisible(); - await page.getByTestId("note_icon").first().dblclick(); - + await targetElement.click(); const textMarkdown = await page.locator(".markdown").innerText(); const textLength = textMarkdown.length; @@ -110,7 +106,7 @@ The future of AI is both exciting and uncertain. As the technology continues to let hasStyles = await element?.evaluate((el) => { const style = window.getComputedStyle(el); - return style.backgroundColor === "rgb(224, 231, 255)"; + return style.backgroundColor === "rgb(241, 245, 249)"; }); expect(hasStyles).toBe(true); From 46d005a00327d0c016b311261f0b63cd5aca3ea5 Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 18:09:30 +0000 Subject: [PATCH 07/11] [autofix.ci] apply automated fixes --- src/frontend/tests/extended/features/sticky-notes.spec.ts | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/frontend/tests/extended/features/sticky-notes.spec.ts b/src/frontend/tests/extended/features/sticky-notes.spec.ts index 2e4e3b502ba..51304d074c9 100644 --- a/src/frontend/tests/extended/features/sticky-notes.spec.ts +++ b/src/frontend/tests/extended/features/sticky-notes.spec.ts @@ -30,7 +30,11 @@ test("user should be able to interact with sticky notes", async ({ page }) => { control = "Meta"; } - const randomTitle = Math.random().toString(36).substring(7).padEnd(8, 'x').substring(0, 8); + const randomTitle = Math.random() + .toString(36) + .substring(7) + .padEnd(8, "x") + .substring(0, 8); const noteText = `# ${randomTitle} @@ -85,7 +89,6 @@ The future of AI is both exciting and uncertain. 
As the technology continues to await page.getByTitle("zoom out").click(); await page.getByTestId("note_node").click(); - await page.locator(".generic-node-desc").last().dblclick(); await page.getByTestId("textarea").fill(noteText); From 42f0ba776b919f87a19c58d5e96cb15e69c3fdf8 Mon Sep 17 00:00:00 2001 From: Yuqi Tang Date: Tue, 1 Oct 2024 13:48:30 -0700 Subject: [PATCH 08/11] adjust shadow block width --- src/frontend/src/CustomNodes/NoteNode/index.tsx | 2 +- .../src/pages/FlowPage/components/PageComponent/index.tsx | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/frontend/src/CustomNodes/NoteNode/index.tsx b/src/frontend/src/CustomNodes/NoteNode/index.tsx index fcd368875d4..5e1866e5864 100644 --- a/src/frontend/src/CustomNodes/NoteNode/index.tsx +++ b/src/frontend/src/CustomNodes/NoteNode/index.tsx @@ -65,7 +65,7 @@ function NoteNode({ }} ref={nodeDiv} className={cn( - "flex h-full w-full flex-col gap-3 border border-b p-5 transition-all", + "flex h-full w-full flex-col gap-3 border border-b p-3 transition-all", selected ? "" : "-z-50 shadow-sm", )} > diff --git a/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx b/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx index 46d9a1e64ee..cca1c1a713c 100644 --- a/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx +++ b/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx @@ -502,6 +502,8 @@ export default function Page({ view }: { view?: boolean }): JSX.Element { [isAddingNote], ); + const zoomLevel = reactFlowInstance?.getZoom(); + return (
{showCanvas ? ( @@ -576,8 +578,8 @@ export default function Page({ view }: { view?: boolean }): JSX.Element { id="shadow-box" style={{ position: "absolute", - width: `${NOTE_NODE_MIN_WIDTH / 2}px`, - height: `${NOTE_NODE_MIN_HEIGHT / 2}px`, + width: `${NOTE_NODE_MIN_WIDTH * (zoomLevel || 1)}px`, + height: `${NOTE_NODE_MIN_HEIGHT * (zoomLevel || 1)}px`, backgroundColor: `${SHADOW_COLOR_OPTIONS[Object.keys(SHADOW_COLOR_OPTIONS)[0]]}`, }} >
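The width and height changes above tie the preview box to the current zoom level: a dropped note occupies NOTE_NODE_MIN_WIDTH × NOTE_NODE_MIN_HEIGHT in flow units, and React Flow renders flow units multiplied by the zoom factor, so the ghost box is scaled the same way to match the note it previews. A short sketch of that relationship, using the constants already imported at the top of PageComponent; `previewWidth`/`previewHeight` are illustrative names:

    // Screen-space size of the future note = flow-space size × current zoom.
    const zoom = reactFlowInstance?.getZoom() ?? 1;
    const previewWidth = NOTE_NODE_MIN_WIDTH * zoom;   // px on screen
    const previewHeight = NOTE_NODE_MIN_HEIGHT * zoom; // px on screen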
From 6c920082e6a692ab6777baef2bd0474d498507dd Mon Sep 17 00:00:00 2001 From: Yuqi Tang Date: Tue, 1 Oct 2024 15:51:58 -0700 Subject: [PATCH 09/11] move cursor to middle: --- .../components/PageComponent/index.tsx | 22 +++++++++++-------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx b/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx index cca1c1a713c..29ca073bb54 100644 --- a/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx +++ b/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx @@ -116,6 +116,11 @@ export default function Page({ view }: { view?: boolean }): JSX.Element { const [isAddingNote, setIsAddingNote] = useState(false); + const zoomLevel = reactFlowInstance?.getZoom(); + const shadowBoxWidth = NOTE_NODE_MIN_WIDTH * (zoomLevel || 1); + const shadowBoxHeight = NOTE_NODE_MIN_HEIGHT * (zoomLevel || 1); + const shadowBoxBackgroundColor = SHADOW_COLOR_OPTIONS[Object.keys(SHADOW_COLOR_OPTIONS)[0]]; + function handleGroupNode() { takeSnapshot(); if (validateSelection(lastSelection!, edges).length === 0) { @@ -458,8 +463,8 @@ export default function Page({ view }: { view?: boolean }): JSX.Element { shadowBox.style.display = "none"; } const position = reactFlowInstance?.screenToFlowPosition({ - x: event.clientX, - y: event.clientY, + x: event.clientX - shadowBoxWidth / 2, + y: event.clientY - shadowBoxHeight / 2, }); const data = { node: { @@ -494,16 +499,14 @@ export default function Page({ view }: { view?: boolean }): JSX.Element { const shadowBox = document.getElementById("shadow-box"); if (shadowBox) { shadowBox.style.display = "block"; - shadowBox.style.left = `${event.clientX + 1}px`; - shadowBox.style.top = `${event.clientY + 1}px`; + shadowBox.style.left = `${event.clientX - shadowBoxWidth / 2}px`; + shadowBox.style.top = `${event.clientY - shadowBoxHeight / 2}px`; } } }, [isAddingNote], ); - const zoomLevel = reactFlowInstance?.getZoom(); - return (
{showCanvas ? ( @@ -578,9 +581,10 @@ export default function Page({ view }: { view?: boolean }): JSX.Element { id="shadow-box" style={{ position: "absolute", - width: `${NOTE_NODE_MIN_WIDTH * (zoomLevel || 1)}px`, - height: `${NOTE_NODE_MIN_HEIGHT * (zoomLevel || 1)}px`, - backgroundColor: `${SHADOW_COLOR_OPTIONS[Object.keys(SHADOW_COLOR_OPTIONS)[0]]}`, + width: `${shadowBoxWidth}px`, + height: `${shadowBoxHeight}px`, + backgroundColor: `${shadowBoxBackgroundColor}`, + pointerEvents: "none", }} >
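Subtracting half of the scaled box before converting is what puts the cursor at the note's center rather than at its top-left corner: `screenToFlowPosition` divides screen distances by the zoom factor, so a screen offset of `shadowBoxWidth / 2` becomes exactly half a note in flow units. A small sketch of the arithmetic, assuming `zoom` is the value returned by `getZoom()` and `shadowBoxWidth = NOTE_NODE_MIN_WIDTH * zoom` as defined earlier in the diff:

    const screenOffset = shadowBoxWidth / 2; // half the preview, in screen px
    const flowOffset = screenOffset / zoom;  // = NOTE_NODE_MIN_WIDTH / 2 flow units

The same half-size offset is applied to the ghost box's `left`/`top`, which keeps the preview and the dropped note aligned under the cursor.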
From 9121f1e549be5ad27fe4f42fba23969b55b2a688 Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 22:53:14 +0000 Subject: [PATCH 10/11] [autofix.ci] apply automated fixes --- .../src/pages/FlowPage/components/PageComponent/index.tsx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx b/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx index 29ca073bb54..91a4941a5f6 100644 --- a/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx +++ b/src/frontend/src/pages/FlowPage/components/PageComponent/index.tsx @@ -119,7 +119,8 @@ export default function Page({ view }: { view?: boolean }): JSX.Element { const zoomLevel = reactFlowInstance?.getZoom(); const shadowBoxWidth = NOTE_NODE_MIN_WIDTH * (zoomLevel || 1); const shadowBoxHeight = NOTE_NODE_MIN_HEIGHT * (zoomLevel || 1); - const shadowBoxBackgroundColor = SHADOW_COLOR_OPTIONS[Object.keys(SHADOW_COLOR_OPTIONS)[0]]; + const shadowBoxBackgroundColor = + SHADOW_COLOR_OPTIONS[Object.keys(SHADOW_COLOR_OPTIONS)[0]]; function handleGroupNode() { takeSnapshot(); From ca3bdb76aae2f97c0cede7254b052fc36e94dac8 Mon Sep 17 00:00:00 2001 From: Yuqi Tang Date: Tue, 1 Oct 2024 16:21:09 -0700 Subject: [PATCH 11/11] fix padding --- src/frontend/src/CustomNodes/NoteNode/index.tsx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/frontend/src/CustomNodes/NoteNode/index.tsx b/src/frontend/src/CustomNodes/NoteNode/index.tsx index 5e1866e5864..234f213e5b8 100644 --- a/src/frontend/src/CustomNodes/NoteNode/index.tsx +++ b/src/frontend/src/CustomNodes/NoteNode/index.tsx @@ -26,8 +26,8 @@ function NoteNode({ useEffect(() => { if (nodeDiv.current) { setSize({ - width: nodeDiv.current.offsetWidth - 43, - height: nodeDiv.current.offsetHeight - 80, + width: nodeDiv.current.offsetWidth - 25, + height: nodeDiv.current.offsetHeight - 25, }); } }, []); @@ -49,7 +49,7 @@ function NoteNode({ maxWidth={NOTE_NODE_MAX_WIDTH} onResize={(_, params) => { const { width, height } = params; - setSize({ width: width - 43, height: height - 80 }); + setSize({ width: width - 25, height: height - 25 }); }} isVisible={selected} lineClassName="border-[3px] border-border"
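This last change keeps the editable area in step with the p-3 padding introduced earlier in the series: with roughly 12 px of padding per side plus the 1 px border, about 25 px of each dimension belongs to the note's own chrome, so the inner markdown/textarea size is the measured node size minus that amount, both on first render and on resize. A sketch with the magic number named for clarity; `NOTE_CONTENT_INSET` is not a constant in the codebase, only an illustration of what the 25 appears to stand for:

    // Assumption, named for clarity: ~25px ≈ 2 × 12px padding (p-3) + border.
    const NOTE_CONTENT_INSET = 25;
    const width = nodeDiv.current!.offsetWidth - NOTE_CONTENT_INSET;
    const height = nodeDiv.current!.offsetHeight - NOTE_CONTENT_INSET;
    setSize({ width, height });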