
Commit

Add additional docs in retrieval agent if required (#1028)
* Update conversable_agent.py

* Add files via upload

* Delete notebook/Async_human_input.ipynb

* Add files via upload

* refactor:formatter

* feat:updated position

* Update dbutils.py

* added feature to add docs in retrieve

* Update dbutils.py

* Update retrieve_user_proxy_agent.py

* Update retrieve_utils.py

* Update qdrant_retrieve_user_proxy_agent.py

* Update qdrant_retrieve_user_proxy_agent.py

* feat:fixed pre commit issue

---------

Co-authored-by: Chi Wang <[email protected]>
Co-authored-by: svrapidinnovation <[email protected]>
Co-authored-by: Li Jiang <[email protected]>
Co-authored-by: Qingyun Wu <[email protected]>
5 people authored Dec 25, 2023
1 parent 70cc1f4 commit ebd5de9
Showing 3 changed files with 31 additions and 5 deletions.
22 changes: 19 additions & 3 deletions autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py
@@ -47,6 +47,9 @@ def __init__(
will be used. If you want to use other vector db, extend this class and override the `retrieve_docs` function.
- docs_path (Optional, Union[str, List[str]]): the path to the docs directory. It can also be the path to a single file,
the url to a single file or a list of directories, files and urls. Default is None, which works only if the collection is already created.
- extra_docs (Optional, bool): whether to add new documents to an existing collection. When true, new document chunks are assigned
unique IDs starting from "length+i" (the current collection size plus the chunk index), so existing documents are preserved and more content can be added to the collection.
Default is false, in which case document IDs start from zero and new documents may overwrite existing ones, causing unintended loss or alteration of data in the collection.
- collection_name (Optional, str): the name of the collection.
If key not provided, a default name `autogen-docs` will be used.
- model (Optional, str): the model to use for the retrieve chat.
@@ -116,6 +119,7 @@ def retrieve_docs(self, problem: str, n_results: int = 20, search_string: str =
custom_text_split_function=self.custom_text_split_function,
custom_text_types=self._custom_text_types,
recursive=self._recursive,
extra_docs=self._extra_docs,
parallel=self._parallel,
on_disk=self._on_disk,
quantization_config=self._quantization_config,
@@ -146,6 +150,7 @@ def create_qdrant_from_dir(
custom_text_split_function: Callable = None,
custom_text_types: List[str] = TEXT_FORMATS,
recursive: bool = True,
extra_docs: bool = False,
parallel: int = 0,
on_disk: bool = False,
quantization_config: Optional[models.QuantizationConfig] = None,
Expand All @@ -169,6 +174,7 @@ def create_qdrant_from_dir(
Default is None, will use the default function in `autogen.retrieve_utils.split_text_to_chunks`.
custom_text_types (Optional, List[str]): a list of file types to be processed. Default is TEXT_FORMATS.
recursive (Optional, bool): whether to search documents recursively in the dir_path. Default is True.
extra_docs (Optional, bool): whether to add more documents to an existing collection. Default is False.
parallel (Optional, int): How many parallel workers to use for embedding. Defaults to the number of CPU cores
on_disk (Optional, bool): Whether to store the collection on disk. Default is False.
quantization_config: Quantization configuration. If None, quantization will be disabled.
@@ -194,22 +200,32 @@
)
logger.info(f"Found {len(chunks)} chunks.")

collection = None
# Check if collection by same name exists, if not, create it with custom options
try:
client.get_collection(collection_name=collection_name)
collection = client.get_collection(collection_name=collection_name)
except Exception:
client.create_collection(
collection_name=collection_name,
vectors_config=client.get_fastembed_vector_params(
on_disk=on_disk, quantization_config=quantization_config, hnsw_config=hnsw_config
),
)
client.get_collection(collection_name=collection_name)
collection = client.get_collection(collection_name=collection_name)

length = 0
if extra_docs:
length = collection.points_count or 0  # number of points already stored in the Qdrant collection (CollectionInfo has no chromadb-style .get())

# Upsert in batch of 100 or less if the total number of chunks is less than 100
for i in range(0, len(chunks), min(100, len(chunks))):
end_idx = i + min(100, len(chunks) - i)
client.add(collection_name, documents=chunks[i:end_idx], ids=[j for j in range(i, end_idx)], parallel=parallel)
client.add(
collection_name,
documents=chunks[i:end_idx],
ids=[(j + length) for j in range(i, end_idx)],
parallel=parallel,
)

# Create a payload index for the document field
# Enables highly efficient payload filtering. Reference: https://qdrant.tech/documentation/concepts/indexing/#indexing
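
A usage sketch of the new flag with `create_qdrant_from_dir` (the directory paths and in-memory client are hypothetical, and the fastembed-backed `client.add` used above is assumed to be installed): the first call creates the collection with chunk IDs starting at 0; the second call passes `extra_docs=True`, so the size of the existing collection is used as an offset and new chunk IDs continue after the existing ones.

```python
from qdrant_client import QdrantClient

from autogen.agentchat.contrib.qdrant_retrieve_user_proxy_agent import create_qdrant_from_dir

client = QdrantClient(location=":memory:")

# First ingestion: the collection is created and chunk IDs start at 0.
create_qdrant_from_dir(
    dir_path="./docs",  # hypothetical directory
    client=client,
    collection_name="autogen-docs",
)

# Second ingestion: with extra_docs=True the existing collection size is used as
# an offset, so new chunk IDs continue after the existing ones instead of
# starting again at 0 and overwriting them.
create_qdrant_from_dir(
    dir_path="./more_docs",  # hypothetical directory
    client=client,
    collection_name="autogen-docs",
    extra_docs=True,
)
```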
5 changes: 5 additions & 0 deletions autogen/agentchat/contrib/retrieve_user_proxy_agent.py
@@ -100,6 +100,9 @@ def __init__(
will be used. If you want to use other vector db, extend this class and override the `retrieve_docs` function.
- docs_path (Optional, Union[str, List[str]]): the path to the docs directory. It can also be the path to a single file,
the url to a single file or a list of directories, files and urls. Default is None, which works only if the collection is already created.
- extra_docs (Optional, bool): whether to add new documents to an existing collection. When true, new document chunks are assigned
unique IDs starting from "length+i" (the current collection size plus the chunk index), so existing documents are preserved and more content can be added to the collection.
Default is false, in which case document IDs start from zero and new documents may overwrite existing ones, causing unintended loss or alteration of data in the collection.
- collection_name (Optional, str): the name of the collection.
If key not provided, a default name `autogen-docs` will be used.
- model (Optional, str): the model to use for the retrieve chat.
@@ -171,6 +174,7 @@ def retrieve_docs(self, problem: str, n_results: int = 20, search_string: str =
self._task = self._retrieve_config.get("task", "default")
self._client = self._retrieve_config.get("client", chromadb.Client())
self._docs_path = self._retrieve_config.get("docs_path", None)
self._extra_docs = self._retrieve_config.get("extra_docs", False)
self._collection_name = self._retrieve_config.get("collection_name", "autogen-docs")
if "docs_path" not in self._retrieve_config:
logger.warning(
@@ -392,6 +396,7 @@ def retrieve_docs(self, problem: str, n_results: int = 20, search_string: str =
custom_text_split_function=self.custom_text_split_function,
custom_text_types=self._custom_text_types,
recursive=self._recursive,
extra_docs=self._extra_docs,
)
self._collection = True
self._get_or_create = True
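
For the Chroma-backed agent, `extra_docs` is read from `retrieve_config` (see the `self._extra_docs` lookup above). A minimal configuration sketch, with hypothetical paths and collection name, for appending a second batch of documents to an existing collection:

```python
import chromadb

from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent

ragproxyagent = RetrieveUserProxyAgent(
    name="ragproxyagent",
    human_input_mode="NEVER",
    retrieve_config={
        "task": "qa",
        "docs_path": "./new_docs",  # hypothetical path containing the extra files
        "collection_name": "autogen-docs",  # existing collection to extend
        "client": chromadb.PersistentClient(path="/tmp/chromadb"),  # hypothetical storage path
        "extra_docs": True,  # append with offset IDs instead of overwriting doc_0, doc_1, ...
    },
)
```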
9 changes: 7 additions & 2 deletions autogen/retrieve_utils.py
@@ -250,6 +250,7 @@ def create_vector_db_from_dir(
custom_text_split_function: Callable = None,
custom_text_types: List[str] = TEXT_FORMATS,
recursive: bool = True,
extra_docs: bool = False,
) -> API:
"""Create a vector db from all the files in a given directory, the directory can also be a single file or a url to
a single file. We support chromadb compatible APIs to create the vector db, this function is not required if
@@ -274,7 +275,7 @@
Default is None, will use the default function in `autogen.retrieve_utils.split_text_to_chunks`.
custom_text_types (Optional, List[str]): a list of file types to be processed. Default is TEXT_FORMATS.
recursive (Optional, bool): whether to search documents recursively in the dir_path. Default is True.
extra_docs (Optional, bool): whether to add more documents to an existing collection. Default is False.
Returns:
API: the chromadb client.
"""
@@ -296,6 +297,10 @@
metadata={"hnsw:space": "ip", "hnsw:construction_ef": 30, "hnsw:M": 32}, # ip, l2, cosine
)

length = 0
if extra_docs:
length = len(collection.get()["ids"])

if custom_text_split_function is not None:
chunks = split_files_to_chunks(
get_files_from_dir(dir_path, custom_text_types, recursive),
@@ -314,7 +319,7 @@
end_idx = i + min(40000, len(chunks) - i)
collection.upsert(
documents=chunks[i:end_idx],
ids=[f"doc_{j}" for j in range(i, end_idx)], # unique for each doc
ids=[f"doc_{j+length}" for j in range(i, end_idx)], # unique for each doc
)
except ValueError as e:
logger.warning(f"{e}")
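
A standalone illustration of the ID scheme (plain Python, not a library call): chromadb's `upsert` replaces documents whose IDs already exist, so without the offset a second ingestion would reuse `doc_0`, `doc_1`, ... and silently overwrite earlier chunks, while `extra_docs=True` continues numbering from the current collection size.

```python
# Simulate the ID assignment used in create_vector_db_from_dir.
existing_ids = [f"doc_{j}" for j in range(10)]  # IDs already in the collection
length = len(existing_ids)

new_chunks = ["chunk a", "chunk b", "chunk c"]

ids_without_offset = [f"doc_{j}" for j in range(len(new_chunks))]
ids_with_offset = [f"doc_{j + length}" for j in range(len(new_chunks))]

print(ids_without_offset)  # ['doc_0', 'doc_1', 'doc_2']   -> collide with existing documents
print(ids_with_offset)     # ['doc_10', 'doc_11', 'doc_12'] -> appended safely
```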
