From f669f57ec522068016e61dfed93b82f49dd89662 Mon Sep 17 00:00:00 2001
From: Dishant1804
Date: Fri, 17 Oct 2025 00:05:37 +0530
Subject: [PATCH 1/7] agentic rag

---
 backend/apps/ai/Makefile                      |   6 +-
 backend/apps/ai/agent/agent.py                |  70 ++++
 backend/apps/ai/agent/nodes.py                | 269 ++++++++++++++
 backend/apps/ai/agent/tools/rag/generator.py  |  12 +-
 backend/apps/ai/agent/tools/rag/rag_tool.py   |  63 ----
 backend/apps/ai/common/constants.py           |   4 +-
 .../management/commands/ai_run_agentic_rag.py |  34 ++
 .../ai/management/commands/ai_run_rag_tool.py |  70 ----
 backend/apps/ai/models/chunk.py               |   4 +-
 .../core/migrations/0003_alter_prompt_text.py |  17 +
 backend/apps/core/models/prompt.py            |  22 +-
 backend/apps/slack/common/handlers/ai.py      |  13 +-
 backend/apps/slack/events/app_mention.py      |  14 +-
 backend/poetry.lock                           | 350 +++++++++++++++++-
 backend/pyproject.toml                        |   1 +
 .../apps/ai/agent/tools/rag/generator_test.py |   2 +-
 .../apps/ai/agent/tools/rag/rag_tool_test.py  | 179 ---------
 .../commands/ai_run_rag_tool_test.py          | 142 -------
 .../apps/slack/common/handlers/ai_test.py     |  53 ++-
 19 files changed, 812 insertions(+), 513 deletions(-)
 create mode 100644 backend/apps/ai/agent/agent.py
 create mode 100644 backend/apps/ai/agent/nodes.py
 delete mode 100644 backend/apps/ai/agent/tools/rag/rag_tool.py
 create mode 100644 backend/apps/ai/management/commands/ai_run_agentic_rag.py
 delete mode 100644 backend/apps/ai/management/commands/ai_run_rag_tool.py
 create mode 100644 backend/apps/core/migrations/0003_alter_prompt_text.py
 delete mode 100644 backend/tests/apps/ai/agent/tools/rag/rag_tool_test.py
 delete mode 100644 backend/tests/apps/ai/management/commands/ai_run_rag_tool_test.py

diff --git a/backend/apps/ai/Makefile b/backend/apps/ai/Makefile
index 4e793a5393..2ee28cbb24 100644
--- a/backend/apps/ai/Makefile
+++ b/backend/apps/ai/Makefile
@@ -1,6 +1,6 @@
-ai-run-rag-tool:
-	@echo "Running RAG tool"
-	@CMD="python manage.py ai_run_rag_tool" $(MAKE) exec-backend-command
+ai-run-agentic-rag:
+	@echo "Running agentic RAG"
+	@CMD="python manage.py ai_run_agentic_rag" $(MAKE) exec-backend-command
 
 ai-update-chapter-chunks:
 	@echo "Updating chapter chunks"
diff --git a/backend/apps/ai/agent/agent.py b/backend/apps/ai/agent/agent.py
new file mode 100644
index 0000000000..917dfa10e0
--- /dev/null
+++ b/backend/apps/ai/agent/agent.py
@@ -0,0 +1,70 @@
+"""LangGraph-powered agent for iterative RAG answering."""
+
+from __future__ import annotations
+
+import logging
+from typing import Any
+
+from langgraph.graph import END, START, StateGraph
+
+from apps.ai.agent.nodes import AgentNodes
+from apps.ai.common.constants import (
+    DEFAULT_CHUNKS_RETRIEVAL_LIMIT,
+    DEFAULT_SIMILARITY_THRESHOLD,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class AgenticRAGAgent:
+    """LangGraph-based controller for agentic RAG with self-correcting retrieval."""
+
+    def __init__(self) -> None:
+        """Initialize the AgenticRAGAgent."""
+        self.nodes = AgentNodes()
+        self.graph = self.build_graph()
+
+    def run(
+        self,
+        query: str,
+    ) -> dict[str, Any]:
+        """Execute the full RAG loop."""
+        initial_state: dict[str, Any] = {
+            "query": query,
+            "iteration": 0,
+            "feedback": None,
+            "history": [],
+            "content_types": [],
+            "limit": DEFAULT_CHUNKS_RETRIEVAL_LIMIT,
+            "similarity_threshold": DEFAULT_SIMILARITY_THRESHOLD,
+        }
+
+        logger.info("Starting Agentic RAG workflow with metadata-aware retrieval")
+        final_state = self.graph.invoke(initial_state)
+
+        return {
+            "answer": final_state.get("answer", ""),
+            "iterations": final_state.get("iteration", 0),
"evaluation": final_state.get("evaluation", {}), + "context_chunks": final_state.get("context_chunks", []), + "history": final_state.get("history", []), + "extracted_metadata": final_state.get("extracted_metadata", {}), + } + + def build_graph(self): + """Build the LangGraph state machine for the RAG workflow.""" + graph = StateGraph(dict) + graph.add_node("retrieve", self.nodes.retrieve) + graph.add_node("generate", self.nodes.generate) + graph.add_node("evaluate", self.nodes.evaluate) + + graph.add_edge(START, "retrieve") + graph.add_edge("retrieve", "generate") + graph.add_edge("generate", "evaluate") + graph.add_conditional_edges( + "evaluate", + self.nodes.route_from_evaluation, + {"refine": "generate", "complete": END}, + ) + + return graph.compile() diff --git a/backend/apps/ai/agent/nodes.py b/backend/apps/ai/agent/nodes.py new file mode 100644 index 0000000000..48622e953b --- /dev/null +++ b/backend/apps/ai/agent/nodes.py @@ -0,0 +1,269 @@ +"""LangGraph nodes for the Agentic RAG workflow.""" + +from __future__ import annotations + +import json +import os +from typing import Any + +import openai +from django.core.exceptions import ObjectDoesNotExist + +from apps.ai.agent.tools.rag.generator import Generator +from apps.ai.agent.tools.rag.retriever import Retriever +from apps.ai.common.constants import ( + DEFAULT_CHUNKS_RETRIEVAL_LIMIT, + DEFAULT_MAX_ITERATIONS, + DEFAULT_REASONING_MODEL, + DEFAULT_SIMILARITY_THRESHOLD, +) +from apps.core.models.prompt import Prompt + + +class AgentNodes: + """Collection of LangGraph node functions with injected dependencies.""" + + def __init__(self) -> None: + """Initialize AgentNodes.""" + if not (openai_api_key := os.getenv("DJANGO_OPEN_AI_SECRET_KEY")): + error_msg = "DJANGO_OPEN_AI_SECRET_KEY environment variable not set" + raise ValueError(error_msg) + + self.openai_client = openai.OpenAI(api_key=openai_api_key) + + self.retriever = Retriever() + self.generator = Generator() + + def retrieve(self, state: dict[str, Any]) -> dict[str, Any]: + """Retrieve context chunks based on the query.""" + if state.get("context_chunks"): + return state + + limit = state.get("limit", DEFAULT_CHUNKS_RETRIEVAL_LIMIT) + threshold = state.get("similarity_threshold", DEFAULT_SIMILARITY_THRESHOLD) + query = state["query"] + + if "extracted_metadata" not in state: + state["extracted_metadata"] = self.extract_query_metadata(query) + + metadata = state["extracted_metadata"] + + chunks = self.retriever.retrieve( + query=query, + limit=limit, + similarity_threshold=threshold, + content_types=metadata.get("entity_types"), + ) + + filtered_chunks = self.filter_chunks_by_metadata(chunks, metadata) + + state["context_chunks"] = filtered_chunks[:limit] + return state + + def generate(self, state: dict[str, Any]) -> dict[str, Any]: + """Generate an answer using the retrieved context.""" + iteration = state.get("iteration", 0) + 1 + feedback = state.get("feedback") + query = state["query"] + augmented_query = ( + query if not feedback else f"{query}\\n\\nRevise per feedback:\\n{feedback}" + ) + + answer = self.generator.generate_answer( + query=augmented_query, + context_chunks=state.get("context_chunks", []), + ) + + history = state.get("history", []) + history.append( + { + "iteration": iteration, + "feedback": feedback, + "query": augmented_query, + "answer": answer, + } + ) + + state.update( + {"answer": answer, "iteration": iteration, "history": history, "feedback": None} + ) + return state + + def evaluate(self, state: dict[str, Any]) -> dict[str, Any]: + """Evaluate 
the generated answer and decide on the next step.""" + answer = state.get("answer", "") + evaluation = self.call_evaluator( + query=state["query"], + answer=answer, + context_chunks=state.get("context_chunks", []), + ) + + history = state.get("history", []) + if history: + history[-1]["evaluation"] = evaluation + + if "missing context" in evaluation.get("justification", "").lower(): + limit = state.get("limit", DEFAULT_CHUNKS_RETRIEVAL_LIMIT) * 2 + threshold = state.get("similarity_threshold", DEFAULT_SIMILARITY_THRESHOLD) * 0.95 + + metadata = state.get("extracted_metadata", {}) + + new_chunks = self.retriever.retrieve( + query=state["query"], + limit=limit, + similarity_threshold=threshold, + content_types=metadata.get("entity_types"), + ) + + filtered_chunks = self.filter_chunks_by_metadata(new_chunks, metadata) + state["context_chunks"] = filtered_chunks[:limit] + + state["feedback"] = "Expand and refine answer using newly retrieved context." + else: + state["feedback"] = evaluation.get("feedback") or None + + state.update({"evaluation": evaluation, "history": history}) + return state + + def route_from_evaluation(self, state: dict[str, Any]) -> str: + """Route the workflow based on the evaluation result.""" + evaluation = state.get("evaluation") or {} + iteration = state.get("iteration", 0) + if evaluation.get("complete") or iteration >= DEFAULT_MAX_ITERATIONS: + return "complete" + return "refine" + + def filter_chunks_by_metadata( + self, + retrieved_chunks: list[dict[str, Any]], + query_metadata: dict[str, Any], + ) -> list[dict[str, Any]]: + """Rank and filter retrieved chunks using metadata and simple heuristics.""" + if not retrieved_chunks: + return [] + + requested_fields = query_metadata.get("requested_fields", []) + query_filters = query_metadata.get("filters", {}) + + if not requested_fields and not query_filters: + return retrieved_chunks + + ranked_chunks: list[tuple[dict[str, Any], float]] = [] + for chunk in retrieved_chunks: + relevance_score = 0.0 + chunk_metadata = chunk.get("additional_context", {}) + chunk_content = chunk.get("text", "").lower() + + for field_name in requested_fields: + if chunk_metadata.get(field_name): + relevance_score += 2 + + for filter_field, filter_value in query_filters.items(): + if filter_field in chunk_metadata: + metadata_value = chunk_metadata[filter_field] + + if isinstance(metadata_value, str) and isinstance(filter_value, str): + if filter_value.lower() in metadata_value.lower(): + relevance_score += 5 + + elif isinstance(metadata_value, list): + if any( + filter_value.lower() in str(item).lower() for item in metadata_value + ): + relevance_score += 5 + + elif metadata_value == filter_value: + relevance_score += 5 + + if isinstance(filter_value, str) and filter_value.lower() in chunk_content: + relevance_score += 3 + + if chunk_metadata: + relevance_score += len(chunk_metadata) * 0.1 + + ranked_chunks.append((chunk, relevance_score)) + + ranked_chunks.sort( + key=lambda entry: (entry[1], entry[0].get("similarity", 0)), reverse=True + ) + + return [chunk for chunk, _ in ranked_chunks[:DEFAULT_CHUNKS_RETRIEVAL_LIMIT]] + + def extract_query_metadata(self, query: str) -> dict[str, Any]: + """Extract metadata from the user's query using an LLM.""" + metadata_extractor_prompt = Prompt.get_metadata_extractor_prompt() + + if not metadata_extractor_prompt: + error_msg = "Prompt with key 'metadata-extractor-prompt' not found." 
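
Reviewer note: both DB-backed prompts are expected to yield a single JSON object (optionally wrapped in a json code fence, which the nodes strip before json.loads). A minimal sketch of the shapes implied by the fallback dictionaries above; the sample values are illustrative assumptions, not part of this patch:

```python
# Shapes inferred from the nodes.py fallbacks and downstream usage; values are made up.
example_query_metadata = {  # parsed from the "metadata-extractor-prompt" response
    "requested_fields": ["leaders"],        # metadata keys scored in filter_chunks_by_metadata
    "entity_types": ["project"],            # forwarded to Retriever as content_types
    "filters": {"level": "flagship"},       # substring-matched against additional_context
    "intent": "list flagship projects",
}

example_evaluation = {  # parsed from the "evaluator-system-prompt" response
    "complete": False,                      # True routes the graph to END
    "feedback": "Name specific projects.",  # injected into the next generate pass
    "justification": "missing context",     # this phrase triggers a wider second retrieval
}
```
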
diff --git a/backend/apps/ai/agent/tools/rag/generator.py b/backend/apps/ai/agent/tools/rag/generator.py
index 4721e9e9f8..a8e38cedee 100644
--- a/backend/apps/ai/agent/tools/rag/generator.py
+++ b/backend/apps/ai/agent/tools/rag/generator.py
@@ -16,7 +16,7 @@ class Generator:
     """Generates answers to user queries based on retrieved context."""
 
     MAX_TOKENS = 2000
-    TEMPERATURE = 0.4
+    TEMPERATURE = 0.8
 
     def __init__(self, chat_model: str = "gpt-4o"):
         """Initialize the Generator.
@@ -73,16 +73,6 @@ def generate_answer(self, query: str, context_chunks: list[dict[str, Any]]) -> s
         formatted_context = self.prepare_context(context_chunks)
 
         user_prompt = f"""
-- You are an assistant for question-answering tasks related to OWASP.
-- Use the following pieces of retrieved context to answer the question.
-- If the question is related to OWASP then you can try to answer based on your knowledge, if you
-don't know the answer, just say that you don't know.
-- Try to give answer and keep the answer concise, but you really think that the response will be
-longer and better you will provide more information.
-- Ask for the current location if the query is related to location.
-- Ask for the information you need if the query is very personalized or user-centric.
-- Do not mention or refer to the word "context", "based on context", "provided information",
-"Information given to me" or similar phrases in your responses.
 Question: {query}
 Context: {formatted_context}
 Answer:
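
For context on the slimmed-down generator contract, a hedged usage sketch (the chunk fields mirror what nodes.py passes in; the sample values and the env var requirement are assumptions based on this patch's setup):

```python
from apps.ai.agent.tools.rag.generator import Generator

# Hypothetical chunk shaped like the retriever output consumed elsewhere in this patch:
# "text" feeds prepare_context(); "similarity" and "additional_context" are used
# upstream when ranking.
chunks = [
    {
        "text": "OWASP Juice Shop is a deliberately insecure web application.",
        "similarity": 0.42,
        "additional_context": {"level": "flagship"},
    }
]

generator = Generator(chat_model="gpt-4o")  # assumes DJANGO_OPEN_AI_SECRET_KEY is set
print(generator.generate_answer(query="What is Juice Shop?", context_chunks=chunks))
```
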
diff --git a/backend/apps/ai/agent/tools/rag/rag_tool.py b/backend/apps/ai/agent/tools/rag/rag_tool.py
deleted file mode 100644
index 8375b4a328..0000000000
--- a/backend/apps/ai/agent/tools/rag/rag_tool.py
+++ /dev/null
@@ -1,63 +0,0 @@
-"""A tool for orchestrating the components of RAG process."""
-
-import logging
-
-from apps.ai.common.constants import DEFAULT_CHUNKS_RETRIEVAL_LIMIT, DEFAULT_SIMILARITY_THRESHOLD
-
-from .generator import Generator
-from .retriever import Retriever
-
-logger = logging.getLogger(__name__)
-
-
-class RagTool:
-    """Main RAG tool that orchestrates the retrieval and generation process."""
-
-    def __init__(
-        self,
-        embedding_model: str = "text-embedding-3-small",
-        chat_model: str = "gpt-4o",
-    ):
-        """Initialize the RAG tool.
-
-        Args:
-            embedding_model (str, optional): The model to use for embeddings.
-            chat_model (str, optional): The model to use for chat generation.
-
-        Raises:
-            ValueError: If the OpenAI API key is not set.
-
-        """
-        self.retriever = Retriever(embedding_model=embedding_model)
-        self.generator = Generator(chat_model=chat_model)
-
-    def query(
-        self,
-        question: str,
-        content_types: list[str] | None = None,
-        limit: int = DEFAULT_CHUNKS_RETRIEVAL_LIMIT,
-        similarity_threshold: float = DEFAULT_SIMILARITY_THRESHOLD,
-    ) -> str:
-        """Process a user query using the complete RAG pipeline.
-
-        Args:
-            question (str): The user's question.
-            content_types (Optional[list[str]]): Content types to filter by.
-            limit (int): Maximum number of context chunks to retrieve.
-            similarity_threshold (float): Minimum similarity score for retrieval.
-
-        Returns:
-            The generated answer as a string.
-
-        """
-        logger.info("Retrieving context for query")
-
-        return self.generator.generate_answer(
-            context_chunks=self.retriever.retrieve(
-                content_types=content_types,
-                limit=limit,
-                query=question,
-                similarity_threshold=similarity_threshold,
-            ),
-            query=question,
-        )
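
RagTool's single-pass pipeline is replaced by the graph loop; callers migrate roughly like this (a sketch; note that run() returns a dict rather than a bare string, per agent.py above):

```python
from apps.ai.agent.agent import AgenticRAGAgent

# Before: RagTool(...).query(question="What is the OWASP Foundation?")
result = AgenticRAGAgent().run(query="What is the OWASP Foundation?")
print(result["answer"])      # final generated answer
print(result["iterations"])  # how many generate/evaluate loops ran
print(result["evaluation"])  # last evaluator verdict
```
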
diff --git a/backend/apps/ai/common/constants.py b/backend/apps/ai/common/constants.py
index 636ef218cc..07942e7e5d 100644
--- a/backend/apps/ai/common/constants.py
+++ b/backend/apps/ai/common/constants.py
@@ -1,7 +1,9 @@
 """AI app constants."""
 
+DEFAULT_CHUNKS_RETRIEVAL_LIMIT = 32
 DEFAULT_LAST_REQUEST_OFFSET_SECONDS = 2
-DEFAULT_CHUNKS_RETRIEVAL_LIMIT = 8
+DEFAULT_MAX_ITERATIONS = 4
+DEFAULT_REASONING_MODEL = "gpt-4o"
 DEFAULT_SIMILARITY_THRESHOLD = 0.1
 DELIMITER = "\n\n"
 GITHUB_REQUEST_INTERVAL_SECONDS = 0.5
diff --git a/backend/apps/ai/management/commands/ai_run_agentic_rag.py b/backend/apps/ai/management/commands/ai_run_agentic_rag.py
new file mode 100644
index 0000000000..f8087f6295
--- /dev/null
+++ b/backend/apps/ai/management/commands/ai_run_agentic_rag.py
@@ -0,0 +1,34 @@
+"""Management command for running the agentic RAG workflow."""
+
+from django.core.management.base import BaseCommand
+
+from apps.ai.agent.agent import AgenticRAGAgent
+
+
+class Command(BaseCommand):
+    """Execute the LangGraph agentic RAG workflow."""
+
+    help = "Execute the LangGraph agentic RAG workflow"
+
+    def add_arguments(self, parser):
+        """Add arguments to the command."""
+        parser.add_argument(
+            "--query",
+            type=str,
+            required=False,
+            help="User query to answer",
+            default="can you list all flagship projects?",
+        )
+
+    def handle(self, *args, **options):
+        """Handle the command."""
+        try:
+            agent = AgenticRAGAgent()
+        except ValueError as error:
+            self.stderr.write(self.style.ERROR(str(error)))
+            return
+
+        result = agent.run(query=options["query"])
+
+        self.stdout.write(self.style.SUCCESS("Agentic RAG workflow completed"))
+        self.stdout.write(f"\nAnswer:\n{result['answer']}")
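
The new command can be exercised via `make ai-run-agentic-rag` or invoked directly; a short sketch using Django's call_command (the query value is illustrative):

```python
from django.core.management import call_command

# Equivalent to: python manage.py ai_run_agentic_rag --query "..."
call_command("ai_run_agentic_rag", query="can you list all flagship projects?")
```
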
diff --git a/backend/apps/ai/management/commands/ai_run_rag_tool.py b/backend/apps/ai/management/commands/ai_run_rag_tool.py
deleted file mode 100644
index cf27f3ba1a..0000000000
--- a/backend/apps/ai/management/commands/ai_run_rag_tool.py
+++ /dev/null
@@ -1,70 +0,0 @@
-"""A command for invoking RAG tool."""
-
-from django.core.management.base import BaseCommand
-
-from apps.ai.agent.tools.rag.rag_tool import RagTool
-from apps.ai.common.constants import (
-    DEFAULT_CHUNKS_RETRIEVAL_LIMIT,
-    DEFAULT_SIMILARITY_THRESHOLD,
-)
-
-
-class Command(BaseCommand):
-    help = "Test the RagTool functionality with a sample query"
-
-    def add_arguments(self, parser):
-        parser.add_argument(
-            "--query",
-            type=str,
-            default="What is OWASP Foundation?",
-            help="Query to test the Rag tool",
-        )
-        parser.add_argument(
-            "--limit",
-            type=int,
-            default=DEFAULT_CHUNKS_RETRIEVAL_LIMIT,
-            help="Maximum number of results to retrieve",
-        )
-        parser.add_argument(
-            "--threshold",
-            type=float,
-            default=DEFAULT_SIMILARITY_THRESHOLD,
-            help="Similarity threshold (0.0 to 1.0)",
-        )
-        parser.add_argument(
-            "--content-types",
-            nargs="+",
-            default=None,
-            help="Content types to filter by (e.g., project chapter)",
-        )
-        parser.add_argument(
-            "--embedding-model",
-            type=str,
-            default="text-embedding-3-small",
-            help="OpenAI embedding model",
-        )
-        parser.add_argument(
-            "--chat-model",
-            type=str,
-            default="gpt-4o",
-            help="OpenAI chat model",
-        )
-
-    def handle(self, *args, **options):
-        try:
-            rag_tool = RagTool(
-                chat_model=options["chat_model"],
-                embedding_model=options["embedding_model"],
-            )
-        except ValueError:
-            self.stderr.write(self.style.ERROR("Initialization error"))
-            return
-
-        self.stdout.write("\nProcessing query...")
-        result = rag_tool.query(
-            content_types=options["content_types"],
-            limit=options["limit"],
-            question=options["query"],
-            similarity_threshold=options["threshold"],
-        )
-        self.stdout.write(f"\nAnswer: {result}")
diff --git a/backend/apps/ai/models/chunk.py b/backend/apps/ai/models/chunk.py
index 521738297b..ad6f6c60cf 100644
--- a/backend/apps/ai/models/chunk.py
+++ b/backend/apps/ai/models/chunk.py
@@ -35,8 +35,8 @@ def bulk_save(chunks, fields=None):
     def split_text(text: str) -> list[str]:
         """Split text into chunks."""
         return RecursiveCharacterTextSplitter(
-            chunk_size=500,
-            chunk_overlap=80,
+            chunk_size=200,
+            chunk_overlap=20,
             length_function=len,
             separators=["\n\n", "\n", " ", ""],
         ).split_text(text)
diff --git a/backend/apps/core/migrations/0003_alter_prompt_text.py b/backend/apps/core/migrations/0003_alter_prompt_text.py
new file mode 100644
index 0000000000..b9710509d3
--- /dev/null
+++ b/backend/apps/core/migrations/0003_alter_prompt_text.py
@@ -0,0 +1,17 @@
+# Generated by Django 5.2.6 on 2025-10-15 13:02
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+    dependencies = [
+        ("core", "0002_alter_prompt_key"),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name="prompt",
+            name="text",
+            field=models.TextField(blank=True, default="", max_length=2000, verbose_name="Text"),
+        ),
+    ]
diff --git a/backend/apps/core/models/prompt.py b/backend/apps/core/models/prompt.py
index 0f65750e43..654cc7d3cd 100644
--- a/backend/apps/core/models/prompt.py
+++ b/backend/apps/core/models/prompt.py
@@ -20,7 +20,7 @@ class Meta:
 
     name = models.CharField(verbose_name="Name", max_length=100)
     key = models.CharField(verbose_name="Key", max_length=100, unique=True, blank=True)
-    text = models.TextField(verbose_name="Text", max_length=1000, default="", blank=True)
+    text = models.TextField(verbose_name="Text", max_length=2000, default="", blank=True)
 
     def __str__(self):
         """Prompt human readable representation."""
@@ -50,6 +50,16 @@ def get_text(key: str) -> str:
             logger.warning("Prompt with key '%s' does not exist.", key)
             return ""
 
+    @staticmethod
+    def get_evaluator_system_prompt() -> str:
+        """Return evaluator system prompt.
+
+        Returns:
+            str: The evaluator system prompt text.
+
+        """
+        return Prompt.get_text("evaluator-system-prompt")
+
     @staticmethod
     def get_github_issue_hint() -> str:
         """Return GitHub issue hint prompt.
@@ -80,6 +90,16 @@ def get_github_issue_project_summary() -> str:
 
         """
         return Prompt.get_text("github-issue-project-summary")
 
+    @staticmethod
+    def get_metadata_extractor_prompt() -> str:
+        """Return metadata extractor prompt.
+
+        Returns:
+            str: The metadata extractor prompt text.
+
+        """
+        return Prompt.get_text("metadata-extractor-prompt")
+
     @staticmethod
     def get_owasp_chapter_suggested_location() -> str:
         """Return OWASP chapter suggested location prompt.
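
Both new accessors return an empty string when the row is missing, and AgentNodes then raises ObjectDoesNotExist, so the prompts must be seeded. A hypothetical seeding sketch (the key names come from this patch; the prompt text itself is a placeholder):

```python
from apps.core.models.prompt import Prompt

# Hypothetical seed step: AgentNodes raises ObjectDoesNotExist if either key is absent.
for key, name in (
    ("evaluator-system-prompt", "Evaluator system prompt"),
    ("metadata-extractor-prompt", "Metadata extractor prompt"),
):
    Prompt.objects.update_or_create(
        key=key,
        defaults={"name": name, "text": "..."},  # real prompt text (<= 2000 chars) goes here
    )
```
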
diff --git a/backend/apps/slack/common/handlers/ai.py b/backend/apps/slack/common/handlers/ai.py
index ef0452e7b8..de90765418 100644
--- a/backend/apps/slack/common/handlers/ai.py
+++ b/backend/apps/slack/common/handlers/ai.py
@@ -4,7 +4,7 @@
 
 import logging
 
-from apps.ai.agent.tools.rag.rag_tool import RagTool
+from apps.ai.agent.agent import AgenticRAGAgent
 from apps.slack.blocks import markdown
 
 logger = logging.getLogger(__name__)
@@ -29,7 +29,7 @@ def get_blocks(query: str) -> list[dict]:
 
 
 def process_ai_query(query: str) -> str | None:
-    """Process the AI query using the RAG tool.
+    """Process the AI query using the agentic RAG agent.
 
     Args:
         query (str): The user's question.
@@ -38,12 +38,9 @@ def process_ai_query(query: str) -> str | None:
         str | None: The AI response or None if error occurred.
 
     """
-    rag_tool = RagTool(
-        chat_model="gpt-4o",
-        embedding_model="text-embedding-3-small",
-    )
-
-    return rag_tool.query(question=query)
+    agent = AgenticRAGAgent()
+    result = agent.run(query=query)
+    return result["answer"]
 
 
 def get_error_blocks() -> list[dict]:
diff --git a/backend/apps/slack/events/app_mention.py b/backend/apps/slack/events/app_mention.py
index aeb33243e9..2a97f05e07 100644
--- a/backend/apps/slack/events/app_mention.py
+++ b/backend/apps/slack/events/app_mention.py
@@ -2,6 +2,7 @@
 
 import logging
 
+from apps.slack.blocks import markdown
 from apps.slack.common.handlers.ai import get_blocks
 from apps.slack.events.event import EventBase
 
@@ -34,10 +35,19 @@ def handle_event(self, event, client):
 
         logger.info("Handling app mention")
 
+        thread_ts = event.get("thread_ts") or event.get("ts")
+
+        placeholder = client.chat_postMessage(
+            channel=channel_id,
+            blocks=[markdown("⏳ Thinking…")],
+            text="Thinking…",
+            thread_ts=thread_ts,
+        )
+
         reply_blocks = get_blocks(query=query)
-        client.chat_postMessage(
+        client.chat_update(
             channel=channel_id,
+            ts=placeholder["ts"],
             blocks=reply_blocks,
             text=query,
-            thread_ts=event.get("thread_ts") or event.get("ts"),
         )
diff --git a/backend/poetry.lock b/backend/poetry.lock
index 67b6f29346..e2ae941e76 100644
--- a/backend/poetry.lock
+++ b/backend/poetry.lock
@@ -1735,6 +1735,74 @@ files = [
 [package.dependencies]
 langchain-core = ">=0.3.75,<2.0.0"
 
+[[package]]
+name = "langgraph"
+version = "0.6.10"
+description = "Building stateful, multi-actor applications with LLMs"
+optional = false
+python-versions = ">=3.9"
+groups = ["main"]
+files = [
+    {file = "langgraph-0.6.10-py3-none-any.whl", hash = "sha256:b16baacd38895f6f4aa51e03b8a5b5f8695cff96fd0e8b637b725186ea27237c"},
+    {file = "langgraph-0.6.10.tar.gz", hash = "sha256:37457595ef3becebca94b3c4711a8bcd539b5eae7560f2cec409eae0d8113c59"},
+]
+
+[package.dependencies]
+langchain-core = ">=0.1"
+langgraph-checkpoint = ">=2.1.0,<3.0.0"
+langgraph-prebuilt = ">=0.6.0,<0.7.0"
+langgraph-sdk = ">=0.2.2,<0.3.0"
+pydantic = ">=2.7.4"
+xxhash = ">=3.5.0"
+
+[[package]]
+name = "langgraph-checkpoint"
+version = "2.1.2"
+description = "Library with base interfaces for LangGraph checkpoint savers."
+optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "langgraph_checkpoint-2.1.2-py3-none-any.whl", hash = "sha256:911ebffb069fd01775d4b5184c04aaafc2962fcdf50cf49d524cd4367c4d0c60"}, + {file = "langgraph_checkpoint-2.1.2.tar.gz", hash = "sha256:112e9d067a6eff8937caf198421b1ffba8d9207193f14ac6f89930c1260c06f9"}, +] + +[package.dependencies] +langchain-core = ">=0.2.38" +ormsgpack = ">=1.10.0" + +[[package]] +name = "langgraph-prebuilt" +version = "0.6.4" +description = "Library with high-level APIs for creating and executing LangGraph agents and tools." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "langgraph_prebuilt-0.6.4-py3-none-any.whl", hash = "sha256:819f31d88b84cb2729ff1b79db2d51e9506b8fb7aaacfc0d359d4fe16e717344"}, + {file = "langgraph_prebuilt-0.6.4.tar.gz", hash = "sha256:e9e53b906ee5df46541d1dc5303239e815d3ec551e52bb03dd6463acc79ec28f"}, +] + +[package.dependencies] +langchain-core = ">=0.3.67" +langgraph-checkpoint = ">=2.1.0,<3.0.0" + +[[package]] +name = "langgraph-sdk" +version = "0.2.9" +description = "SDK for interacting with LangGraph API" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "langgraph_sdk-0.2.9-py3-none-any.whl", hash = "sha256:fbf302edadbf0fb343596f91c597794e936ef68eebc0d3e1d358b6f9f72a1429"}, + {file = "langgraph_sdk-0.2.9.tar.gz", hash = "sha256:b3bd04c6be4fa382996cd2be8fbc1e7cc94857d2bc6b6f4599a7f2a245975303"}, +] + +[package.dependencies] +httpx = ">=0.25.2" +orjson = ">=3.10.1" + [[package]] name = "langsmith" version = "0.4.31" @@ -2332,7 +2400,6 @@ description = "Fast, correct Python JSON library supporting dataclasses, datetim optional = false python-versions = ">=3.9" groups = ["main"] -markers = "platform_python_implementation != \"PyPy\"" files = [ {file = "orjson-3.11.3-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:29cb1f1b008d936803e2da3d7cba726fc47232c45df531b29edf0b232dd737e7"}, {file = "orjson-3.11.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97dceed87ed9139884a55db8722428e27bd8452817fbf1869c58b49fecab1120"}, @@ -2419,6 +2486,72 @@ files = [ {file = "orjson-3.11.3.tar.gz", hash = "sha256:1c0603b1d2ffcd43a411d64797a19556ef76958aef1c182f22dc30860152a98a"}, ] +[[package]] +name = "ormsgpack" +version = "1.11.0" +description = "" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "ormsgpack-1.11.0-cp310-cp310-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:03d4e658dd6e1882a552ce1d13cc7b49157414e7d56a4091fbe7823225b08cba"}, + {file = "ormsgpack-1.11.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bb67eb913c2b703f0ed39607fc56e50724dd41f92ce080a586b4d6149eb3fe4"}, + {file = "ormsgpack-1.11.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1e54175b92411f73a238e5653a998627f6660de3def37d9dd7213e0fd264ca56"}, + {file = "ormsgpack-1.11.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca2b197f4556e1823d1319869d4c5dc278be335286d2308b0ed88b59a5afcc25"}, + {file = "ormsgpack-1.11.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:bc62388262f58c792fe1e450e1d9dbcc174ed2fb0b43db1675dd7c5ff2319d6a"}, + {file = "ormsgpack-1.11.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:c48bc10af74adfbc9113f3fb160dc07c61ad9239ef264c17e449eba3de343dc2"}, + {file = 
"ormsgpack-1.11.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:a608d3a1d4fa4acdc5082168a54513cff91f47764cef435e81a483452f5f7647"}, + {file = "ormsgpack-1.11.0-cp310-cp310-win_amd64.whl", hash = "sha256:97217b4f7f599ba45916b9c4c4b1d5656e8e2a4d91e2e191d72a7569d3c30923"}, + {file = "ormsgpack-1.11.0-cp311-cp311-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:c7be823f47d8e36648d4bc90634b93f02b7d7cc7480081195f34767e86f181fb"}, + {file = "ormsgpack-1.11.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68accf15d1b013812755c0eb7a30e1fc2f81eb603a1a143bf0cda1b301cfa797"}, + {file = "ormsgpack-1.11.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:805d06fb277d9a4e503c0c707545b49cde66cbb2f84e5cf7c58d81dfc20d8658"}, + {file = "ormsgpack-1.11.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1e57cdf003e77acc43643bda151dc01f97147a64b11cdee1380bb9698a7601c"}, + {file = "ormsgpack-1.11.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:37fc05bdaabd994097c62e2f3e08f66b03f856a640ede6dc5ea340bd15b77f4d"}, + {file = "ormsgpack-1.11.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:a6e9db6c73eb46b2e4d97bdffd1368a66f54e6806b563a997b19c004ef165e1d"}, + {file = "ormsgpack-1.11.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e9c44eae5ac0196ffc8b5ed497c75511056508f2303fa4d36b208eb820cf209e"}, + {file = "ormsgpack-1.11.0-cp311-cp311-win_amd64.whl", hash = "sha256:11d0dfaf40ae7c6de4f7dbd1e4892e2e6a55d911ab1774357c481158d17371e4"}, + {file = "ormsgpack-1.11.0-cp311-cp311-win_arm64.whl", hash = "sha256:0c63a3f7199a3099c90398a1bdf0cb577b06651a442dc5efe67f2882665e5b02"}, + {file = "ormsgpack-1.11.0-cp312-cp312-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:3434d0c8d67de27d9010222de07fb6810fb9af3bb7372354ffa19257ac0eb83b"}, + {file = "ormsgpack-1.11.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2da5bd097e8dbfa4eb0d4ccfe79acd6f538dee4493579e2debfe4fc8f4ca89b"}, + {file = "ormsgpack-1.11.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fdbaa0a5a8606a486960b60c24f2d5235d30ac7a8b98eeaea9854bffef14dc3d"}, + {file = "ormsgpack-1.11.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3682f24f800c1837017ee90ce321086b2cbaef88db7d4cdbbda1582aa6508159"}, + {file = "ormsgpack-1.11.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:fcca21202bb05ccbf3e0e92f560ee59b9331182e4c09c965a28155efbb134993"}, + {file = "ormsgpack-1.11.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:c30e5c4655ba46152d722ec7468e8302195e6db362ec1ae2c206bc64f6030e43"}, + {file = "ormsgpack-1.11.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7138a341f9e2c08c59368f03d3be25e8b87b3baaf10d30fb1f6f6b52f3d47944"}, + {file = "ormsgpack-1.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:d4bd8589b78a11026d47f4edf13c1ceab9088bb12451f34396afe6497db28a27"}, + {file = "ormsgpack-1.11.0-cp312-cp312-win_arm64.whl", hash = "sha256:e5e746a1223e70f111d4001dab9585ac8639eee8979ca0c8db37f646bf2961da"}, + {file = "ormsgpack-1.11.0-cp313-cp313-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:0e7b36ab7b45cb95217ae1f05f1318b14a3e5ef73cb00804c0f06233f81a14e8"}, + {file = "ormsgpack-1.11.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:43402d67e03a9a35cc147c8c03f0c377cad016624479e1ee5b879b8425551484"}, + {file = 
"ormsgpack-1.11.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:64fd992f932764d6306b70ddc755c1bc3405c4c6a69f77a36acf7af1c8f5ada4"}, + {file = "ormsgpack-1.11.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0362fb7fe4a29c046c8ea799303079a09372653a1ce5a5a588f3bbb8088368d0"}, + {file = "ormsgpack-1.11.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:de2f7a65a9d178ed57be49eba3d0fc9b833c32beaa19dbd4ba56014d3c20b152"}, + {file = "ormsgpack-1.11.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:f38cfae95461466055af966fc922d06db4e1654966385cda2828653096db34da"}, + {file = "ormsgpack-1.11.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c88396189d238f183cea7831b07a305ab5c90d6d29b53288ae11200bd956357b"}, + {file = "ormsgpack-1.11.0-cp313-cp313-win_amd64.whl", hash = "sha256:5403d1a945dd7c81044cebeca3f00a28a0f4248b33242a5d2d82111628043725"}, + {file = "ormsgpack-1.11.0-cp313-cp313-win_arm64.whl", hash = "sha256:c57357b8d43b49722b876edf317bdad9e6d52071b523fdd7394c30cd1c67d5a0"}, + {file = "ormsgpack-1.11.0-cp314-cp314-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:d390907d90fd0c908211592c485054d7a80990697ef4dff4e436ac18e1aab98a"}, + {file = "ormsgpack-1.11.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6153c2e92e789509098e04c9aa116b16673bd88ec78fbe0031deeb34ab642d10"}, + {file = "ormsgpack-1.11.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c2b2c2a065a94d742212b2018e1fecd8f8d72f3c50b53a97d1f407418093446d"}, + {file = "ormsgpack-1.11.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:110e65b5340f3d7ef8b0009deae3c6b169437e6b43ad5a57fd1748085d29d2ac"}, + {file = "ormsgpack-1.11.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c27e186fca96ab34662723e65b420919910acbbc50fc8e1a44e08f26268cb0e0"}, + {file = "ormsgpack-1.11.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:d56b1f877c13d499052d37a3db2378a97d5e1588d264f5040b3412aee23d742c"}, + {file = "ormsgpack-1.11.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:c88e28cd567c0a3269f624b4ade28142d5e502c8e826115093c572007af5be0a"}, + {file = "ormsgpack-1.11.0-cp314-cp314-win_amd64.whl", hash = "sha256:8811160573dc0a65f62f7e0792c4ca6b7108dfa50771edb93f9b84e2d45a08ae"}, + {file = "ormsgpack-1.11.0-cp314-cp314-win_arm64.whl", hash = "sha256:23e30a8d3c17484cf74e75e6134322255bd08bc2b5b295cc9c442f4bae5f3c2d"}, + {file = "ormsgpack-1.11.0-cp314-cp314t-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:2905816502adfaf8386a01dd85f936cd378d243f4f5ee2ff46f67f6298dc90d5"}, + {file = "ormsgpack-1.11.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c04402fb9a0a9b9f18fbafd6d5f8398ee99b3ec619fb63952d3a954bc9d47daa"}, + {file = "ormsgpack-1.11.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a025ec07ac52056ecfd9e57b5cbc6fff163f62cb9805012b56cda599157f8ef2"}, + {file = "ormsgpack-1.11.0-cp39-cp39-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:354c6a5039faf63b63d8f42ec7915583a4a56e10b319284370a5a89c4382d985"}, + {file = "ormsgpack-1.11.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7058c85cc13dd329bc7b528e38626c6babcd0066d6e9163330a1509fe0aa4707"}, + {file = "ormsgpack-1.11.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:4e15b634be324fb18dab7aa82ab929a0d57d42c12650ae3dedd07d8d31b17733"}, + {file = "ormsgpack-1.11.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6329e6eae9dfe600962739a6e060ea82885ec58b8338875c5ac35080da970f94"}, + {file = "ormsgpack-1.11.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:b27546c28f92b9eb757620f7f1ed89fb7b07be3b9f4ba1b7de75761ec1c4bcc8"}, + {file = "ormsgpack-1.11.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:26a17919d9144b4ac7112dbbadef07927abbe436be2cf99a703a19afe7dd5c8b"}, + {file = "ormsgpack-1.11.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5352868ee4cdc00656bf216b56bc654f72ac3008eb36e12561f6337bb7104b45"}, + {file = "ormsgpack-1.11.0-cp39-cp39-win_amd64.whl", hash = "sha256:2ffe36f1f441a40949e8587f5aa3d3fc9f100576925aab667117403eab494338"}, + {file = "ormsgpack-1.11.0.tar.gz", hash = "sha256:7c9988e78fedba3292541eb3bb274fa63044ef4da2ddb47259ea70c05dee4206"}, +] + [[package]] name = "owasp-schema" version = "0.1.23" @@ -3276,6 +3409,13 @@ optional = false python-versions = ">=3.8" groups = ["main", "dev"] files = [ + {file = "PyYAML-6.0.3-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:c2514fceb77bc5e7a2f7adfaa1feb2fb311607c9cb518dbc378688ec73d8292f"}, + {file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c57bb8c96f6d1808c030b1687b9b5fb476abaa47f0db9c0101f5e9f394e97f4"}, + {file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:efd7b85f94a6f21e4932043973a7ba2613b059c4a000551892ac9f1d11f5baf3"}, + {file = "PyYAML-6.0.3-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22ba7cfcad58ef3ecddc7ed1db3409af68d023b7f940da23c6c2a1890976eda6"}, + {file = "PyYAML-6.0.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6344df0d5755a2c9a276d4473ae6b90647e216ab4757f8426893b5dd2ac3f369"}, + {file = "PyYAML-6.0.3-cp38-cp38-win32.whl", hash = "sha256:3ff07ec89bae51176c0549bc4c63aa6202991da2d9a6129d7aef7f1407d3f295"}, + {file = "PyYAML-6.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:5cf4e27da7e3fbed4d6c3d8e797387aaad68102272f8f9752883bc32d61cb87b"}, {file = "pyyaml-6.0.3-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:214ed4befebe12df36bcc8bc2b64b396ca31be9304b8f59e25c11cf94a4c033b"}, {file = "pyyaml-6.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:02ea2dfa234451bbb8772601d7b8e426c2bfa197136796224e50e35a78777956"}, {file = "pyyaml-6.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b30236e45cf30d2b8e7b3e85881719e98507abed1011bf463a8fa23e9c3e98a8"}, @@ -4017,6 +4157,62 @@ optional = false python-versions = ">=3.7" groups = ["main"] files = [ + {file = "SQLAlchemy-2.0.43-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:21ba7a08a4253c5825d1db389d4299f64a100ef9800e4624c8bf70d8f136e6ed"}, + {file = "SQLAlchemy-2.0.43-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11b9503fa6f8721bef9b8567730f664c5a5153d25e247aadc69247c4bc605227"}, + {file = "SQLAlchemy-2.0.43-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07097c0a1886c150ef2adba2ff7437e84d40c0f7dcb44a2c2b9c905ccfc6361c"}, + {file = "SQLAlchemy-2.0.43-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:cdeff998cb294896a34e5b2f00e383e7c5c4ef3b4bfa375d9104723f15186443"}, + {file = "SQLAlchemy-2.0.43-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = 
"sha256:bcf0724a62a5670e5718957e05c56ec2d6850267ea859f8ad2481838f889b42c"}, + {file = "SQLAlchemy-2.0.43-cp37-cp37m-win32.whl", hash = "sha256:c697575d0e2b0a5f0433f679bda22f63873821d991e95a90e9e52aae517b2e32"}, + {file = "SQLAlchemy-2.0.43-cp37-cp37m-win_amd64.whl", hash = "sha256:d34c0f6dbefd2e816e8f341d0df7d4763d382e3f452423e752ffd1e213da2512"}, + {file = "sqlalchemy-2.0.43-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:70322986c0c699dca241418fcf18e637a4369e0ec50540a2b907b184c8bca069"}, + {file = "sqlalchemy-2.0.43-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:87accdbba88f33efa7b592dc2e8b2a9c2cdbca73db2f9d5c510790428c09c154"}, + {file = "sqlalchemy-2.0.43-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c00e7845d2f692ebfc7d5e4ec1a3fd87698e4337d09e58d6749a16aedfdf8612"}, + {file = "sqlalchemy-2.0.43-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:022e436a1cb39b13756cf93b48ecce7aa95382b9cfacceb80a7d263129dfd019"}, + {file = "sqlalchemy-2.0.43-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c5e73ba0d76eefc82ec0219d2301cb33bfe5205ed7a2602523111e2e56ccbd20"}, + {file = "sqlalchemy-2.0.43-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:9c2e02f06c68092b875d5cbe4824238ab93a7fa35d9c38052c033f7ca45daa18"}, + {file = "sqlalchemy-2.0.43-cp310-cp310-win32.whl", hash = "sha256:e7a903b5b45b0d9fa03ac6a331e1c1d6b7e0ab41c63b6217b3d10357b83c8b00"}, + {file = "sqlalchemy-2.0.43-cp310-cp310-win_amd64.whl", hash = "sha256:4bf0edb24c128b7be0c61cd17eef432e4bef507013292415f3fb7023f02b7d4b"}, + {file = "sqlalchemy-2.0.43-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:52d9b73b8fb3e9da34c2b31e6d99d60f5f99fd8c1225c9dad24aeb74a91e1d29"}, + {file = "sqlalchemy-2.0.43-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f42f23e152e4545157fa367b2435a1ace7571cab016ca26038867eb7df2c3631"}, + {file = "sqlalchemy-2.0.43-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4fb1a8c5438e0c5ea51afe9c6564f951525795cf432bed0c028c1cb081276685"}, + {file = "sqlalchemy-2.0.43-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db691fa174e8f7036afefe3061bc40ac2b770718be2862bfb03aabae09051aca"}, + {file = "sqlalchemy-2.0.43-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:fe2b3b4927d0bc03d02ad883f402d5de201dbc8894ac87d2e981e7d87430e60d"}, + {file = "sqlalchemy-2.0.43-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4d3d9b904ad4a6b175a2de0738248822f5ac410f52c2fd389ada0b5262d6a1e3"}, + {file = "sqlalchemy-2.0.43-cp311-cp311-win32.whl", hash = "sha256:5cda6b51faff2639296e276591808c1726c4a77929cfaa0f514f30a5f6156921"}, + {file = "sqlalchemy-2.0.43-cp311-cp311-win_amd64.whl", hash = "sha256:c5d1730b25d9a07727d20ad74bc1039bbbb0a6ca24e6769861c1aa5bf2c4c4a8"}, + {file = "sqlalchemy-2.0.43-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:20d81fc2736509d7a2bd33292e489b056cbae543661bb7de7ce9f1c0cd6e7f24"}, + {file = "sqlalchemy-2.0.43-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:25b9fc27650ff5a2c9d490c13c14906b918b0de1f8fcbb4c992712d8caf40e83"}, + {file = "sqlalchemy-2.0.43-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6772e3ca8a43a65a37c88e2f3e2adfd511b0b1da37ef11ed78dea16aeae85bd9"}, + {file = "sqlalchemy-2.0.43-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a113da919c25f7f641ffbd07fbc9077abd4b3b75097c888ab818f962707eb48"}, + {file = "sqlalchemy-2.0.43-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:4286a1139f14b7d70141c67a8ae1582fc2b69105f1b09d9573494eb4bb4b2687"}, + {file = "sqlalchemy-2.0.43-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:529064085be2f4d8a6e5fab12d36ad44f1909a18848fcfbdb59cc6d4bbe48efe"}, + {file = "sqlalchemy-2.0.43-cp312-cp312-win32.whl", hash = "sha256:b535d35dea8bbb8195e7e2b40059e2253acb2b7579b73c1b432a35363694641d"}, + {file = "sqlalchemy-2.0.43-cp312-cp312-win_amd64.whl", hash = "sha256:1c6d85327ca688dbae7e2b06d7d84cfe4f3fffa5b5f9e21bb6ce9d0e1a0e0e0a"}, + {file = "sqlalchemy-2.0.43-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e7c08f57f75a2bb62d7ee80a89686a5e5669f199235c6d1dac75cd59374091c3"}, + {file = "sqlalchemy-2.0.43-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:14111d22c29efad445cd5021a70a8b42f7d9152d8ba7f73304c4d82460946aaa"}, + {file = "sqlalchemy-2.0.43-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21b27b56eb2f82653168cefe6cb8e970cdaf4f3a6cb2c5e3c3c1cf3158968ff9"}, + {file = "sqlalchemy-2.0.43-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c5a9da957c56e43d72126a3f5845603da00e0293720b03bde0aacffcf2dc04f"}, + {file = "sqlalchemy-2.0.43-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5d79f9fdc9584ec83d1b3c75e9f4595c49017f5594fee1a2217117647225d738"}, + {file = "sqlalchemy-2.0.43-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9df7126fd9db49e3a5a3999442cc67e9ee8971f3cb9644250107d7296cb2a164"}, + {file = "sqlalchemy-2.0.43-cp313-cp313-win32.whl", hash = "sha256:7f1ac7828857fcedb0361b48b9ac4821469f7694089d15550bbcf9ab22564a1d"}, + {file = "sqlalchemy-2.0.43-cp313-cp313-win_amd64.whl", hash = "sha256:971ba928fcde01869361f504fcff3b7143b47d30de188b11c6357c0505824197"}, + {file = "sqlalchemy-2.0.43-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4e6aeb2e0932f32950cf56a8b4813cb15ff792fc0c9b3752eaf067cfe298496a"}, + {file = "sqlalchemy-2.0.43-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:61f964a05356f4bca4112e6334ed7c208174511bd56e6b8fc86dad4d024d4185"}, + {file = "sqlalchemy-2.0.43-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46293c39252f93ea0910aababa8752ad628bcce3a10d3f260648dd472256983f"}, + {file = "sqlalchemy-2.0.43-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:136063a68644eca9339d02e6693932116f6a8591ac013b0014479a1de664e40a"}, + {file = "sqlalchemy-2.0.43-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:6e2bf13d9256398d037fef09fd8bf9b0bf77876e22647d10761d35593b9ac547"}, + {file = "sqlalchemy-2.0.43-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:44337823462291f17f994d64282a71c51d738fc9ef561bf265f1d0fd9116a782"}, + {file = "sqlalchemy-2.0.43-cp38-cp38-win32.whl", hash = "sha256:13194276e69bb2af56198fef7909d48fd34820de01d9c92711a5fa45497cc7ed"}, + {file = "sqlalchemy-2.0.43-cp38-cp38-win_amd64.whl", hash = "sha256:334f41fa28de9f9be4b78445e68530da3c5fa054c907176460c81494f4ae1f5e"}, + {file = "sqlalchemy-2.0.43-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ceb5c832cc30663aeaf5e39657712f4c4241ad1f638d487ef7216258f6d41fe7"}, + {file = "sqlalchemy-2.0.43-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:11f43c39b4b2ec755573952bbcc58d976779d482f6f832d7f33a8d869ae891bf"}, + {file = "sqlalchemy-2.0.43-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:413391b2239db55be14fa4223034d7e13325a1812c8396ecd4f2c08696d5ccad"}, + {file = "sqlalchemy-2.0.43-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:c379e37b08c6c527181a397212346be39319fb64323741d23e46abd97a400d34"}, + {file = "sqlalchemy-2.0.43-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:03d73ab2a37d9e40dec4984d1813d7878e01dbdc742448d44a7341b7a9f408c7"}, + {file = "sqlalchemy-2.0.43-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:8cee08f15d9e238ede42e9bbc1d6e7158d0ca4f176e4eab21f88ac819ae3bd7b"}, + {file = "sqlalchemy-2.0.43-cp39-cp39-win32.whl", hash = "sha256:b3edaec7e8b6dc5cd94523c6df4f294014df67097c8217a89929c99975811414"}, + {file = "sqlalchemy-2.0.43-cp39-cp39-win_amd64.whl", hash = "sha256:227119ce0a89e762ecd882dc661e0aa677a690c914e358f0dd8932a2e8b2765b"}, + {file = "sqlalchemy-2.0.43-py3-none-any.whl", hash = "sha256:1681c21dd2ccee222c2fe0bef671d1aef7c504087c9c4e800371cfcc8ac966fc"}, {file = "sqlalchemy-2.0.43.tar.gz", hash = "sha256:788bfcef6787a7764169cfe9859fe425bf44559619e1d9f56f5bddf2ebf6f417"}, ] @@ -4288,6 +4484,156 @@ platformdirs = ">=3.9.1,<5" docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8) ; platform_python_implementation == \"PyPy\" or platform_python_implementation == \"GraalVM\" or platform_python_implementation == \"CPython\" and sys_platform == \"win32\" and python_version >= \"3.13\"", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10) ; platform_python_implementation == \"CPython\""] +[[package]] +name = "xxhash" +version = "3.6.0" +description = "Python binding for xxHash" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "xxhash-3.6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:87ff03d7e35c61435976554477a7f4cd1704c3596a89a8300d5ce7fc83874a71"}, + {file = "xxhash-3.6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f572dfd3d0e2eb1a57511831cf6341242f5a9f8298a45862d085f5b93394a27d"}, + {file = "xxhash-3.6.0-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:89952ea539566b9fed2bbd94e589672794b4286f342254fad28b149f9615fef8"}, + {file = "xxhash-3.6.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:48e6f2ffb07a50b52465a1032c3cf1f4a5683f944acaca8a134a2f23674c2058"}, + {file = "xxhash-3.6.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b5b848ad6c16d308c3ac7ad4ba6bede80ed5df2ba8ed382f8932df63158dd4b2"}, + {file = "xxhash-3.6.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a034590a727b44dd8ac5914236a7b8504144447a9682586c3327e935f33ec8cc"}, + {file = "xxhash-3.6.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8a8f1972e75ebdd161d7896743122834fe87378160c20e97f8b09166213bf8cc"}, + {file = "xxhash-3.6.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ee34327b187f002a596d7b167ebc59a1b729e963ce645964bbc050d2f1b73d07"}, + {file = "xxhash-3.6.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:339f518c3c7a850dd033ab416ea25a692759dc7478a71131fe8869010d2b75e4"}, + {file = "xxhash-3.6.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:bf48889c9630542d4709192578aebbd836177c9f7a4a2778a7d6340107c65f06"}, + {file = 
"xxhash-3.6.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:5576b002a56207f640636056b4160a378fe36a58db73ae5c27a7ec8db35f71d4"}, + {file = "xxhash-3.6.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:af1f3278bd02814d6dedc5dec397993b549d6f16c19379721e5a1d31e132c49b"}, + {file = "xxhash-3.6.0-cp310-cp310-win32.whl", hash = "sha256:aed058764db109dc9052720da65fafe84873b05eb8b07e5e653597951af57c3b"}, + {file = "xxhash-3.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:e82da5670f2d0d98950317f82a0e4a0197150ff19a6df2ba40399c2a3b9ae5fb"}, + {file = "xxhash-3.6.0-cp310-cp310-win_arm64.whl", hash = "sha256:4a082ffff8c6ac07707fb6b671caf7c6e020c75226c561830b73d862060f281d"}, + {file = "xxhash-3.6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b47bbd8cf2d72797f3c2772eaaac0ded3d3af26481a26d7d7d41dc2d3c46b04a"}, + {file = "xxhash-3.6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2b6821e94346f96db75abaa6e255706fb06ebd530899ed76d32cd99f20dc52fa"}, + {file = "xxhash-3.6.0-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d0a9751f71a1a65ce3584e9cae4467651c7e70c9d31017fa57574583a4540248"}, + {file = "xxhash-3.6.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b29ee68625ab37b04c0b40c3fafdf24d2f75ccd778333cfb698f65f6c463f62"}, + {file = "xxhash-3.6.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6812c25fe0d6c36a46ccb002f40f27ac903bf18af9f6dd8f9669cb4d176ab18f"}, + {file = "xxhash-3.6.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4ccbff013972390b51a18ef1255ef5ac125c92dc9143b2d1909f59abc765540e"}, + {file = "xxhash-3.6.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:297b7fbf86c82c550e12e8fb71968b3f033d27b874276ba3624ea868c11165a8"}, + {file = "xxhash-3.6.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:dea26ae1eb293db089798d3973a5fc928a18fdd97cc8801226fae705b02b14b0"}, + {file = "xxhash-3.6.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7a0b169aafb98f4284f73635a8e93f0735f9cbde17bd5ec332480484241aaa77"}, + {file = "xxhash-3.6.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:08d45aef063a4531b785cd72de4887766d01dc8f362a515693df349fdb825e0c"}, + {file = "xxhash-3.6.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:929142361a48ee07f09121fe9e96a84950e8d4df3bb298ca5d88061969f34d7b"}, + {file = "xxhash-3.6.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:51312c768403d8540487dbbfb557454cfc55589bbde6424456951f7fcd4facb3"}, + {file = "xxhash-3.6.0-cp311-cp311-win32.whl", hash = "sha256:d1927a69feddc24c987b337ce81ac15c4720955b667fe9b588e02254b80446fd"}, + {file = "xxhash-3.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:26734cdc2d4ffe449b41d186bbeac416f704a482ed835d375a5c0cb02bc63fef"}, + {file = "xxhash-3.6.0-cp311-cp311-win_arm64.whl", hash = "sha256:d72f67ef8bf36e05f5b6c65e8524f265bd61071471cd4cf1d36743ebeeeb06b7"}, + {file = "xxhash-3.6.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:01362c4331775398e7bb34e3ab403bc9ee9f7c497bc7dee6272114055277dd3c"}, + {file = "xxhash-3.6.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b7b2df81a23f8cb99656378e72501b2cb41b1827c0f5a86f87d6b06b69f9f204"}, + {file = "xxhash-3.6.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:dc94790144e66b14f67b10ac8ed75b39ca47536bf8800eb7c24b50271ea0c490"}, + {file = 
"xxhash-3.6.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:93f107c673bccf0d592cdba077dedaf52fe7f42dcd7676eba1f6d6f0c3efffd2"}, + {file = "xxhash-3.6.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2aa5ee3444c25b69813663c9f8067dcfaa2e126dc55e8dddf40f4d1c25d7effa"}, + {file = "xxhash-3.6.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f7f99123f0e1194fa59cc69ad46dbae2e07becec5df50a0509a808f90a0f03f0"}, + {file = "xxhash-3.6.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:49e03e6fe2cac4a1bc64952dd250cf0dbc5ef4ebb7b8d96bce82e2de163c82a2"}, + {file = "xxhash-3.6.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bd17fede52a17a4f9a7bc4472a5867cb0b160deeb431795c0e4abe158bc784e9"}, + {file = "xxhash-3.6.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:6fb5f5476bef678f69db04f2bd1efbed3030d2aba305b0fc1773645f187d6a4e"}, + {file = "xxhash-3.6.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:843b52f6d88071f87eba1631b684fcb4b2068cd2180a0224122fe4ef011a9374"}, + {file = "xxhash-3.6.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7d14a6cfaf03b1b6f5f9790f76880601ccc7896aff7ab9cd8978a939c1eb7e0d"}, + {file = "xxhash-3.6.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:418daf3db71e1413cfe211c2f9a528456936645c17f46b5204705581a45390ae"}, + {file = "xxhash-3.6.0-cp312-cp312-win32.whl", hash = "sha256:50fc255f39428a27299c20e280d6193d8b63b8ef8028995323bf834a026b4fbb"}, + {file = "xxhash-3.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:c0f2ab8c715630565ab8991b536ecded9416d615538be8ecddce43ccf26cbc7c"}, + {file = "xxhash-3.6.0-cp312-cp312-win_arm64.whl", hash = "sha256:eae5c13f3bc455a3bbb68bdc513912dc7356de7e2280363ea235f71f54064829"}, + {file = "xxhash-3.6.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:599e64ba7f67472481ceb6ee80fa3bd828fd61ba59fb11475572cc5ee52b89ec"}, + {file = "xxhash-3.6.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7d8b8aaa30fca4f16f0c84a5c8d7ddee0e25250ec2796c973775373257dde8f1"}, + {file = "xxhash-3.6.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d597acf8506d6e7101a4a44a5e428977a51c0fadbbfd3c39650cca9253f6e5a6"}, + {file = "xxhash-3.6.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:858dc935963a33bc33490128edc1c12b0c14d9c7ebaa4e387a7869ecc4f3e263"}, + {file = "xxhash-3.6.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ba284920194615cb8edf73bf52236ce2e1664ccd4a38fdb543506413529cc546"}, + {file = "xxhash-3.6.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4b54219177f6c6674d5378bd862c6aedf64725f70dd29c472eaae154df1a2e89"}, + {file = "xxhash-3.6.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:42c36dd7dbad2f5238950c377fcbf6811b1cdb1c444fab447960030cea60504d"}, + {file = "xxhash-3.6.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f22927652cba98c44639ffdc7aaf35828dccf679b10b31c4ad72a5b530a18eb7"}, + {file = "xxhash-3.6.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b45fad44d9c5c119e9c6fbf2e1c656a46dc68e280275007bbfd3d572b21426db"}, + {file = "xxhash-3.6.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:6f2580ffab1a8b68ef2b901cde7e55fa8da5e4be0977c68f78fc80f3c143de42"}, + 
{file = "xxhash-3.6.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:40c391dd3cd041ebc3ffe6f2c862f402e306eb571422e0aa918d8070ba31da11"}, + {file = "xxhash-3.6.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f205badabde7aafd1a31e8ca2a3e5a763107a71c397c4481d6a804eb5063d8bd"}, + {file = "xxhash-3.6.0-cp313-cp313-win32.whl", hash = "sha256:2577b276e060b73b73a53042ea5bd5203d3e6347ce0d09f98500f418a9fcf799"}, + {file = "xxhash-3.6.0-cp313-cp313-win_amd64.whl", hash = "sha256:757320d45d2fbcce8f30c42a6b2f47862967aea7bf458b9625b4bbe7ee390392"}, + {file = "xxhash-3.6.0-cp313-cp313-win_arm64.whl", hash = "sha256:457b8f85dec5825eed7b69c11ae86834a018b8e3df5e77783c999663da2f96d6"}, + {file = "xxhash-3.6.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a42e633d75cdad6d625434e3468126c73f13f7584545a9cf34e883aa1710e702"}, + {file = "xxhash-3.6.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:568a6d743219e717b07b4e03b0a828ce593833e498c3b64752e0f5df6bfe84db"}, + {file = "xxhash-3.6.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:bec91b562d8012dae276af8025a55811b875baace6af510412a5e58e3121bc54"}, + {file = "xxhash-3.6.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:78e7f2f4c521c30ad5e786fdd6bae89d47a32672a80195467b5de0480aa97b1f"}, + {file = "xxhash-3.6.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3ed0df1b11a79856df5ffcab572cbd6b9627034c1c748c5566fa79df9048a7c5"}, + {file = "xxhash-3.6.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0e4edbfc7d420925b0dd5e792478ed393d6e75ff8fc219a6546fb446b6a417b1"}, + {file = "xxhash-3.6.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fba27a198363a7ef87f8c0f6b171ec36b674fe9053742c58dd7e3201c1ab30ee"}, + {file = "xxhash-3.6.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:794fe9145fe60191c6532fa95063765529770edcdd67b3d537793e8004cabbfd"}, + {file = "xxhash-3.6.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:6105ef7e62b5ac73a837778efc331a591d8442f8ef5c7e102376506cb4ae2729"}, + {file = "xxhash-3.6.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:f01375c0e55395b814a679b3eea205db7919ac2af213f4a6682e01220e5fe292"}, + {file = "xxhash-3.6.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:d706dca2d24d834a4661619dcacf51a75c16d65985718d6a7d73c1eeeb903ddf"}, + {file = "xxhash-3.6.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5f059d9faeacd49c0215d66f4056e1326c80503f51a1532ca336a385edadd033"}, + {file = "xxhash-3.6.0-cp313-cp313t-win32.whl", hash = "sha256:1244460adc3a9be84731d72b8e80625788e5815b68da3da8b83f78115a40a7ec"}, + {file = "xxhash-3.6.0-cp313-cp313t-win_amd64.whl", hash = "sha256:b1e420ef35c503869c4064f4a2f2b08ad6431ab7b229a05cce39d74268bca6b8"}, + {file = "xxhash-3.6.0-cp313-cp313t-win_arm64.whl", hash = "sha256:ec44b73a4220623235f67a996c862049f375df3b1052d9899f40a6382c32d746"}, + {file = "xxhash-3.6.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:a40a3d35b204b7cc7643cbcf8c9976d818cb47befcfac8bbefec8038ac363f3e"}, + {file = "xxhash-3.6.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:a54844be970d3fc22630b32d515e79a90d0a3ddb2644d8d7402e3c4c8da61405"}, + {file = "xxhash-3.6.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:016e9190af8f0a4e3741343777710e3d5717427f175adfdc3e72508f59e2a7f3"}, + {file = 
"xxhash-3.6.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4f6f72232f849eb9d0141e2ebe2677ece15adfd0fa599bc058aad83c714bb2c6"}, + {file = "xxhash-3.6.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:63275a8aba7865e44b1813d2177e0f5ea7eadad3dd063a21f7cf9afdc7054063"}, + {file = "xxhash-3.6.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3cd01fa2aa00d8b017c97eb46b9a794fbdca53fc14f845f5a328c71254b0abb7"}, + {file = "xxhash-3.6.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0226aa89035b62b6a86d3c68df4d7c1f47a342b8683da2b60cedcddb46c4d95b"}, + {file = "xxhash-3.6.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c6e193e9f56e4ca4923c61238cdaced324f0feac782544eb4c6d55ad5cc99ddd"}, + {file = "xxhash-3.6.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:9176dcaddf4ca963d4deb93866d739a343c01c969231dbe21680e13a5d1a5bf0"}, + {file = "xxhash-3.6.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:c1ce4009c97a752e682b897aa99aef84191077a9433eb237774689f14f8ec152"}, + {file = "xxhash-3.6.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:8cb2f4f679b01513b7adbb9b1b2f0f9cdc31b70007eaf9d59d0878809f385b11"}, + {file = "xxhash-3.6.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:653a91d7c2ab54a92c19ccf43508b6a555440b9be1bc8be553376778be7f20b5"}, + {file = "xxhash-3.6.0-cp314-cp314-win32.whl", hash = "sha256:a756fe893389483ee8c394d06b5ab765d96e68fbbfe6fde7aa17e11f5720559f"}, + {file = "xxhash-3.6.0-cp314-cp314-win_amd64.whl", hash = "sha256:39be8e4e142550ef69629c9cd71b88c90e9a5db703fecbcf265546d9536ca4ad"}, + {file = "xxhash-3.6.0-cp314-cp314-win_arm64.whl", hash = "sha256:25915e6000338999236f1eb68a02a32c3275ac338628a7eaa5a269c401995679"}, + {file = "xxhash-3.6.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:c5294f596a9017ca5a3e3f8884c00b91ab2ad2933cf288f4923c3fd4346cf3d4"}, + {file = "xxhash-3.6.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1cf9dcc4ab9cff01dfbba78544297a3a01dafd60f3bde4e2bfd016cf7e4ddc67"}, + {file = "xxhash-3.6.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:01262da8798422d0685f7cef03b2bd3f4f46511b02830861df548d7def4402ad"}, + {file = "xxhash-3.6.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:51a73fb7cb3a3ead9f7a8b583ffd9b8038e277cdb8cb87cf890e88b3456afa0b"}, + {file = "xxhash-3.6.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b9c6df83594f7df8f7f708ce5ebeacfc69f72c9fbaaababf6cf4758eaada0c9b"}, + {file = "xxhash-3.6.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:627f0af069b0ea56f312fd5189001c24578868643203bca1abbc2c52d3a6f3ca"}, + {file = "xxhash-3.6.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aa912c62f842dfd013c5f21a642c9c10cd9f4c4e943e0af83618b4a404d9091a"}, + {file = "xxhash-3.6.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:b465afd7909db30168ab62afe40b2fcf79eedc0b89a6c0ab3123515dc0df8b99"}, + {file = "xxhash-3.6.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:a881851cf38b0a70e7c4d3ce81fc7afd86fbc2a024f4cfb2a97cf49ce04b75d3"}, + {file = "xxhash-3.6.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = 
"sha256:9b3222c686a919a0f3253cfc12bb118b8b103506612253b5baeaac10d8027cf6"}, + {file = "xxhash-3.6.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:c5aa639bc113e9286137cec8fadc20e9cd732b2cc385c0b7fa673b84fc1f2a93"}, + {file = "xxhash-3.6.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5c1343d49ac102799905e115aee590183c3921d475356cb24b4de29a4bc56518"}, + {file = "xxhash-3.6.0-cp314-cp314t-win32.whl", hash = "sha256:5851f033c3030dd95c086b4a36a2683c2ff4a799b23af60977188b057e467119"}, + {file = "xxhash-3.6.0-cp314-cp314t-win_amd64.whl", hash = "sha256:0444e7967dac37569052d2409b00a8860c2135cff05502df4da80267d384849f"}, + {file = "xxhash-3.6.0-cp314-cp314t-win_arm64.whl", hash = "sha256:bb79b1e63f6fd84ec778a4b1916dfe0a7c3fdb986c06addd5db3a0d413819d95"}, + {file = "xxhash-3.6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7dac94fad14a3d1c92affb661021e1d5cbcf3876be5f5b4d90730775ccb7ac41"}, + {file = "xxhash-3.6.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6965e0e90f1f0e6cb78da568c13d4a348eeb7f40acfd6d43690a666a459458b8"}, + {file = "xxhash-3.6.0-cp38-cp38-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:2ab89a6b80f22214b43d98693c30da66af910c04f9858dd39c8e570749593d7e"}, + {file = "xxhash-3.6.0-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4903530e866b7a9c1eadfd3fa2fbe1b97d3aed4739a80abf506eb9318561c850"}, + {file = "xxhash-3.6.0-cp38-cp38-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4da8168ae52c01ac64c511d6f4a709479da8b7a4a1d7621ed51652f93747dffa"}, + {file = "xxhash-3.6.0-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:97460eec202017f719e839a0d3551fbc0b2fcc9c6c6ffaa5af85bbd5de432788"}, + {file = "xxhash-3.6.0-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:45aae0c9df92e7fa46fbb738737324a563c727990755ec1965a6a339ea10a1df"}, + {file = "xxhash-3.6.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:0d50101e57aad86f4344ca9b32d091a2135a9d0a4396f19133426c88025b09f1"}, + {file = "xxhash-3.6.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:9085e798c163ce310d91f8aa6b325dda3c2944c93c6ce1edb314030d4167cc65"}, + {file = "xxhash-3.6.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:a87f271a33fad0e5bf3be282be55d78df3a45ae457950deb5241998790326f87"}, + {file = "xxhash-3.6.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:9e040d3e762f84500961791fa3709ffa4784d4dcd7690afc655c095e02fff05f"}, + {file = "xxhash-3.6.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:b0359391c3dad6de872fefb0cf5b69d55b0655c55ee78b1bb7a568979b2ce96b"}, + {file = "xxhash-3.6.0-cp38-cp38-win32.whl", hash = "sha256:e4ff728a2894e7f436b9e94c667b0f426b9c74b71f900cf37d5468c6b5da0536"}, + {file = "xxhash-3.6.0-cp38-cp38-win_amd64.whl", hash = "sha256:01be0c5b500c5362871fc9cfdf58c69b3e5c4f531a82229ddb9eb1eb14138004"}, + {file = "xxhash-3.6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cc604dc06027dbeb8281aeac5899c35fcfe7c77b25212833709f0bff4ce74d2a"}, + {file = "xxhash-3.6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:277175a73900ad43a8caeb8b99b9604f21fe8d7c842f2f9061a364a7e220ddb7"}, + {file = "xxhash-3.6.0-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:cfbc5b91397c8c2972fdac13fb3e4ed2f7f8ccac85cd2c644887557780a9b6e2"}, + {file = "xxhash-3.6.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:2762bfff264c4e73c0e507274b40634ff465e025f0eaf050897e88ec8367575d"}, + {file = "xxhash-3.6.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2f171a900d59d51511209f7476933c34a0c2c711078d3c80e74e0fe4f38680ec"}, + {file = "xxhash-3.6.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:780b90c313348f030b811efc37b0fa1431163cb8db8064cf88a7936b6ce5f222"}, + {file = "xxhash-3.6.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18b242455eccdfcd1fa4134c431a30737d2b4f045770f8fe84356b3469d4b919"}, + {file = "xxhash-3.6.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a75ffc1bd5def584129774c158e108e5d768e10b75813f2b32650bb041066ed6"}, + {file = "xxhash-3.6.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1fc1ed882d1e8df932a66e2999429ba6cc4d5172914c904ab193381fba825360"}, + {file = "xxhash-3.6.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:44e342e8cc11b4e79dae5c57f2fb6360c3c20cc57d32049af8f567f5b4bcb5f4"}, + {file = "xxhash-3.6.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c2f9ccd5c4be370939a2e17602fbc49995299203da72a3429db013d44d590e86"}, + {file = "xxhash-3.6.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:02ea4cb627c76f48cd9fb37cf7ab22bd51e57e1b519807234b473faebe526796"}, + {file = "xxhash-3.6.0-cp39-cp39-win32.whl", hash = "sha256:6551880383f0e6971dc23e512c9ccc986147ce7bfa1cd2e4b520b876c53e9f3d"}, + {file = "xxhash-3.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:7c35c4cdc65f2a29f34425c446f2f5cdcd0e3c34158931e1cc927ece925ab802"}, + {file = "xxhash-3.6.0-cp39-cp39-win_arm64.whl", hash = "sha256:ffc578717a347baf25be8397cb10d2528802d24f94cfc005c0e44fef44b5cdd6"}, + {file = "xxhash-3.6.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0f7b7e2ec26c1666ad5fc9dbfa426a6a3367ceaf79db5dd76264659d509d73b0"}, + {file = "xxhash-3.6.0-pp311-pypy311_pp73-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:5dc1e14d14fa0f5789ec29a7062004b5933964bb9b02aae6622b8f530dc40296"}, + {file = "xxhash-3.6.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:881b47fc47e051b37d94d13e7455131054b56749b91b508b0907eb07900d1c13"}, + {file = "xxhash-3.6.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c6dc31591899f5e5666f04cc2e529e69b4072827085c1ef15294d91a004bc1bd"}, + {file = "xxhash-3.6.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:15e0dac10eb9309508bfc41f7f9deaa7755c69e35af835db9cb10751adebc35d"}, + {file = "xxhash-3.6.0.tar.gz", hash = "sha256:f0162a78b13a0d7617b2845b90c763339d1f1d82bb04a4b07f4ab535cc5e05d6"}, +] + [[package]] name = "yarl" version = "1.20.1" @@ -4522,4 +4868,4 @@ cffi = ["cffi (>=1.17,<2.0) ; platform_python_implementation != \"PyPy\" and pyt [metadata] lock-version = "2.1" python-versions = "^3.13" -content-hash = "ccb7c02fa5edba40b21d1f5593d787cda618b08cd48fd5b9d692fc443a5d5b9c" +content-hash = "c9cff0de2e15ec9630ed6a3064ce2fea896f0f1d60faf3c16b66492d022ed273" diff --git a/backend/pyproject.toml b/backend/pyproject.toml index a77105d18a..9d349647ee 100644 --- a/backend/pyproject.toml +++ b/backend/pyproject.toml @@ -28,6 +28,7 @@ humanize = "^4.11.0" jinja2 = "^3.1.6" langchain = "^0.3.26" langchain-community = "^0.3.26" +langgraph = "^0.6.10" lxml = "^6.0.0" markdown = "^3.7" openai = "^1.109.0" diff --git a/backend/tests/apps/ai/agent/tools/rag/generator_test.py 
b/backend/tests/apps/ai/agent/tools/rag/generator_test.py index 7df87b5d90..9f30176cfe 100644 --- a/backend/tests/apps/ai/agent/tools/rag/generator_test.py +++ b/backend/tests/apps/ai/agent/tools/rag/generator_test.py @@ -320,4 +320,4 @@ def test_generate_answer_none_openai_response(self): def test_constants(self): """Test class constants have expected values.""" assert Generator.MAX_TOKENS == 2000 - assert Generator.TEMPERATURE == 0.4 + assert Generator.TEMPERATURE == 0.8 diff --git a/backend/tests/apps/ai/agent/tools/rag/rag_tool_test.py b/backend/tests/apps/ai/agent/tools/rag/rag_tool_test.py deleted file mode 100644 index 0350ef477a..0000000000 --- a/backend/tests/apps/ai/agent/tools/rag/rag_tool_test.py +++ /dev/null @@ -1,179 +0,0 @@ -"""Tests for the RAG Tool.""" - -import os -from unittest.mock import MagicMock, patch - -import pytest - -from apps.ai.agent.tools.rag.rag_tool import RagTool - - -class TestRagTool: - """Test cases for the RagTool class.""" - - def test_init_success(self): - """Test successful initialization of RagTool.""" - with ( - patch.dict(os.environ, {"DJANGO_OPEN_AI_SECRET_KEY": "test-key"}), - patch("apps.ai.agent.tools.rag.rag_tool.Retriever") as mock_retriever_class, - patch("apps.ai.agent.tools.rag.rag_tool.Generator") as mock_generator_class, - ): - mock_retriever = MagicMock() - mock_generator = MagicMock() - mock_retriever_class.return_value = mock_retriever - mock_generator_class.return_value = mock_generator - - rag_tool = RagTool( - embedding_model="custom-embedding-model", chat_model="custom-chat-model" - ) - - assert rag_tool.retriever == mock_retriever - assert rag_tool.generator == mock_generator - mock_retriever_class.assert_called_once_with(embedding_model="custom-embedding-model") - mock_generator_class.assert_called_once_with(chat_model="custom-chat-model") - - def test_init_default_models(self): - """Test initialization with default model parameters.""" - with ( - patch.dict(os.environ, {"DJANGO_OPEN_AI_SECRET_KEY": "test-key"}), - patch("apps.ai.agent.tools.rag.rag_tool.Retriever") as mock_retriever_class, - patch("apps.ai.agent.tools.rag.rag_tool.Generator") as mock_generator_class, - ): - mock_retriever = MagicMock() - mock_generator = MagicMock() - mock_retriever_class.return_value = mock_retriever - mock_generator_class.return_value = mock_generator - - RagTool() - - mock_retriever_class.assert_called_once_with(embedding_model="text-embedding-3-small") - mock_generator_class.assert_called_once_with(chat_model="gpt-4o") - - def test_init_no_api_key(self): - """Test initialization fails when API key is not set.""" - with ( - patch.dict(os.environ, {}, clear=True), - patch("apps.ai.agent.tools.rag.rag_tool.Retriever") as mock_retriever_class, - ): - mock_retriever_class.side_effect = ValueError( - "DJANGO_OPEN_AI_SECRET_KEY environment variable not set" - ) - - with pytest.raises( - ValueError, - match="DJANGO_OPEN_AI_SECRET_KEY environment variable not set", - ): - RagTool() - - def test_query_success(self): - """Test successful query execution.""" - with ( - patch.dict(os.environ, {"DJANGO_OPEN_AI_SECRET_KEY": "test-key"}), - patch("apps.ai.agent.tools.rag.rag_tool.Retriever") as mock_retriever_class, - patch("apps.ai.agent.tools.rag.rag_tool.Generator") as mock_generator_class, - ): - mock_retriever = MagicMock() - mock_generator = MagicMock() - mock_retriever_class.return_value = mock_retriever - mock_generator_class.return_value = mock_generator - - mock_chunks = [{"text": "Test content", "source_name": "Test Source"}] - 
mock_retriever.retrieve.return_value = mock_chunks - mock_generator.generate_answer.return_value = "Generated answer" - - rag_tool = RagTool() - - result = rag_tool.query( - question="What is OWASP?", - content_types=["chapter"], - limit=10, - similarity_threshold=0.5, - ) - - assert result == "Generated answer" - mock_retriever.retrieve.assert_called_once_with( - content_types=["chapter"], - limit=10, - query="What is OWASP?", - similarity_threshold=0.5, - ) - mock_generator.generate_answer.assert_called_once_with( - context_chunks=mock_chunks, query="What is OWASP?" - ) - - def test_query_with_defaults(self): - """Test query with default parameters.""" - with ( - patch.dict(os.environ, {"DJANGO_OPEN_AI_SECRET_KEY": "test-key"}), - patch("apps.ai.agent.tools.rag.rag_tool.Retriever") as mock_retriever_class, - patch("apps.ai.agent.tools.rag.rag_tool.Generator") as mock_generator_class, - ): - mock_retriever = MagicMock() - mock_generator = MagicMock() - mock_retriever_class.return_value = mock_retriever - mock_generator_class.return_value = mock_generator - - mock_chunks = [] - mock_retriever.retrieve.return_value = mock_chunks - mock_generator.generate_answer.return_value = "Default answer" - - rag_tool = RagTool() - - result = rag_tool.query("Test question") - - assert result == "Default answer" - mock_retriever.retrieve.assert_called_once_with( - content_types=None, - limit=8, - query="Test question", - similarity_threshold=0.1, - ) - - def test_query_empty_content_types(self): - """Test query with empty content types list.""" - with ( - patch.dict(os.environ, {"DJANGO_OPEN_AI_SECRET_KEY": "test-key"}), - patch("apps.ai.agent.tools.rag.rag_tool.Retriever") as mock_retriever_class, - patch("apps.ai.agent.tools.rag.rag_tool.Generator") as mock_generator_class, - ): - mock_retriever = MagicMock() - mock_generator = MagicMock() - mock_retriever_class.return_value = mock_retriever - mock_generator_class.return_value = mock_generator - - mock_chunks = [] - mock_retriever.retrieve.return_value = mock_chunks - mock_generator.generate_answer.return_value = "Answer" - - rag_tool = RagTool() - - result = rag_tool.query("Test question", content_types=[]) - - assert result == "Answer" - mock_retriever.retrieve.assert_called_once_with( - content_types=[], - limit=8, - query="Test question", - similarity_threshold=0.1, - ) - - @patch("apps.ai.agent.tools.rag.rag_tool.logger") - def test_query_logs_retrieval(self, mock_logger): - """Test that query logs the retrieval process.""" - with ( - patch.dict(os.environ, {"DJANGO_OPEN_AI_SECRET_KEY": "test-key"}), - patch("apps.ai.agent.tools.rag.rag_tool.Retriever") as mock_retriever_class, - patch("apps.ai.agent.tools.rag.rag_tool.Generator") as mock_generator_class, - ): - mock_retriever = MagicMock() - mock_generator = MagicMock() - mock_retriever_class.return_value = mock_retriever - mock_generator_class.return_value = mock_generator - - mock_retriever.retrieve.return_value = [] - mock_generator.generate_answer.return_value = "Answer" - - rag_tool = RagTool() - rag_tool.query("Test question") - - mock_logger.info.assert_called_once_with("Retrieving context for query") diff --git a/backend/tests/apps/ai/management/commands/ai_run_rag_tool_test.py b/backend/tests/apps/ai/management/commands/ai_run_rag_tool_test.py deleted file mode 100644 index 71506eb673..0000000000 --- a/backend/tests/apps/ai/management/commands/ai_run_rag_tool_test.py +++ /dev/null @@ -1,142 +0,0 @@ -"""Tests for the ai_run_rag_tool Django management command.""" - -from unittest.mock 
import MagicMock, patch - -import pytest -from django.core.management.base import BaseCommand - -from apps.ai.management.commands.ai_run_rag_tool import Command - - -@pytest.fixture -def command(): - """Return a command instance.""" - return Command() - - -class TestAiRunRagToolCommand: - """Test suite for the ai_run_rag_tool command.""" - - def test_command_help_text(self, command): - """Test that the command has the correct help text.""" - assert command.help == "Test the RagTool functionality with a sample query" - - def test_command_inheritance(self, command): - """Test that the command inherits from BaseCommand.""" - assert isinstance(command, BaseCommand) - - def test_add_arguments(self, command): - """Test that the command adds the correct arguments.""" - parser = MagicMock() - command.add_arguments(parser) - - assert parser.add_argument.call_count == 6 - parser.add_argument.assert_any_call( - "--query", - type=str, - default="What is OWASP Foundation?", - help="Query to test the Rag tool", - ) - parser.add_argument.assert_any_call( - "--limit", - type=int, - default=8, # DEFAULT_CHUNKS_RETRIEVAL_LIMIT - help="Maximum number of results to retrieve", - ) - parser.add_argument.assert_any_call( - "--threshold", - type=float, - default=0.1, # DEFAULT_SIMILARITY_THRESHOLD - help="Similarity threshold (0.0 to 1.0)", - ) - parser.add_argument.assert_any_call( - "--content-types", - nargs="+", - default=None, - help="Content types to filter by (e.g., project chapter)", - ) - parser.add_argument.assert_any_call( - "--embedding-model", - type=str, - default="text-embedding-3-small", - help="OpenAI embedding model", - ) - parser.add_argument.assert_any_call( - "--chat-model", - type=str, - default="gpt-4o", - help="OpenAI chat model", - ) - - @patch("apps.ai.management.commands.ai_run_rag_tool.RagTool") - def test_handle_success(self, mock_rag_tool, command): - """Test successful command execution.""" - command.stdout = MagicMock() - mock_rag_instance = MagicMock() - mock_rag_instance.query.return_value = "Test answer" - mock_rag_tool.return_value = mock_rag_instance - - command.handle( - query="Test query", - limit=10, - threshold=0.8, - content_types=["project", "chapter"], - embedding_model="text-embedding-3-small", - chat_model="gpt-4o", - ) - - mock_rag_tool.assert_called_once_with( - chat_model="gpt-4o", embedding_model="text-embedding-3-small" - ) - mock_rag_instance.query.assert_called_once_with( - content_types=["project", "chapter"], - limit=10, - question="Test query", - similarity_threshold=0.8, - ) - command.stdout.write.assert_any_call("\nProcessing query...") - command.stdout.write.assert_any_call("\nAnswer: Test answer") - - @patch("apps.ai.management.commands.ai_run_rag_tool.RagTool") - def test_handle_initialization_error(self, mock_rag_tool, command): - """Test command when RagTool initialization fails.""" - command.stderr = MagicMock() - command.style = MagicMock() - mock_rag_tool.side_effect = ValueError("Initialization error") - - command.handle( - query="What is OWASP Foundation?", - limit=8, - threshold=0.1, - content_types=None, - embedding_model="text-embedding-3-small", - chat_model="gpt-4o", - ) - command.stderr.write.assert_called_once() - - @patch("apps.ai.management.commands.ai_run_rag_tool.RagTool") - def test_handle_with_default_values(self, mock_rag_tool, command): - """Test command with default argument values.""" - command.stdout = MagicMock() - mock_rag_instance = MagicMock() - mock_rag_instance.query.return_value = "Default answer" - 
mock_rag_tool.return_value = mock_rag_instance - - command.handle( - query="What is OWASP Foundation?", - limit=8, - threshold=0.1, - content_types=None, - embedding_model="text-embedding-3-small", - chat_model="gpt-4o", - ) - - mock_rag_tool.assert_called_once_with( - chat_model="gpt-4o", embedding_model="text-embedding-3-small" - ) - mock_rag_instance.query.assert_called_once_with( - content_types=None, - limit=8, # DEFAULT_CHUNKS_RETRIEVAL_LIMIT - question="What is OWASP Foundation?", - similarity_threshold=0.1, # DEFAULT_SIMILARITY_THRESHOLD - ) diff --git a/backend/tests/apps/slack/common/handlers/ai_test.py b/backend/tests/apps/slack/common/handlers/ai_test.py index 265873d9cf..5ebf63193e 100644 --- a/backend/tests/apps/slack/common/handlers/ai_test.py +++ b/backend/tests/apps/slack/common/handlers/ai_test.py @@ -62,53 +62,50 @@ def test_get_blocks_with_empty_response(self, mock_get_error_blocks, mock_proces mock_get_error_blocks.assert_called_once() assert result == error_blocks - @patch("apps.slack.common.handlers.ai.RagTool") - def test_process_ai_query_success(self, mock_rag_tool): - """Test successful AI query processing.""" + @patch("apps.slack.common.handlers.ai.AgenticRAGAgent") + def test_process_ai_query_success(self, mock_agent_class): + """Test successful AI query processing with AgenticRAGAgent.""" query = "What is OWASP?" expected_response = "OWASP is a security organization..." - mock_rag_instance = Mock() - mock_rag_instance.query.return_value = expected_response - mock_rag_tool.return_value = mock_rag_instance + mock_agent = Mock() + mock_agent.run.return_value = {"answer": expected_response} + mock_agent_class.return_value = mock_agent result = process_ai_query(query) - mock_rag_tool.assert_called_once_with( - chat_model="gpt-4o", - embedding_model="text-embedding-3-small", - ) - mock_rag_instance.query.assert_called_once_with(question=query) + mock_agent_class.assert_called_once() + mock_agent.run.assert_called_once_with(query=query) assert result == expected_response - @patch("apps.slack.common.handlers.ai.RagTool") - def test_process_ai_query_failure(self, mock_rag_tool): - """Test AI query processing failure.""" + @patch("apps.slack.common.handlers.ai.AgenticRAGAgent") + def test_process_ai_query_failure(self, mock_agent_class): + """Test AI query processing failure raises exception.""" query = "What is OWASP?" - mock_rag_instance = Mock() - mock_rag_instance.query.side_effect = Exception("AI service error") - mock_rag_tool.return_value = mock_rag_instance + mock_agent = Mock() + mock_agent.run.side_effect = Exception("AI service error") + mock_agent_class.return_value = mock_agent with pytest.raises(Exception, match="AI service error"): process_ai_query(query) - @patch("apps.slack.common.handlers.ai.RagTool") - def test_process_ai_query_returns_none(self, mock_rag_tool): - """Test AI query processing when RAG tool returns None.""" + mock_agent_class.assert_called_once() + mock_agent.run.assert_called_once_with(query=query) + + @patch("apps.slack.common.handlers.ai.AgenticRAGAgent") + def test_process_ai_query_returns_none(self, mock_agent_class): + """Test AI query processing when agent returns no answer.""" query = "What is OWASP?" 
- mock_rag_instance = Mock() - mock_rag_instance.query.return_value = None - mock_rag_tool.return_value = mock_rag_instance + mock_agent = Mock() + mock_agent.run.return_value = {"answer": None} + mock_agent_class.return_value = mock_agent result = process_ai_query(query) - mock_rag_tool.assert_called_once_with( - chat_model="gpt-4o", - embedding_model="text-embedding-3-small", - ) - mock_rag_instance.query.assert_called_once_with(question=query) + mock_agent_class.assert_called_once() + mock_agent.run.assert_called_once_with(query=query) assert result is None @patch("apps.slack.common.handlers.ai.markdown") From c037f1e57e18875efc3982ddc07f68de67a6f2ae Mon Sep 17 00:00:00 2001 From: Dishant1804 Date: Fri, 17 Oct 2025 00:13:08 +0530 Subject: [PATCH 2/7] spelling fixes --- cspell/custom-dict.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cspell/custom-dict.txt b/cspell/custom-dict.txt index b51c31bedb..afb0674a90 100644 --- a/cspell/custom-dict.txt +++ b/cspell/custom-dict.txt @@ -1,3 +1,4 @@ +Agentic Agsoc Aichi Aissue @@ -34,6 +35,7 @@ Whistleblower Wörld Xmark a2eeef +agentic ahmedxgouda algoliasearch ansa @@ -71,6 +73,7 @@ inlinehilite isanori jumpstart kasya +langgraph lhci libexpat lighthouseci From 49c829f4f1c47f3ec7ead64bb46705e950ec00c8 Mon Sep 17 00:00:00 2001 From: Dishant1804 Date: Fri, 17 Oct 2025 00:24:19 +0530 Subject: [PATCH 3/7] code rabbit and sonar qube suggestions --- backend/apps/ai/agent/nodes.py | 26 +++++++------------ backend/apps/ai/common/utils.py | 17 ++++++++++++ .../management/commands/ai_run_agentic_rag.py | 3 ++- 3 files changed, 28 insertions(+), 18 deletions(-) diff --git a/backend/apps/ai/agent/nodes.py b/backend/apps/ai/agent/nodes.py index 48622e953b..7398edd3b9 100644 --- a/backend/apps/ai/agent/nodes.py +++ b/backend/apps/ai/agent/nodes.py @@ -17,6 +17,7 @@ DEFAULT_REASONING_MODEL, DEFAULT_SIMILARITY_THRESHOLD, ) +from apps.ai.common.utils import extract_json_from_markdown from apps.core.models.prompt import Prompt @@ -55,7 +56,7 @@ def retrieve(self, state: dict[str, Any]) -> dict[str, Any]: content_types=metadata.get("entity_types"), ) - filtered_chunks = self.filter_chunks_by_metadata(chunks, metadata) + filtered_chunks = self.filter_chunks_by_metadata(chunks, metadata, limit) state["context_chunks"] = filtered_chunks[:limit] return state @@ -115,7 +116,7 @@ def evaluate(self, state: dict[str, Any]) -> dict[str, Any]: content_types=metadata.get("entity_types"), ) - filtered_chunks = self.filter_chunks_by_metadata(new_chunks, metadata) + filtered_chunks = self.filter_chunks_by_metadata(new_chunks, metadata, limit) state["context_chunks"] = filtered_chunks[:limit] state["feedback"] = "Expand and refine answer using newly retrieved context." 
@@ -137,6 +138,7 @@ def filter_chunks_by_metadata( self, retrieved_chunks: list[dict[str, Any]], query_metadata: dict[str, Any], + limit: int, ) -> list[dict[str, Any]]: """Rank and filter retrieved chunks using metadata and simple heuristics.""" if not retrieved_chunks: @@ -187,7 +189,7 @@ def filter_chunks_by_metadata( key=lambda entry: (entry[1], entry[0].get("similarity", 0)), reverse=True ) - return [chunk for chunk, _ in ranked_chunks[:DEFAULT_CHUNKS_RETRIEVAL_LIMIT]] + return [chunk for chunk, _ in ranked_chunks[:limit]] def extract_query_metadata(self, query: str) -> dict[str, Any]: """Extract metadata from the user's query using an LLM.""" @@ -208,15 +210,10 @@ def extract_query_metadata(self, query: str) -> dict[str, Any]: temperature=0.7, ) content = response.choices[0].message.content.strip() - - if "```json" in content: - content = content.split("```json")[1].split("```")[0].strip() - elif "```" in content: - content = content.split("```")[1].split("```")[0].strip() - + content = extract_json_from_markdown(content) return json.loads(content) - except (openai.OpenAIError, json.JSONDecodeError, ValueError): + except openai.OpenAIError: return { "requested_fields": [], "entity_types": [], @@ -253,15 +250,10 @@ def call_evaluator( temperature=0.7, ) content = response.choices[0].message.content.strip() - - if "```json" in content: - content = content.split("```json")[1].split("```")[0].strip() - elif "```" in content: - content = content.split("```")[1].split("```")[0].strip() - + content = extract_json_from_markdown(content) return json.loads(content) - except (openai.OpenAIError, json.JSONDecodeError, ValueError): + except openai.OpenAIError: return { "complete": False, "feedback": "Evaluator error or invalid response.", diff --git a/backend/apps/ai/common/utils.py b/backend/apps/ai/common/utils.py index 8a258f7f3b..078fbe18a4 100644 --- a/backend/apps/ai/common/utils.py +++ b/backend/apps/ai/common/utils.py @@ -70,6 +70,23 @@ def create_chunks_and_embeddings( return chunks +def extract_json_from_markdown(content: str) -> str: + """Extract JSON content from markdown code blocks. + + Args: + content (str): The content string that may contain markdown code blocks + + Returns: + str: The extracted JSON content with code block markers removed + + """ + if "```json" in content: + return content.split("```json")[1].split("```")[0].strip() + if "```" in content: + return content.split("```")[1].split("```")[0].strip() + return content + + def regenerate_chunks_for_context(context: Context): """Regenerates all chunks for a single, specific context instance. 
diff --git a/backend/apps/ai/management/commands/ai_run_agentic_rag.py b/backend/apps/ai/management/commands/ai_run_agentic_rag.py index f8087f6295..f6ac140fdc 100644 --- a/backend/apps/ai/management/commands/ai_run_agentic_rag.py +++ b/backend/apps/ai/management/commands/ai_run_agentic_rag.py @@ -29,6 +29,7 @@ def handle(self, *args, **options): return result = agent.run(query=options["query"]) + answer = result.get("answer", "") self.stdout.write(self.style.SUCCESS("Agentic RAG workflow completed")) - self.stdout.write(f"\nAnswer:\n{result.answer}") + self.stdout.write(f"\nAnswer:\n{answer}") From 5ca3138d4ab671c593e92bb67cdfded540b540ac Mon Sep 17 00:00:00 2001 From: Dishant1804 Date: Fri, 17 Oct 2025 00:53:57 +0530 Subject: [PATCH 4/7] code rabbit suggestions --- backend/apps/ai/agent/nodes.py | 15 +++++++-------- backend/apps/ai/common/constants.py | 2 +- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/backend/apps/ai/agent/nodes.py b/backend/apps/ai/agent/nodes.py index 7398edd3b9..8a4371c5d6 100644 --- a/backend/apps/ai/agent/nodes.py +++ b/backend/apps/ai/agent/nodes.py @@ -66,9 +66,7 @@ def generate(self, state: dict[str, Any]) -> dict[str, Any]: iteration = state.get("iteration", 0) + 1 feedback = state.get("feedback") query = state["query"] - augmented_query = ( - query if not feedback else f"{query}\\n\\nRevise per feedback:\\n{feedback}" - ) + augmented_query = query if not feedback else f"{query}\n\nRevise per feedback:\n{feedback}" answer = self.generator.generate_answer( query=augmented_query, @@ -103,8 +101,8 @@ def evaluate(self, state: dict[str, Any]) -> dict[str, Any]: if history: history[-1]["evaluation"] = evaluation - if "missing context" in evaluation.get("justification", "").lower(): - limit = state.get("limit", DEFAULT_CHUNKS_RETRIEVAL_LIMIT) * 2 + if evaluation.get("requires_more_context", False): + limit = min(state.get("limit", DEFAULT_CHUNKS_RETRIEVAL_LIMIT) * 2, 64) threshold = state.get("similarity_threshold", DEFAULT_SIMILARITY_THRESHOLD) * 0.95 metadata = state.get("extracted_metadata", {}) @@ -227,9 +225,9 @@ def call_evaluator( """Call the evaluator LLM to assess the quality of the generated answer.""" formatted_context = self.generator.prepare_context(context_chunks) evaluation_prompt = ( - f"User Query:\\n{query}\\n\\n" - f"Candidate Answer:\\n{answer}\\n\\n" - f"Context Provided:\\n{formatted_context}\\n\\n" + f"User Query:\n{query}\n\n" + f"Candidate Answer:\n{answer}\n\n" + f"Context Provided:\n{formatted_context}\n\n" "Respond with the mandated JSON object." 
)

@@ -258,4 +256,5 @@ def call_evaluator(
             "complete": False,
             "feedback": "Evaluator error or invalid response.",
             "justification": "Evaluator error or invalid response.",
+            "requires_more_context": False,
         }
diff --git a/backend/apps/ai/common/constants.py b/backend/apps/ai/common/constants.py
index 07942e7e5d..363cf47422 100644
--- a/backend/apps/ai/common/constants.py
+++ b/backend/apps/ai/common/constants.py
@@ -2,7 +2,7 @@
 DEFAULT_CHUNKS_RETRIEVAL_LIMIT = 32
 DEFAULT_LAST_REQUEST_OFFSET_SECONDS = 2
-DEFAULT_MAX_ITERATIONS = 4
+DEFAULT_MAX_ITERATIONS = 3
 DEFAULT_REASONING_MODEL = "gpt-4o"
 DEFAULT_SIMILARITY_THRESHOLD = 0.1
 DELIMITER = "\n\n"

From 331f022490dac9591d2899b621b599a4afe2086e Mon Sep 17 00:00:00 2001
From: Dishant1804
Date: Fri, 17 Oct 2025 13:12:47 +0530
Subject: [PATCH 5/7] refining

---
 backend/apps/ai/agent/tools/rag/generator.py  | 22 +++++++++++++++++--
 backend/apps/ai/models/chunk.py               |  2 +-
 .../core/migrations/0003_alter_prompt_text.py |  4 ++--
 backend/apps/core/models/prompt.py            |  2 +-
 4 files changed, 24 insertions(+), 6 deletions(-)

diff --git a/backend/apps/ai/agent/tools/rag/generator.py b/backend/apps/ai/agent/tools/rag/generator.py
index a8e38cedee..7a896a0b84 100644
--- a/backend/apps/ai/agent/tools/rag/generator.py
+++ b/backend/apps/ai/agent/tools/rag/generator.py
@@ -16,7 +16,7 @@ class Generator:
     """Generates answers to user queries based on retrieved context."""
 
     MAX_TOKENS = 2000
-    TEMPERATURE = 0.8
+    TEMPERATURE = 0.5
 
     def __init__(self, chat_model: str = "gpt-4o"):
         """Initialize the Generator.
@@ -53,8 +53,16 @@ def prepare_context(self, context_chunks: list[dict[str, Any]]) -> str:
         for i, chunk in enumerate(context_chunks):
             source_name = chunk.get("source_name", f"Unknown Source {i + 1}")
             text = chunk.get("text", "")
+            additional_context = chunk.get("additional_context", {})
+
+            if additional_context:
+                context_block = (
+                    f"Source Name: {source_name}\nContent: {text}\n"
+                    f"Additional Context: {additional_context}"
+                )
+            else:
+                context_block = f"Source Name: {source_name}\nContent: {text}"
 
-            context_block = f"Source Name: {source_name}\nContent: {text}"
             formatted_context.append(context_block)
 
         return "\n\n---\n\n".join(formatted_context)
@@ -73,6 +81,16 @@ def generate_answer(self, query: str, context_chunks: list[dict[str, Any]]) -> s
         formatted_context = self.prepare_context(context_chunks)
 
         user_prompt = f"""
+- You are an assistant for question-answering tasks related to OWASP.
+- Use the following pieces of retrieved context to answer the question.
+- If the question is related to OWASP, you may also answer from your own knowledge; if you
+don't know the answer, just say that you don't know.
+- Keep the answer concise, but provide more detail when you judge that a longer
+response will answer the question better.
+- Ask for the current location if the query is related to location.
+- Ask for the information you need if the query is very personalized or user-centric.
+- Do not use phrases such as "context", "based on context", "provided information", or
+"information given to me" in your responses.
Question: {query} Context: {formatted_context} Answer: diff --git a/backend/apps/ai/models/chunk.py b/backend/apps/ai/models/chunk.py index ad6f6c60cf..a8e568c7ea 100644 --- a/backend/apps/ai/models/chunk.py +++ b/backend/apps/ai/models/chunk.py @@ -35,7 +35,7 @@ def bulk_save(chunks, fields=None): def split_text(text: str) -> list[str]: """Split text into chunks.""" return RecursiveCharacterTextSplitter( - chunk_size=200, + chunk_size=300, chunk_overlap=20, length_function=len, separators=["\n\n", "\n", " ", ""], diff --git a/backend/apps/core/migrations/0003_alter_prompt_text.py b/backend/apps/core/migrations/0003_alter_prompt_text.py index b9710509d3..153d36211d 100644 --- a/backend/apps/core/migrations/0003_alter_prompt_text.py +++ b/backend/apps/core/migrations/0003_alter_prompt_text.py @@ -1,4 +1,4 @@ -# Generated by Django 5.2.6 on 2025-10-15 13:02 +# Generated by Django 5.2.6 on 2025-10-17 05:29 from django.db import migrations, models @@ -12,6 +12,6 @@ class Migration(migrations.Migration): migrations.AlterField( model_name="prompt", name="text", - field=models.TextField(blank=True, default="", max_length=2000, verbose_name="Text"), + field=models.TextField(blank=True, default="", max_length=3000, verbose_name="Text"), ), ] diff --git a/backend/apps/core/models/prompt.py b/backend/apps/core/models/prompt.py index 654cc7d3cd..61ea21ae05 100644 --- a/backend/apps/core/models/prompt.py +++ b/backend/apps/core/models/prompt.py @@ -20,7 +20,7 @@ class Meta: name = models.CharField(verbose_name="Name", max_length=100) key = models.CharField(verbose_name="Key", max_length=100, unique=True, blank=True) - text = models.TextField(verbose_name="Text", max_length=2000, default="", blank=True) + text = models.TextField(verbose_name="Text", max_length=3000, default="", blank=True) def __str__(self): """Prompt human readable representation.""" From 59850f059fc95f1ee0e2c05672a057b5d418fcbf Mon Sep 17 00:00:00 2001 From: Dishant1804 Date: Fri, 17 Oct 2025 14:11:01 +0530 Subject: [PATCH 6/7] fix test --- backend/tests/apps/ai/agent/tools/rag/generator_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/tests/apps/ai/agent/tools/rag/generator_test.py b/backend/tests/apps/ai/agent/tools/rag/generator_test.py index 9f30176cfe..8b820adb35 100644 --- a/backend/tests/apps/ai/agent/tools/rag/generator_test.py +++ b/backend/tests/apps/ai/agent/tools/rag/generator_test.py @@ -320,4 +320,4 @@ def test_generate_answer_none_openai_response(self): def test_constants(self): """Test class constants have expected values.""" assert Generator.MAX_TOKENS == 2000 - assert Generator.TEMPERATURE == 0.8 + assert Generator.TEMPERATURE == 0.5 From 4de76fd5e445ae469b71f197f9140e2318d798b6 Mon Sep 17 00:00:00 2001 From: Dishant1804 Date: Fri, 17 Oct 2025 20:04:39 +0530 Subject: [PATCH 7/7] refining --- backend/apps/ai/models/chunk.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/apps/ai/models/chunk.py b/backend/apps/ai/models/chunk.py index a8e568c7ea..ad6f6c60cf 100644 --- a/backend/apps/ai/models/chunk.py +++ b/backend/apps/ai/models/chunk.py @@ -35,7 +35,7 @@ def bulk_save(chunks, fields=None): def split_text(text: str) -> list[str]: """Split text into chunks.""" return RecursiveCharacterTextSplitter( - chunk_size=300, + chunk_size=200, chunk_overlap=20, length_function=len, separators=["\n\n", "\n", " ", ""],
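
A minimal usage sketch of the agent entry point this series introduces. It assumes the module path from the diffs above (apps.ai.agent.agent.AgenticRAGAgent), a configured DJANGO_OPEN_AI_SECRET_KEY, and an initialized Django environment; it mirrors what the ai_run_agentic_rag management command does and is a sketch, not part of the applied patches:

    # Hypothetical driver script; DJANGO_SETTINGS_MODULE must point at the backend settings.
    import django

    django.setup()

    from apps.ai.agent.agent import AgenticRAGAgent

    # Builds the retrieve -> generate -> evaluate LangGraph and runs it to completion.
    agent = AgenticRAGAgent()
    result = agent.run(query="What is OWASP Foundation?")

    # run() returns a dict; "answer" carries the final generated text.
    print(result.get("answer", ""))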