7 changes: 6 additions & 1 deletion backend/apps/api/rest/v0/issue.py
@@ -90,7 +90,12 @@ def list_issues(
if filters.state:
issues = issues.filter(state=filters.state)

return issues.order_by(ordering or "-created_at", "-updated_at")
primary_order = ordering or "-created_at"
order_fields = [primary_order]
if primary_order not in {"updated_at", "-updated_at"}:
order_fields.append("-updated_at")

return issues.order_by(*order_fields)


@router.get(
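The ordering change is the same in issue.py, milestone.py, and release.py: the secondary tiebreaker is appended only when the caller's primary sort is not already that field, so `order_by` never receives the same column twice. A minimal standalone sketch of the shared pattern, using a hypothetical `build_order_fields` helper (the endpoints in this PR inline the logic instead):

```python
# Hypothetical helper, not part of this PR: the endpoints inline this logic.
def build_order_fields(ordering: str | None, default: str, tiebreaker: str) -> list[str]:
    """Return order_by() fields, adding the tiebreaker only when it is not the primary sort."""
    primary = ordering or default
    fields = [primary]
    if primary.lstrip("-") != tiebreaker.lstrip("-"):
        fields.append(tiebreaker)
    return fields


# Mirrors issue.py / milestone.py: the default sort gets the tiebreaker,
# an explicit updated_at sort does not.
print(build_order_fields(None, "-created_at", "-updated_at"))          # ['-created_at', '-updated_at']
print(build_order_fields("updated_at", "-created_at", "-updated_at"))  # ['updated_at']
```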
7 changes: 6 additions & 1 deletion backend/apps/api/rest/v0/milestone.py
@@ -92,7 +92,12 @@ def list_milestones(
if filters.state:
milestones = milestones.filter(state=filters.state)

return milestones.order_by(ordering or "-created_at", "-updated_at")
primary_order = ordering or "-created_at"
order_fields = [primary_order]
if primary_order not in {"updated_at", "-updated_at"}:
order_fields.append("-updated_at")

return milestones.order_by(*order_fields)


@router.get(
6 changes: 0 additions & 6 deletions backend/apps/api/rest/v0/pagination.py
@@ -32,20 +32,14 @@ def paginate_queryset(self, queryset, pagination: Input, **params):
"""Paginate the queryset and return standardized output."""
page = pagination.page
page_size = pagination.page_size

# Calculate pagination.
total_count = queryset.count()
# Ensure total_pages is at least 1 for consistent metadata.
total_pages = max(1, (total_count + page_size - 1) // page_size)

# Validate that the requested page is within the valid range.
if page > total_pages:
message = f"Page {page} not found. Valid pages are 1 to {total_pages}."
raise Http404(message)

offset = (page - 1) * page_size

# Get the page items.
items = list(queryset[offset : offset + page_size])

return {
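The logic kept in paginate_queryset is a ceiling division clamped to a minimum of one page, followed by an out-of-range check and an offset slice. A small standalone sketch of that arithmetic (using ValueError in place of the view's Http404):

```python
# Standalone sketch of the arithmetic retained in paginate_queryset.
def page_bounds(total_count: int, page: int, page_size: int) -> tuple[int, int, int]:
    """Return (total_pages, start, end) for a 1-indexed page."""
    # Ceiling division, clamped so an empty queryset still reports one (empty) page.
    total_pages = max(1, (total_count + page_size - 1) // page_size)
    if page > total_pages:
        msg = f"Page {page} not found. Valid pages are 1 to {total_pages}."
        raise ValueError(msg)  # the API view raises Http404 here instead
    offset = (page - 1) * page_size
    return total_pages, offset, offset + page_size


print(page_bounds(0, 1, 10))   # (1, 0, 10): an empty queryset still has a valid page 1
print(page_bounds(21, 3, 10))  # (3, 20, 30): the last page holds the remaining item
```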
7 changes: 6 additions & 1 deletion backend/apps/api/rest/v0/release.py
@@ -91,7 +91,12 @@ def list_release(
if filters.tag_name:
releases = releases.filter(tag_name=filters.tag_name)

return releases.order_by(ordering or "-published_at", "-created_at")
primary_order = ordering or "-published_at"
order_fields = [primary_order]
if primary_order not in {"created_at", "-created_at"}:
order_fields.append("-created_at")

return releases.order_by(*order_fields)


@router.get(
96 changes: 96 additions & 0 deletions backend/tests/apps/ai/agent/agent_test.py
@@ -0,0 +1,96 @@
"""Tests for the AgenticRAGAgent class."""

from unittest.mock import MagicMock

from apps.ai.agent.agent import AgenticRAGAgent


class TestAgenticRAGAgent:
"""Tests for AgenticRAGAgent."""

target_module = "apps.ai.agent.agent"

def test_init(self, mocker):
"""Test AgenticRAGAgent initialization."""
mock_nodes = mocker.patch(f"{self.target_module}.AgentNodes")
mock_state_graph = mocker.patch(f"{self.target_module}.StateGraph")

mock_graph_instance = MagicMock()
mock_state_graph.return_value = mock_graph_instance

agent = AgenticRAGAgent()

mock_nodes.assert_called_once()
assert agent.nodes is not None
assert agent.graph is not None

def test_run(self, mocker):
"""Test the run method executes the RAG workflow."""
mocker.patch(f"{self.target_module}.AgentNodes")
mock_state_graph = mocker.patch(f"{self.target_module}.StateGraph")

mock_graph_instance = MagicMock()
mock_compiled_graph = MagicMock()
mock_state_graph.return_value = mock_graph_instance
mock_graph_instance.compile.return_value = mock_compiled_graph

mock_compiled_graph.invoke.return_value = {
"answer": "Test answer",
"context_chunks": [{"text": "chunk1"}],
"evaluation": {"score": 0.9},
"extracted_metadata": {"key": "value"},
"history": ["step1", "step2"],
"iteration": 2,
}

agent = AgenticRAGAgent()
result = agent.run("Test query")

mock_compiled_graph.invoke.assert_called_once()
assert result["answer"] == "Test answer"
assert result["context_chunks"] == [{"text": "chunk1"}]
assert result["evaluation"] == {"score": 0.9}
assert result["extracted_metadata"] == {"key": "value"}
assert result["history"] == ["step1", "step2"]
assert result["iterations"] == 2

def test_run_with_empty_result(self, mocker):
"""Test run method handles empty results gracefully."""
mocker.patch(f"{self.target_module}.AgentNodes")
mock_state_graph = mocker.patch(f"{self.target_module}.StateGraph")

mock_graph_instance = MagicMock()
mock_compiled_graph = MagicMock()
mock_state_graph.return_value = mock_graph_instance
mock_graph_instance.compile.return_value = mock_compiled_graph

mock_compiled_graph.invoke.return_value = {}

agent = AgenticRAGAgent()
result = agent.run("Test query")

assert result["answer"] == ""
assert result["context_chunks"] == []
assert result["evaluation"] == {}
assert result["extracted_metadata"] == {}
assert result["history"] == []
assert result["iterations"] == 0

def test_build_graph(self, mocker):
"""Test build_graph creates the correct state machine."""
mocker.patch(f"{self.target_module}.AgentNodes")
mock_state_graph = mocker.patch(f"{self.target_module}.StateGraph")
mock_start = mocker.patch(f"{self.target_module}.START")
mocker.patch(f"{self.target_module}.END")

mock_graph_instance = MagicMock()
mock_state_graph.return_value = mock_graph_instance

AgenticRAGAgent()

assert mock_graph_instance.add_node.call_count == 3
mock_graph_instance.add_edge.assert_any_call(mock_start, "retrieve")
mock_graph_instance.add_edge.assert_any_call("retrieve", "generate")
mock_graph_instance.add_edge.assert_any_call("generate", "evaluate")
mock_graph_instance.add_conditional_edges.assert_called_once()
mock_graph_instance.compile.assert_called_once()
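
These tests pin down the graph shape rather than the node internals: three nodes, linear edges from START through retrieve, generate, and evaluate, one conditional edge, and a final compile(). A minimal LangGraph sketch consistent with those assertions (the state schema, the AgentNodes method names, and the should_continue routing function are assumptions, not the project's actual implementation):

```python
from typing import TypedDict

from langgraph.graph import END, START, StateGraph


class RAGState(TypedDict, total=False):
    """Placeholder state schema; the real agent tracks at least these keys."""

    query: str
    answer: str
    context_chunks: list
    evaluation: dict
    history: list
    iteration: int


def build_graph(nodes):
    """Wire the retrieve -> generate -> evaluate loop asserted by the tests above."""
    graph = StateGraph(RAGState)
    graph.add_node("retrieve", nodes.retrieve)
    graph.add_node("generate", nodes.generate)
    graph.add_node("evaluate", nodes.evaluate)
    graph.add_edge(START, "retrieve")
    graph.add_edge("retrieve", "generate")
    graph.add_edge("generate", "evaluate")
    # Either loop back for another retrieval pass or finish, based on the evaluation;
    # `should_continue` is a placeholder name for that routing function.
    graph.add_conditional_edges("evaluate", nodes.should_continue, {"retrieve": "retrieve", END: END})
    return graph.compile()
```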
141 changes: 141 additions & 0 deletions backend/tests/apps/ai/agent/nodes_test.py
@@ -1,5 +1,6 @@
import openai
import pytest
from django.core.exceptions import ObjectDoesNotExist

from apps.ai.agent.nodes import AgentNodes
from apps.ai.common.constants import DEFAULT_CHUNKS_RETRIEVAL_LIMIT, DEFAULT_SIMILARITY_THRESHOLD
@@ -79,6 +80,20 @@ def test_evaluate_requires_more_context(self, nodes, mocker):
assert "context_chunks" in new_state
assert new_state["evaluation"] == mock_eval

def test_evaluate_updates_history(self, nodes, mocker):
"""Test that evaluation updates the last history entry."""
state = {
"query": "test",
"answer": "good",
"history": [{"iteration": 1, "answer": "good"}],
}
mock_eval = {"requires_more_context": False, "feedback": None, "complete": True}
nodes.call_evaluator = mocker.Mock(return_value=mock_eval)

new_state = nodes.evaluate(state)

assert new_state["history"][-1]["evaluation"] == mock_eval

def test_evaluate_complete(self, nodes, mocker):
state = {"query": "test", "answer": "good"}
mock_eval = {"requires_more_context": False, "feedback": None, "complete": True}
@@ -109,6 +124,82 @@ def test_filter_chunks_by_metadata(self, nodes):
filtered = nodes.filter_chunks_by_metadata(chunks, metadata, limit=10)
assert filtered[0]["text"] == "foo"

def test_filter_chunks_empty_list(self, nodes):
"""Test filter_chunks_by_metadata returns empty list for empty input."""
result = nodes.filter_chunks_by_metadata([], {"filters": {}}, limit=10)
assert result == []

def test_filter_chunks_no_filters(self, nodes):
"""Test filter_chunks_by_metadata returns original chunks when no filters."""
chunks = [{"text": "chunk1", "similarity": 0.9}]
metadata = {"filters": {}, "requested_fields": []}

result = nodes.filter_chunks_by_metadata(chunks, metadata, limit=10)
assert result == chunks

def test_filter_chunks_with_requested_fields(self, nodes):
"""Test filter_chunks_by_metadata scores chunks with requested fields."""
chunks = [
{"text": "no field", "additional_context": {}, "similarity": 0.8},
{"text": "has field", "additional_context": {"name": "test"}, "similarity": 0.7},
]
metadata = {"filters": {}, "requested_fields": ["name"]}

result = nodes.filter_chunks_by_metadata(chunks, metadata, limit=10)
assert result[0]["text"] == "has field"

def test_filter_chunks_with_list_metadata(self, nodes):
"""Test filter_chunks_by_metadata handles list metadata values."""
chunks = [
{
"text": "has list",
"additional_context": {"tags": ["python", "django"]},
"similarity": 0.8,
},
{
"text": "no match",
"additional_context": {"tags": ["java", "spring"]},
"similarity": 0.9,
},
]
metadata = {"filters": {"tags": "python"}, "requested_fields": []}

result = nodes.filter_chunks_by_metadata(chunks, metadata, limit=10)
assert result[0]["text"] == "has list"

def test_filter_chunks_exact_match(self, nodes):
"""Test filter_chunks_by_metadata handles exact non-string matches."""
chunks = [
{"text": "exact", "additional_context": {"count": 42}, "similarity": 0.8},
{"text": "no match", "additional_context": {"count": 10}, "similarity": 0.9},
]
metadata = {"filters": {"count": 42}, "requested_fields": []}

result = nodes.filter_chunks_by_metadata(chunks, metadata, limit=10)
assert result[0]["text"] == "exact"

def test_filter_chunks_content_match(self, nodes):
"""Test filter_chunks_by_metadata scores content matches."""
chunks = [
{"text": "contains python code", "additional_context": {}, "similarity": 0.7},
{"text": "something else", "additional_context": {}, "similarity": 0.9},
]
metadata = {"filters": {"lang": "python"}, "requested_fields": []}

result = nodes.filter_chunks_by_metadata(chunks, metadata, limit=10)
assert result[0]["text"] == "contains python code"

def test_filter_chunks_metadata_boost(self, nodes):
"""Test filter_chunks_by_metadata adds score for metadata richness."""
chunks = [
{"text": "rich", "additional_context": {"a": 1, "b": 2, "c": 3}, "similarity": 0.7},
{"text": "poor", "additional_context": {}, "similarity": 0.9},
]
metadata = {"filters": {"x": "y"}, "requested_fields": []}

result = nodes.filter_chunks_by_metadata(chunks, metadata, limit=10)
assert result[0]["text"] == "rich"

def test_extract_query_metadata_openai_error(self, nodes, mocker):
mocker.patch(
"apps.ai.agent.nodes.Prompt.get_metadata_extractor_prompt", return_value="sys prompt"
@@ -118,6 +209,31 @@ def test_extract_query_metadata_openai_error(self, nodes, mocker):
metadata = nodes.extract_query_metadata("query")
assert metadata["intent"] == "general query"

def test_extract_query_metadata_prompt_not_found(self, nodes, mocker):
"""Test extract_query_metadata raises when prompt not found."""
mocker.patch("apps.ai.agent.nodes.Prompt.get_metadata_extractor_prompt", return_value=None)

with pytest.raises(ObjectDoesNotExist, match="metadata-extractor-prompt"):
nodes.extract_query_metadata("query")

def test_extract_query_metadata_success(self, nodes, mocker):
"""Test successful metadata extraction from LLM."""
mocker.patch(
"apps.ai.agent.nodes.Prompt.get_metadata_extractor_prompt", return_value="sys prompt"
)

mock_response = mocker.Mock()
mock_response.choices = [mocker.Mock()]
mock_response.choices[
0
].message.content = '{"entity_types": ["project"], "intent": "search"}'
nodes.openai_client.chat.completions.create.return_value = mock_response

metadata = nodes.extract_query_metadata("find OWASP projects")

assert metadata["entity_types"] == ["project"]
assert metadata["intent"] == "search"

def test_call_evaluator_openai_error(self, nodes, mocker):
nodes.generator.prepare_context.return_value = "ctx"
mocker.patch(
Expand All @@ -127,3 +243,28 @@ def test_call_evaluator_openai_error(self, nodes, mocker):

eval_result = nodes.call_evaluator(query="q", answer="a", context_chunks=[])
assert eval_result["feedback"] == "Evaluator error or invalid response."

def test_call_evaluator_prompt_not_found(self, nodes, mocker):
"""Test call_evaluator raises when prompt not found."""
nodes.generator.prepare_context.return_value = "ctx"
mocker.patch("apps.ai.agent.nodes.Prompt.get_evaluator_system_prompt", return_value=None)

with pytest.raises(ObjectDoesNotExist, match="evaluator-system-prompt"):
nodes.call_evaluator(query="q", answer="a", context_chunks=[])

def test_call_evaluator_success(self, nodes, mocker):
"""Test successful evaluation from LLM."""
nodes.generator.prepare_context.return_value = "ctx"
mocker.patch(
"apps.ai.agent.nodes.Prompt.get_evaluator_system_prompt", return_value="sys prompt"
)

mock_response = mocker.Mock()
mock_response.choices = [mocker.Mock()]
mock_response.choices[0].message.content = '{"complete": true, "feedback": null}'
nodes.openai_client.chat.completions.create.return_value = mock_response

eval_result = nodes.call_evaluator(query="q", answer="a", context_chunks=[])

assert eval_result["complete"]
assert eval_result["feedback"] is None
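
The prompt-driven helpers covered above share one contract: a missing Prompt row raises ObjectDoesNotExist naming the prompt key, an OpenAI failure degrades to a safe default, and a successful response is parsed as JSON. A condensed illustrative sketch of that pattern (the wrapper name, model, and fallback argument are assumptions, not the project's exact code):

```python
import json

import openai
from django.core.exceptions import ObjectDoesNotExist


def call_json_prompt(client, get_prompt, prompt_key, user_content, fallback):
    """Illustrative wrapper: load a stored system prompt, call the chat API, parse JSON, degrade gracefully."""
    system_prompt = get_prompt()
    if not system_prompt:
        # Matches the tests: a missing prompt is a hard error naming the prompt key.
        raise ObjectDoesNotExist(prompt_key)
    try:
        response = client.chat.completions.create(
            model="gpt-4o-mini",  # assumed model; the project may configure another
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_content},
            ],
        )
        return json.loads(response.choices[0].message.content)
    except (openai.OpenAIError, json.JSONDecodeError):
        # Matches the tests: API or parsing failures return a default instead of raising.
        return fallback
```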