Changes from all commits
27 commits
a201dd9
Gemini Support for Local Development
rajnisk Jan 29, 2026
c4ad17a
fix: resolve inconsistent LLM provider selection and env var mapping
rajnisk Jan 29, 2026
94752cf
fix(ai): resolve missing prompt crash and stabilize LLM integration
rajnisk Jan 29, 2026
cd4648c
feat(ai): add multi-agent orchestration, async Slack handling, and st…
rajnisk Jan 29, 2026
c755952
fix(ai): improve bot reactions, RAG search, greeting handling, and tests
rajnisk Jan 30, 2026
9148cc9
fix(ai): improve bot reactions, RAG search, greeting handling, and tests
rajnisk Jan 30, 2026
40584df
fix(ai): prevent Question Detector output from being posted and handl…
rajnisk Jan 30, 2026
acf70c4
Fix 5 code smell issues detected by SonarCloud/CodeRabbit
rajnisk Jan 30, 2026
13fefce
fix(ci): address pre-commit checks and code quality issues
rajnisk Jan 31, 2026
e2616f4
Restore .gitignore to match target branch
rajnisk Jan 31, 2026
e6652d4
fix: address PR review feedback for Slack AI assistant
rajnisk Feb 3, 2026
fdbfa5c
fix: address code review feedback and improve error handling
rajnisk Feb 3, 2026
9bc5b05
fix: address CodeRabbit review comments
rajnisk Feb 4, 2026
5c008ac
fix: remove redundant link formatting to address CodeRabbit comment
rajnisk Feb 4, 2026
c5b613f
fix: use precise regex matching for YES/NO detection to avoid false p…
rajnisk Feb 4, 2026
7f95e13
fix: use SecretValue for GOOGLE_API_KEY for consistent secret handling
rajnisk Feb 4, 2026
c892d62
fix: revert GOOGLE_API_KEY to Value() for optional secret handling
rajnisk Feb 4, 2026
d69f652
fix: explicitly set environ_name for GOOGLE_API_KEY to make it optional
rajnisk Feb 4, 2026
29ff325
fix: use lazy imports to prevent AppRegistryNotReady error
rajnisk Feb 4, 2026
d005c33
fix: avoid logging full blocks payload to prevent PII exposure
rajnisk Feb 4, 2026
bf0fcc8
fix: address CodeRabbit security and multi-workspace issues
rajnisk Feb 4, 2026
077beea
fix: cache bot_user_id under both keys when team_id mismatch occurs
rajnisk Feb 4, 2026
e78c90c
fix: make chunk dimension validation lenient for tests and message_ts…
rajnisk Feb 4, 2026
014deef
fix: address CodeRabbit comments in message_auto_reply.py
rajnisk Feb 4, 2026
37c1949
fix: narrow exception handling in message_posted.py and fix syntax error
rajnisk Feb 4, 2026
bc611c2
refactor: remove redundant duplicate validation pass in format_blocks
rajnisk Feb 4, 2026
8a1413c
fix: continue processing on parse failure instead of dropping messages
rajnisk Feb 4, 2026
2 changes: 1 addition & 1 deletion .gitignore
@@ -43,4 +43,4 @@ frontend/yarn-error.log*
logs
node_modules/
TODO
venv/
venv/
1 change: 1 addition & 0 deletions backend/Makefile
@@ -56,6 +56,7 @@ django-shell:
dump-data:
@echo "Dumping Nest data"
@CMD="python manage.py dumpdata \
core \
github \
owasp \
slack.Conversation \
4 changes: 2 additions & 2 deletions backend/apps/ai/agents/chapter/agent.py
@@ -7,7 +7,7 @@
from apps.ai.template_loader import env


def create_chapter_agent() -> Agent:
def create_chapter_agent(allow_delegation: bool = False) -> Agent:
"""Create Chapter Expert Agent.

Returns:
@@ -25,6 +25,6 @@ def create_chapter_agent() -> Agent:
tools=[search_chapters],
llm=get_llm(),
verbose=True,
allow_delegation=False,
allow_delegation=allow_delegation,
memory=False,
)
7 changes: 5 additions & 2 deletions backend/apps/ai/agents/community/agent.py
@@ -10,9 +10,12 @@
from apps.ai.template_loader import env


def create_community_agent() -> Agent:
def create_community_agent(allow_delegation: bool = False) -> Agent:
"""Create Community Expert Agent.

Args:
allow_delegation (bool): Whether the agent can delegate tasks. Defaults to False.

Returns:
Agent: Community Expert Agent configured with community tools

@@ -33,6 +36,6 @@ def create_community_agent() -> Agent:
],
llm=get_llm(),
verbose=True,
allow_delegation=False,
allow_delegation=allow_delegation,
memory=False,
)
7 changes: 5 additions & 2 deletions backend/apps/ai/agents/contribution/agent.py
@@ -11,9 +11,12 @@
from apps.ai.template_loader import env


def create_contribution_agent() -> Agent:
def create_contribution_agent(allow_delegation: bool = False) -> Agent:
"""Create Contribution Expert Agent.

Args:
allow_delegation (bool): Whether the agent can delegate tasks. Defaults to False.

Returns:
Agent: Contribution Expert Agent configured with contribution and GSoC tools

@@ -34,6 +37,6 @@ def create_contribution_agent() -> Agent:
],
llm=get_llm(),
verbose=True,
allow_delegation=False,
allow_delegation=allow_delegation,
memory=False,
)
7 changes: 5 additions & 2 deletions backend/apps/ai/agents/project/agent.py
@@ -14,9 +14,12 @@
from apps.ai.template_loader import env


def create_project_agent() -> Agent:
def create_project_agent(allow_delegation: bool = False) -> Agent:
"""Create Project Expert Agent.

Args:
allow_delegation (bool): Whether the agent can delegate tasks. Defaults to False.

Returns:
Agent: Project Expert Agent configured with project tools

@@ -39,6 +42,6 @@ def create_project_agent() -> Agent:
],
llm=get_llm(),
verbose=True,
allow_delegation=False,
allow_delegation=allow_delegation,
memory=False,
)
7 changes: 5 additions & 2 deletions backend/apps/ai/agents/rag/agent.py
@@ -7,9 +7,12 @@
from apps.ai.template_loader import env


def create_rag_agent() -> Agent:
def create_rag_agent(allow_delegation: bool = False) -> Agent:
"""Create RAG Agent.

Args:
allow_delegation (bool): Whether the agent can delegate tasks. Defaults to False.

Returns:
Agent: RAG Agent configured with semantic search tools

@@ -25,6 +28,6 @@ def create_rag_agent() -> Agent:
tools=[semantic_search],
llm=get_llm(),
verbose=True,
allow_delegation=False,
allow_delegation=allow_delegation,
memory=False,
)
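Note: all five agent factories gain the same optional allow_delegation flag. A minimal wiring sketch of how an orchestrating crew might use it follows; the Crew/Task setup and the task text are assumptions for illustration, not code from this PR:

# Hypothetical orchestration sketch (not part of this diff).
from crewai import Crew, Process, Task

from apps.ai.agents.chapter.agent import create_chapter_agent
from apps.ai.agents.project.agent import create_project_agent

chapter_agent = create_chapter_agent(allow_delegation=True)  # may hand work to other agents
project_agent = create_project_agent()  # keeps the default: no delegation

task = Task(
    description="Answer a member question about OWASP chapters and related projects.",
    expected_output="A concise answer with links to the relevant chapter and project pages.",
    agent=chapter_agent,
)

crew = Crew(agents=[chapter_agent, project_agent], tasks=[task], process=Process.sequential)
result = crew.kickoff()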
34 changes: 24 additions & 10 deletions backend/apps/ai/common/llm_config.py
@@ -2,32 +2,46 @@

from __future__ import annotations

import os
import logging

from crewai import LLM
from django.conf import settings

logger = logging.getLogger(__name__)


def get_llm() -> LLM:
"""Get configured LLM instance.

Returns:
LLM: Configured LLM instance with gpt-4.1-mini as default model.
LLM: Configured LLM instance based on settings.

"""
provider = os.getenv("LLM_PROVIDER", "openai")
provider = settings.LLM_PROVIDER

if provider == "openai":
return LLM(
model=os.getenv("OPENAI_MODEL_NAME", "gpt-4.1-mini"),
api_key=os.getenv("DJANGO_OPEN_AI_SECRET_KEY"),
model=settings.OPENAI_MODEL_NAME,
api_key=settings.OPEN_AI_SECRET_KEY,
temperature=0.1,
)
if provider == "anthropic":
if provider == "google":
return LLM(
model=os.getenv("ANTHROPIC_MODEL_NAME", "claude-3-5-sonnet-20241022"),
api_key=os.getenv("ANTHROPIC_API_KEY"),
model=settings.GOOGLE_MODEL_NAME,
base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
api_key=settings.GOOGLE_API_KEY,
temperature=0.1,
)

error_msg = f"Unsupported LLM provider: {provider}"
raise ValueError(error_msg)
# Fallback to OpenAI if provider not recognized or not specified
if provider and provider not in ("openai", "google"):
logger.warning(
"Unrecognized LLM_PROVIDER '%s'. Falling back to OpenAI. "
"Supported providers: 'openai', 'google'",
provider,
)
return LLM(
model=settings.OPENAI_MODEL_NAME,
api_key=settings.OPEN_AI_SECRET_KEY,
temperature=0.1,
)
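Note: get_llm() now reads settings.LLM_PROVIDER and falls back to OpenAI with a logged warning instead of raising. A short usage sketch, assuming Django settings are already loaded (comments summarize the branches in the diff above):

# Illustrative only; the settings names are those referenced in the diff.
from apps.ai.common.llm_config import get_llm

# settings.LLM_PROVIDER == "openai" -> OPENAI_MODEL_NAME / OPEN_AI_SECRET_KEY
# settings.LLM_PROVIDER == "google" -> GOOGLE_MODEL_NAME / GOOGLE_API_KEY via the
#                                      OpenAI-compatible Gemini endpoint
# anything else                     -> warning logged, OpenAI used as fallback
llm = get_llm()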
10 changes: 8 additions & 2 deletions backend/apps/ai/embeddings/factory.py
@@ -1,18 +1,24 @@
"""Factory function to get the configured embedder."""

from django.conf import settings

from apps.ai.embeddings.base import Embedder
from apps.ai.embeddings.google import GoogleEmbedder
from apps.ai.embeddings.openai import OpenAIEmbedder


def get_embedder() -> Embedder:
"""Get the configured embedder.

Currently returns OpenAI embedder, but can be extended to support
Currently returns OpenAI and Google embedder, but can be extended to support
other providers (e.g., Anthropic, Cohere, etc.).

Returns:
Embedder instance configured for the current provider.

"""
# Currently OpenAI, but can be extended to support other providers
# Currently OpenAI and Google, but can be extended to support other providers
if settings.LLM_PROVIDER == "google":
return GoogleEmbedder()

return OpenAIEmbedder()
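Note: the factory branches on the same LLM_PROVIDER setting. A hedged sketch of the selection behaviour, assuming Django settings are configured (the override_settings usage is illustrative, not a test from this PR):

# Illustrative check of the provider switch above.
from django.test import override_settings

from apps.ai.embeddings.factory import get_embedder
from apps.ai.embeddings.google import GoogleEmbedder

with override_settings(LLM_PROVIDER="google"):
    embedder = get_embedder()
    assert isinstance(embedder, GoogleEmbedder)
    assert embedder.get_dimensions() == 768  # text-embedding-004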
79 changes: 79 additions & 0 deletions backend/apps/ai/embeddings/google.py
@@ -0,0 +1,79 @@
"""Google implementation of embedder."""

from __future__ import annotations

import requests
from django.conf import settings

from apps.ai.embeddings.base import Embedder


class GoogleEmbedder(Embedder):
"""Google implementation of embedder using OpenAI compatible endpoint."""

def __init__(self, model: str = "text-embedding-004") -> None:
"""Initialize Google embedder.

Args:
model: The Google embedding model to use.

"""
self.api_key = settings.GOOGLE_API_KEY
self.model = model
self.endpoint = "https://generativelanguage.googleapis.com/v1beta/openai/embeddings"
self._dimensions = 768 # text-embedding-004 dimensions

def embed_query(self, text: str) -> list[float]:
"""Generate embedding for a query string.

Args:
text: The query text to embed.

Returns:
List of floats representing the embedding vector.

"""
response = requests.post(
self.endpoint,
headers={"Authorization": f"Bearer {self.api_key}"},
json={
"input": text,
"model": self.model,
},
timeout=30,
)
response.raise_for_status()
data = response.json()
return data["data"][0]["embedding"]

def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Generate embeddings for multiple documents.

Args:
texts: List of document texts to embed.

Returns:
List of embedding vectors, one per document.

"""
response = requests.post(
self.endpoint,
headers={"Authorization": f"Bearer {self.api_key}"},
json={
"input": texts,
"model": self.model,
},
timeout=60,
)
response.raise_for_status()
data = response.json()
return [item["embedding"] for item in data["data"]]

def get_dimensions(self) -> int:
"""Get the dimension of embeddings produced by this embedder.

Returns:
Integer representing the embedding dimension.

"""
return self._dimensions
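Note: a hedged usage sketch for the new embedder; the query text is illustrative and a configured GOOGLE_API_KEY is required for real calls:

# Illustrative usage (hits the Gemini OpenAI-compatible embeddings endpoint).
from apps.ai.embeddings.google import GoogleEmbedder

embedder = GoogleEmbedder()  # defaults to text-embedding-004 (768 dimensions)
vector = embedder.embed_query("How do I join an OWASP chapter?")
assert len(vector) == embedder.get_dimensions()

batch = embedder.embed_documents(["OWASP Nest", "OWASP Juice Shop"])
assert len(batch) == 2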