Skip to content
1 change: 1 addition & 0 deletions backend/apps/ai/common/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
DEFAULT_MAX_ITERATIONS = 3
DEFAULT_REASONING_MODEL = "gpt-4o"
DEFAULT_SIMILARITY_THRESHOLD = 0.1
DEFAULT_VISION_MODEL = "gpt-4o-mini"
DELIMITER = "\n\n"
GITHUB_REQUEST_INTERVAL_SECONDS = 0.5
MIN_REQUEST_INTERVAL_SECONDS = 1.2
Expand Down
26 changes: 25 additions & 1 deletion backend/apps/ai/flows/assistant.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,10 @@
from apps.ai.agents.contribution import create_contribution_agent
from apps.ai.agents.project import create_project_agent
from apps.ai.agents.rag import create_rag_agent
from apps.ai.common.constants import DEFAULT_VISION_MODEL, DELIMITER
from apps.ai.common.intent import Intent
from apps.ai.router import route
from apps.common.open_ai import OpenAi
from apps.slack.constants import (
OWASP_COMMUNITY_CHANNEL_ID,
)
Expand All @@ -23,6 +25,12 @@

CONFIDENCE_THRESHOLD = 0.7

IMAGE_DESCRIPTION_PROMPT = (
"Describe what is shown in these images. Focus on any text, "
"error messages, code snippets, UI elements, or technical details. "
"Be concise."
)


def normalize_channel_id(channel_id: str) -> str:
"""Normalize channel ID by removing # prefix if present.
Expand All @@ -48,14 +56,19 @@ def normalize_channel_id(channel_id: str) -> str:


def process_query( # noqa: PLR0911
query: str, channel_id: str | None = None, *, is_app_mention: bool = False
query: str,
images: list[str] | None = None,
channel_id: str | None = None,
*,
is_app_mention: bool = False,
) -> str | None:
"""Process query using multi-agent architecture.

Supports both single-agent and collaborative multi-agent flows.

Args:
query: User's question
images (list[str] | None): A list of base64 encoded image data URIs.
channel_id: Optional Slack channel ID where the query originated
is_app_mention: Whether this is an explicit app mention (vs channel monitored message)

Expand All @@ -65,6 +78,17 @@ def process_query( # noqa: PLR0911

"""
try:
if images:
image_context = (
OpenAi(model=DEFAULT_VISION_MODEL)
.set_prompt(IMAGE_DESCRIPTION_PROMPT)
.set_input(query)
.set_images(images)
.complete()
)
if image_context:
query = f"{query}{DELIMITER}Image context: {image_context}"

# Step 1: Route to appropriate expert agent
router_result = route(query)
intent = router_result["intent"]
Expand Down
34 changes: 33 additions & 1 deletion backend/apps/common/open_ai.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,14 @@
from __future__ import annotations

import logging
from typing import TYPE_CHECKING

import openai
from django.conf import settings

if TYPE_CHECKING:
from openai.types.chat import ChatCompletionContentPartParam

logger: logging.Logger = logging.getLogger(__name__)


Expand All @@ -29,10 +33,25 @@ def __init__(
timeout=30, # In seconds.
)

self.images: list[str] = []
self.max_tokens = max_tokens
self.model = model
self.temperature = temperature

def set_images(self, images: list[str]) -> OpenAi:
    """Attach images to the request.

    Args:
        images (list[str]): A list of base64 encoded image data URIs.

    Returns:
        OpenAi: The current instance, so calls can be chained fluently.

    """
    # Stored as-is; user_content turns each URI into an image_url part.
    self.images = images
    return self

def set_input(self, content: str) -> OpenAi:
"""Set system role content.

Expand Down Expand Up @@ -75,6 +94,19 @@ def set_prompt(self, content: str) -> OpenAi:

return self

@property
def user_content(self) -> list[ChatCompletionContentPartParam]:
    """Build the user message content parts.

    Returns:
        list[ChatCompletionContentPartParam]: A text part holding the input,
        followed by one image_url part per attached image (none when no
        images were set).

    """
    parts: list[ChatCompletionContentPartParam] = [{"type": "text", "text": self.input}]
    for uri in self.images:
        parts.append({"type": "image_url", "image_url": {"url": uri}})

    return parts

def complete(self) -> str | None:
"""Get API response.

Expand All @@ -91,7 +123,7 @@ def complete(self) -> str | None:
max_tokens=self.max_tokens,
messages=[
{"role": "system", "content": self.prompt},
{"role": "user", "content": self.input},
{"role": "user", "content": self.user_content},
],
model=self.model,
temperature=self.temperature,
Expand Down
1 change: 1 addition & 0 deletions backend/apps/slack/MANIFEST.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -114,6 +114,7 @@ oauth_config:
- channels:read
- chat:write
- commands
- files:read
Copy link
Collaborator Author

@rudransh-shrivastava rudransh-shrivastava Feb 12, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This requires app re-installation.

- groups:read
- groups:write
- im:history
Expand Down
20 changes: 16 additions & 4 deletions backend/apps/slack/common/handlers/ai.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,12 +12,17 @@


def get_blocks(
query: str, channel_id: str | None = None, *, is_app_mention: bool = False
query: str,
images: list[str] | None = None,
channel_id: str | None = None,
*,
is_app_mention: bool = False,
) -> list[dict]:
"""Get AI response blocks.

Args:
query (str): The user's question.
images (list[str] | None): A list of base64 encoded image data URIs.
channel_id (str | None): The Slack channel ID where the query originated.
is_app_mention (bool): Whether this is an explicit app mention.

Expand All @@ -26,7 +31,7 @@ def get_blocks(

"""
ai_response = process_ai_query(
query.strip(), channel_id=channel_id, is_app_mention=is_app_mention
query.strip(), images=images, channel_id=channel_id, is_app_mention=is_app_mention
)

if ai_response:
Expand All @@ -37,12 +42,17 @@ def get_blocks(


def process_ai_query(
    query: str,
    images: list[str] | None = None,
    channel_id: str | None = None,
    *,
    is_app_mention: bool = False,
) -> str | None:
    """Process the AI query using CrewAI flow.

    Args:
        query (str): The user's question.
        images (list[str] | None): A list of base64 encoded image data URIs.
        channel_id (str | None): The Slack channel ID where the query originated.
        is_app_mention (bool): Whether this is an explicit app mention.

    Returns:
        str | None: The AI response, or None when processing fails.

    """
    try:
        response = process_query(
            query, images=images, channel_id=channel_id, is_app_mention=is_app_mention
        )
    except Exception:
        # Boundary handler: never let AI failures propagate to the Slack layer.
        logger.exception("Failed to process AI query")
        return None
    return response
Expand Down
28 changes: 26 additions & 2 deletions backend/apps/slack/events/app_mention.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,18 @@
"""Slack app mention event handler."""

import base64
import logging

from apps.slack.common.handlers.ai import get_blocks
from apps.slack.events.event import EventBase
from apps.slack.models import Conversation
from apps.slack.utils import download_file

logger = logging.getLogger(__name__)

ALLOWED_MIMETYPES = {"image/jpeg", "image/png", "image/webp"}
MAX_IMAGE_SIZE = 2 * 1024 * 1024 # 2 MB


class AppMention(EventBase):
"""Handles app mention events when the bot is mentioned in a channel."""
Expand All @@ -17,6 +22,7 @@ class AppMention(EventBase):
def handle_event(self, event, client):
"""Handle an incoming app mention event."""
channel_id = event.get("channel")
files = event.get("files", [])
text = event.get("text", "")

if not Conversation.objects.filter(
Expand All @@ -26,6 +32,13 @@ def handle_event(self, event, client):
logger.warning("NestBot AI Assistant is not enabled for this conversation.")
return

images_raw = [
file
for file in files
if file.get("mimetype") in ALLOWED_MIMETYPES
and file.get("size", float("inf")) <= MAX_IMAGE_SIZE
]

query = text
for mention in event.get("blocks", []):
if mention.get("type") == "rich_text":
Expand Down Expand Up @@ -82,8 +95,19 @@ def handle_event(self, event, client):
},
)

# Get AI response and post it
reply_blocks = get_blocks(query=query, channel_id=channel_id, is_app_mention=True)
image_uris = []
for image in images_raw:
content = download_file(image.get("url_private"), client.token)
if content:
image_uri = (
f"data:{image.get('mimetype')};base64,{base64.b64encode(content).decode()}"
)
image_uris.append(image_uri)

# Get AI response and post it.
reply_blocks = get_blocks(
query=query, images=image_uris, channel_id=channel_id, is_app_mention=True
)
client.chat_postMessage(
channel=channel_id,
blocks=reply_blocks,
Expand Down
26 changes: 26 additions & 0 deletions backend/apps/slack/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,8 @@
from html import escape as escape_html
from typing import TYPE_CHECKING

import requests

if TYPE_CHECKING: # pragma: no cover
from django.db.models import QuerySet

Expand All @@ -17,6 +19,30 @@
logger: logging.Logger = logging.getLogger(__name__)


def download_file(url: str, token: str) -> bytes | None:
"""Download Slack file.

Args:
url (str): The url of the file.
token (str): The slack bot token.

Returns:
bytes or None: The downloaded file content, or None if download failed.

"""
if not url or not token:
return None

try:
response = requests.get(url, headers={"Authorization": f"Bearer {token}"}, timeout=30)
response.raise_for_status()
except requests.RequestException as e:
logger.exception("Failed to download Slack file", extra={"error": str(e)})
return None

return response.content


def escape(content: str) -> str:
"""Escape HTML content.

Expand Down
Empty file.
Loading