diff --git a/README.md b/README.md index d0e1159..af537a7 100644 --- a/README.md +++ b/README.md @@ -32,10 +32,12 @@ pip install -r requirements.txt Set up environment variables: Create a `.env` file in the `backend` directory based on the `.env.example` structure. -You will need your `GEMINI_API_KEY`. +You will need your `GEMINI_API_KEY`, as well as `SUPABASE_URL` and `SUPABASE_SERVICE_KEY`, which are required by the backend configuration (`app/core/config.py`). ```bash # example .env contents GEMINI_API_KEY="your-api-key-here" +SUPABASE_URL="your-supabase-url" +SUPABASE_SERVICE_KEY="your-supabase-service-key" ``` Start the backend server on `http://localhost:8000`: @@ -73,4 +75,4 @@ Open [http://localhost:3000](http://localhost:3000) with your browser to see the ## Architecture Highlights - Frontend: Next.js 14+ (App Router), TailwindCSS, TypeScript, custom SSE streaming integration. -- Backend: FastAPI, LangChain, SentenceTransformers embedding, and Supabase integration. +- Backend: FastAPI, LangChain, HuggingFace embedding, and Supabase integration. 
diff --git a/backend/app/api/routes/chat.py b/backend/app/api/routes/chat.py index 9843b1b..1d67122 100644 --- a/backend/app/api/routes/chat.py +++ b/backend/app/api/routes/chat.py @@ -1,18 +1,26 @@ import json import asyncio +import logging from fastapi import APIRouter, Request from pydantic import BaseModel from sse_starlette.sse import EventSourceResponse -from app.services.llm import get_english_translation, get_response_stream +from app.services.llm import get_english_translation, get_response_stream_async from app.services.embedding import embedding_service -from app.services.database import supabase_client +from app.services.database import get_client router = APIRouter() +logger = logging.getLogger(__name__) class ChatRequest(BaseModel): query: str +def _search_documents(query_vector): + return get_client().rpc( + 'match_documents', + {'query_embedding': query_vector, 'match_count': 3} + ).execute() + async def generate_chat_events(request: Request, query: str): """ Generator function that streams SSE events. @@ -20,28 +28,28 @@ async def generate_chat_events(request: Request, query: str): """ # 1. Translate Korean query to English try: - english_query = get_english_translation(query) - except Exception as e: + english_query = await asyncio.to_thread(get_english_translation, query) + except Exception: + logger.exception("Failed to translate query") yield {"event": "error", "data": "오늘은 철학자도 사색의 시간이 필요하답니다. 내일 다시 지혜를 나누러 올게요."} return # 2. Generate vector representation try: - query_vector = embedding_service.generate_embedding(english_query) + query_vector = await asyncio.to_thread(embedding_service.generate_embedding, english_query) except Exception: + logger.exception("Failed to generate query embedding") yield {"event": "error", "data": "오늘은 철학자도 사색의 시간이 필요하답니다. 내일 다시 지혜를 나누러 올게요."} return # 3. 
Perform hybrid search in Supabase # We use the RPC match_documents function defined in schema.sql try: - response = supabase_client.rpc( - 'match_documents', - {'query_embedding': query_vector, 'match_count': 3} - ).execute() + response = await asyncio.to_thread(_search_documents, query_vector) documents = response.data - except Exception as e: - yield {"event": "error", "data": f"Database search failed: {str(e)}"} + except Exception: + logger.exception("Database search failed") + yield {"event": "error", "data": "검색 중 오류가 발생했습니다. 잠시 후 다시 시도해 주세요."} return if not documents: @@ -72,9 +80,7 @@ async def generate_chat_events(request: Request, query: str): combined_context = "\n\n".join(contexts) try: - llm_stream = get_response_stream(context=combined_context, query=english_query) - - for chunk in llm_stream: + async for chunk in get_response_stream_async(context=combined_context, query=english_query): # If client disconnects, stop generating if await request.is_disconnected(): break @@ -82,7 +88,8 @@ async def generate_chat_events(request: Request, query: str): # Clean up chunk to avoid SSE formatting issues with newlines chunk_clean = chunk.replace("\n", "\\n") yield {"event": "content", "data": chunk_clean} - except Exception as e: + except Exception: + logger.exception("Failed while streaming LLM response") yield {"event": "error", "data": "오늘은 철학자도 사색의 시간이 필요하답니다. 
내일 다시 지혜를 나누러 올게요."} return diff --git a/backend/app/core/config.py b/backend/app/core/config.py index 8ac9319..691793f 100644 --- a/backend/app/core/config.py +++ b/backend/app/core/config.py @@ -11,7 +11,7 @@ class Settings(BaseSettings): SUPABASE_SERVICE_KEY: str = "" # Use Service Role Key for backend operations model_config = SettingsConfigDict( - env_file=str(Path(__file__).resolve().parent.parent.parent / ".env"), + env_file=str(Path(__file__).resolve().parents[2] / ".env"), env_file_encoding="utf-8" ) diff --git a/backend/app/services/database.py b/backend/app/services/database.py index 7d22331..d648df8 100644 --- a/backend/app/services/database.py +++ b/backend/app/services/database.py @@ -1,15 +1,30 @@ +import threading from supabase import create_client, Client from app.core.config import settings -def get_supabase_client() -> Client: +SUPABASE_CONFIG_ERROR = "SUPABASE_URL and SUPABASE_SERVICE_KEY must be configured" + +def _get_supabase_client() -> Client: """ Returns a configured Supabase client using the URL and Service Key. The Service Key is used to bypass RLS for administrative backend tasks like upserting documents or fetching metadata securely. 
""" - supabase_url = settings.SUPABASE_URL or "http://localhost:8000" - supabase_key = settings.SUPABASE_SERVICE_KEY or "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSJ9.1234567890" + supabase_url = settings.SUPABASE_URL + supabase_key = settings.SUPABASE_SERVICE_KEY + if not supabase_url or not supabase_key: + raise RuntimeError(SUPABASE_CONFIG_ERROR) return create_client(supabase_url, supabase_key) -# Initialize a global client to be reused -supabase_client = get_supabase_client() + +_client_lock = threading.Lock() +# Lazy initialization for Supabase client +_supabase_client: Client | None = None + +def get_client() -> Client: + global _supabase_client + if _supabase_client is None: + with _client_lock: + if _supabase_client is None: + _supabase_client = _get_supabase_client() + return _supabase_client diff --git a/backend/app/services/embedding.py b/backend/app/services/embedding.py index d4ed3b3..cb40c34 100644 --- a/backend/app/services/embedding.py +++ b/backend/app/services/embedding.py @@ -1,6 +1,10 @@ import threading +import logging from langchain_community.embeddings import HuggingFaceEmbeddings +logger = logging.getLogger(__name__) + + MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2" class EmbeddingService: @@ -13,19 +17,19 @@ def embeddings(self): if self._embeddings is None: with self._lock: if self._embeddings is None: - print(f"Loading local embedding model: {MODEL_NAME} (HuggingFace)...") + logger.info("Loading local embedding model: %s (HuggingFace)...", MODEL_NAME) self._embeddings = HuggingFaceEmbeddings( model_name=MODEL_NAME, model_kwargs={'device': 'cpu'}, encode_kwargs={'normalize_embeddings': True} ) - print("Local embedding model loaded successfully.") + logger.info("Local embedding model loaded successfully.") return self._embeddings def generate_embedding(self, text: str) -> list[float]: """ - Generates a vector embedding for the given text using the FastEmbed model. - Returns a list of 384 floats. 
+ Generates a vector embedding for the given text using the HuggingFace model. + Returns a list of 384 floats matching the model's actual vector length. """ # The embed_query method returns a list of floats embedding = self.embeddings.embed_query(text) diff --git a/backend/app/services/llm.py b/backend/app/services/llm.py index 986a9b9..e7fb641 100644 --- a/backend/app/services/llm.py +++ b/backend/app/services/llm.py @@ -4,6 +4,9 @@ from langchain_google_genai import ChatGoogleGenerativeAI from langchain_core.output_parsers import StrOutputParser +if not settings.GEMINI_API_KEY: + raise RuntimeError("GEMINI_API_KEY must be configured") + # Configure Gemini API natively (optional, if native SDK features are needed) genai.configure(api_key=settings.GEMINI_API_KEY) @@ -11,9 +14,9 @@ # We use gemini-2.5-flash for faster and highly capable inference llm = ChatGoogleGenerativeAI( model="gemini-2.5-flash", - google_api_key=settings.GEMINI_API_KEY or "dummy_key_for_testing", + google_api_key=settings.GEMINI_API_KEY, temperature=0.7, - max_retries=0 + max_retries=2 ) translation_prompt = PromptTemplate.from_template( @@ -58,3 +61,12 @@ def get_response_stream(context: str, query: str): prompt = get_rag_prompt() chain = prompt | llm | StrOutputParser() return chain.stream({"context": context, "query": query}) + +async def get_response_stream_async(context: str, query: str): + """ + Returns an async stream of strings from the LLM. 
+ """ + prompt = get_rag_prompt() + chain = prompt | llm | StrOutputParser() + async for chunk in chain.astream({"context": context, "query": query}): + yield chunk diff --git a/backend/pytest_log.txt b/backend/pytest_log.txt index c825192..1bdf36e 100644 Binary files a/backend/pytest_log.txt and b/backend/pytest_log.txt differ diff --git a/backend/pytest_log_utf8.txt b/backend/pytest_log_utf8.txt new file mode 100644 index 0000000..b7d8a76 --- /dev/null +++ b/backend/pytest_log_utf8.txt @@ -0,0 +1,26 @@ +============================= test session starts ============================= +platform win32 -- Python 3.12.12, pytest-9.0.2, pluggy-1.6.0 +rootdir: C:\Users\ysn65\Desktop\antigravity\philo-rag\backend +plugins: anyio-4.12.1, asyncio-1.3.0, cov-7.0.0 +asyncio: mode=Mode.STRICT, debug=False, asyncio_default_fixture_loop_scope=None, asyncio_default_test_loop_scope=function +collected 0 items / 1 error + +=================================== ERRORS ==================================== +___________________ ERROR collecting tests/unit/test_llm.py ___________________ +tests\unit\test_llm.py:12: in + from app.services.llm import get_english_translation, get_response_stream, get_response_stream_async +app\services\llm.py:8: in + raise RuntimeError("GEMINI_API_KEY must be configured") +E RuntimeError: GEMINI_API_KEY must be configured +============================== warnings summary =============================== +:488 + :488: DeprecationWarning: Type google._upb._message.MessageMapContainer uses PyType_Spec with a metaclass that has custom tp_new. This is deprecated and will no longer be allowed in Python 3.14. + +:488 + :488: DeprecationWarning: Type google._upb._message.ScalarMapContainer uses PyType_Spec with a metaclass that has custom tp_new. This is deprecated and will no longer be allowed in Python 3.14. 
+ +-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html +=========================== short test summary info =========================== +ERROR tests/unit/test_llm.py - RuntimeError: GEMINI_API_KEY must be configured +!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!! +======================== 2 warnings, 1 error in 4.52s ========================= diff --git a/backend/requirements.txt b/backend/requirements.txt index 9da7dbf..04a35dd 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -10,4 +10,5 @@ pydantic>=2.7.0 pydantic-settings python-dotenv langchain-community==0.4.1 -sentence-transformers +sentence-transformers>=2.2.0,<3.0.0 +pytest-asyncio>=0.23.0 diff --git a/backend/tests/unit/test_llm.py b/backend/tests/unit/test_llm.py index 95fa73a..062ef09 100644 --- a/backend/tests/unit/test_llm.py +++ b/backend/tests/unit/test_llm.py @@ -1,45 +1,87 @@ -import asyncio -import os + import sys import pytest from pathlib import Path -# dynamically add backend dir to path -backend_dir = Path(__file__).resolve().parent +# dynamically add backend root dir to path +backend_dir = Path(__file__).resolve().parents[2] if str(backend_dir) not in sys.path: sys.path.insert(0, str(backend_dir)) -from app.services.llm import get_english_translation, get_response_stream -from app.core.config import settings +import os -@pytest.mark.skipif(not settings.GEMINI_API_KEY, reason="GEMINI_API_KEY is not configured") -def test_translation(): - print("Testing translation...") +@pytest.fixture(autouse=True) +def setup_test_env(monkeypatch): + monkeypatch.setenv("GEMINI_API_KEY", "dummy_test_key") + monkeypatch.setenv("SUPABASE_URL", "http://localhost:8000") + monkeypatch.setenv("SUPABASE_SERVICE_KEY", "dummy_test_key") + + # Ensure settings reflect the mocked env vars globally in case they were initialized try: + from app.core.config import settings + monkeypatch.setattr(settings, "GEMINI_API_KEY", "dummy_test_key") + 
monkeypatch.setattr(settings, "SUPABASE_URL", "http://localhost:8000") + monkeypatch.setattr(settings, "SUPABASE_SERVICE_KEY", "dummy_test_key") + except ImportError: + pass + +from unittest.mock import patch, MagicMock + +def test_translation(setup_test_env): + print("Testing translation...") + from app.services.llm import get_english_translation + with patch("app.services.llm.translation_prompt") as mock_prompt, \ + patch("app.services.llm.llm") as _mock_llm, \ + patch("app.services.llm.StrOutputParser") as _mock_parser: + + mock_chain = MagicMock() + mock_chain.invoke.return_value = "Translated Text" + mock_chain.__or__.return_value = mock_chain + mock_prompt.__or__.return_value = mock_chain + translated = get_english_translation("미덕이란 무엇인가?") print("Translation:", translated) - assert translated.strip() != "", "Translation must not be empty" - except Exception as e: - raise AssertionError(f"Translation error: {str(e)}") + assert translated == "Translated Text", "Translation output mocked mismatch" -@pytest.mark.skipif(not settings.GEMINI_API_KEY, reason="GEMINI_API_KEY is not configured") -def test_streaming(): +def test_streaming(setup_test_env): print("Testing streaming...") - try: + from app.services.llm import get_response_stream + with patch("app.services.llm.get_rag_prompt") as mock_prompt, \ + patch("app.services.llm.llm") as _mock_llm, \ + patch("app.services.llm.StrOutputParser") as _mock_parser: + + mock_chain = MagicMock() + mock_chain.stream.return_value = ["안녕하세요", " ", "철학자", "입니다."] + mock_chain.__or__.return_value = mock_chain + mock_prompt.return_value.__or__.return_value = mock_chain + stream = get_response_stream(context="Virtue is excellence.", query="What is virtue?") - chunks_received = 0 - for chunk in stream: - print(chunk, end="", flush=True) - chunks_received += 1 - print("\nStream finished") - assert chunks_received > 0, "No chunks received from streaming API" - except Exception as e: - raise AssertionError(f"Stream error: {str(e)}") 
+ results = list(stream) + assert results == ["안녕하세요", " ", "철학자", "입니다."], "Stream chunks mocked mismatch" + +@pytest.mark.asyncio +async def test_streaming_async(setup_test_env): + print("Testing streaming async...") + from app.services.llm import get_response_stream_async + with patch("app.services.llm.get_rag_prompt") as mock_prompt, \ + patch("app.services.llm.llm") as _mock_llm, \ + patch("app.services.llm.StrOutputParser") as _mock_parser: + + mock_chain = MagicMock() + async def mock_astream(*_args, **_kwargs): + for chunk in ["안녕하세요", " ", "철학자", "입니다."]: + yield chunk + mock_chain.astream = mock_astream + mock_chain.__or__.return_value = mock_chain + mock_prompt.return_value.__or__.return_value = mock_chain + + stream = get_response_stream_async(context="Virtue is excellence.", query="What is virtue?") + results = [chunk async for chunk in stream] + assert results == ["안녕하세요", " ", "철학자", "입니다."], "Async stream chunks mocked mismatch" # For manual execution -async def run_manual_test(): +if __name__ == "__main__": + import asyncio test_translation() test_streaming() - -if __name__ == "__main__": - asyncio.run(run_manual_test()) + asyncio.run(test_streaming_async()) diff --git a/frontend/.stylelintrc.json b/frontend/.stylelintrc.json index a50b211..de1f732 100644 --- a/frontend/.stylelintrc.json +++ b/frontend/.stylelintrc.json @@ -1,11 +1,20 @@ { + "extends": [ + "stylelint-config-standard-scss" + ], "rules": { "scss/at-rule-no-unknown": [ true, { "ignoreAtRules": [ "theme", - "import" + "import", + "apply", + "layer", + "screen", + "variants", + "responsive", + "tailwind" ] } ] diff --git a/frontend/app/layout.tsx b/frontend/app/layout.tsx index cf10a1a..b1b8599 100644 --- a/frontend/app/layout.tsx +++ b/frontend/app/layout.tsx @@ -4,7 +4,7 @@ import "./globals.css"; const inter = Inter({ subsets: ["latin"], variable: "--font-inter", display: "swap" }); const newsreader = Newsreader({ subsets: ["latin"], variable: "--font-newsreader", display: "swap", 
style: ['normal', 'italic'] }); -const notoSansKr = Noto_Sans_KR({ subsets: ["latin"], weight: ["100", "400", "700", "900"], variable: "--font-noto-sans-kr", display: "swap" }); +const notoSansKr = Noto_Sans_KR({ subsets: ["latin"], weight: ["100", "400", "700", "900"], preload: false, variable: "--font-noto-sans-kr", display: "swap" }); export const metadata: Metadata = { title: "PhiloRAG", @@ -20,10 +20,7 @@ export default function RootLayout({ {children} diff --git a/frontend/app/page.tsx b/frontend/app/page.tsx index 120c7a1..40ffeeb 100644 --- a/frontend/app/page.tsx +++ b/frontend/app/page.tsx @@ -12,8 +12,8 @@ export default function Home() { const handleSendMessage = async (query: string) => { if (!query.trim() || isSubmitting) return; - const userMsgId = Date.now().toString(); - const aiMsgId = (Date.now() + 1).toString(); + const userMsgId = crypto.randomUUID(); + const aiMsgId = crypto.randomUUID(); const newUserMsg: Message = { id: userMsgId, @@ -42,32 +42,60 @@ export default function Home() { body: JSON.stringify({ query: query }) }); - if (!res.ok) throw new Error("Failed to fetch"); + if (!res.ok) throw new Error(`Failed to fetch: ${res.status} ${res.statusText}`); const reader = res.body?.getReader(); const decoder = new TextDecoder(); if (!reader) throw new Error("No reader"); - let currentEvent = ""; + const processLine = (line: string, eventObj: { current: string }): boolean => { + if (line.startsWith("event: ")) { + eventObj.current = line.substring(7).trim(); + } else if (line.startsWith("data: ")) { + const currentData = line.substring(6); + const currentEvent = eventObj.current; + + if (currentEvent === "metadata" && currentData.trim() !== "") { + try { + const metaJson = JSON.parse(currentData); + const philosophersArray = Array.isArray(metaJson.philosophers) ? metaJson.philosophers : []; + setMessages((prev) => + prev.map(msg => msg.id === aiMsgId ? 
{ ...msg, metadata: philosophersArray } : msg) + ); + } catch { console.error("Could not parse metadata event:", currentData) } + } else if (currentEvent === "content") { + // un-escape \\n to real newlines + const char = currentData.replace(/\\n/g, '\n'); + setMessages((prev) => + prev.map(msg => msg.id === aiMsgId ? { ...msg, content: msg.content + char } : msg) + ); + } else if (currentEvent === "error") { + console.error("Chat error:", currentData); + setMessages((prev) => + prev.map(msg => msg.id === aiMsgId ? { ...msg, content: currentData, isStreaming: false } : msg) + ); + return true; + } + } + return false; + }; + + const eventObj = { current: "" }; let buffer = ""; + let shouldStop = false; while (true) { const { done, value } = await reader.read(); if (done) { + // Flush the internal buffer of the decoder (for incomplete multi-byte chars) + buffer += decoder.decode(); // Process any remaining data in the buffer if (buffer) { const lines = buffer.split('\n'); for (const line of lines) { - if (line.startsWith("event: ")) { - currentEvent = line.substring(7).trim(); - } else if (line.startsWith("data: ")) { - const currentData = line.substring(6); - if (currentEvent === "content") { - const char = currentData.replace(/\\n/g, '\n'); - setMessages((prev) => - prev.map(msg => msg.id === aiMsgId ? { ...msg, content: msg.content + char } : msg) - ); - } + if (processLine(line, eventObj)) { + shouldStop = true; + break; } } } @@ -81,32 +109,15 @@ export default function Home() { buffer = lines.pop() || ""; for (const line of lines) { - if (line.startsWith("event: ")) { - currentEvent = line.substring(7).trim(); - } else if (line.startsWith("data: ")) { - const currentData = line.substring(6); - - if (currentEvent === "metadata" && currentData.trim() !== "") { - try { - const metaJson = JSON.parse(currentData); - setMessages((prev) => - prev.map(msg => msg.id === aiMsgId ? 
{ ...msg, metadata: metaJson.philosophers } : msg) - ); - } catch (e) { console.error("Could not parse metadata event:", currentData) } - } else if (currentEvent === "content") { - // un-escape \\n to real newlines - const char = currentData.replace(/\\n/g, '\n'); - setMessages((prev) => - prev.map(msg => msg.id === aiMsgId ? { ...msg, content: msg.content + char } : msg) - ); - } else if (currentEvent === "error") { - console.error("Chat error:", currentData); - setMessages((prev) => - prev.map(msg => msg.id === aiMsgId ? { ...msg, content: currentData, isStreaming: false } : msg) - ); - } + if (processLine(line, eventObj)) { + shouldStop = true; + break; } } + if (shouldStop) { + await reader.cancel(); + break; + } } // Finish diff --git a/frontend/components/chat/ChatMain.tsx b/frontend/components/chat/ChatMain.tsx index 05b9b1e..3de8730 100644 --- a/frontend/components/chat/ChatMain.tsx +++ b/frontend/components/chat/ChatMain.tsx @@ -16,6 +16,14 @@ interface ChatMainProps { export function ChatMain({ messages, onSendMessage, isSubmitting, onClearChat }: ChatMainProps) { const messagesEndRef = useRef(null); const [shouldAutoScroll, setShouldAutoScroll] = useState(true); + const [startTime, setStartTime] = useState(""); + const [mounted, setMounted] = useState(false); + + useEffect(() => { + // eslint-disable-next-line react-hooks/set-state-in-effect + setStartTime(new Date().toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' })); + setMounted(true); + }, []); const handleScroll = (e: React.UIEvent) => { const { scrollTop, scrollHeight, clientHeight } = e.currentTarget; @@ -36,7 +44,7 @@ export function ChatMain({ messages, onSendMessage, isSubmitting, onClearChat }:

미덕에 관한 대화

-

세션 시작: {new Date().toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' })}

+

세션 시작: {mounted ? startTime : ""}

-
+ ); } diff --git a/frontend/components/chat/MessageList.tsx b/frontend/components/chat/MessageList.tsx index a2e33b7..fe173ba 100644 --- a/frontend/components/chat/MessageList.tsx +++ b/frontend/components/chat/MessageList.tsx @@ -1,11 +1,12 @@ -import { Sparkles, SquareArrowOutUpRight, ThumbsUp, Copy, RotateCcw, ChevronRight, User } from "lucide-react"; -import { Message } from "../../types/chat"; +import { Sparkles, SquareArrowOutUpRight, ThumbsUp, Copy, RotateCcw } from "lucide-react"; +import { Message, DocumentMetadata } from "../../types/chat"; interface Props { messages: Message[]; + onOpenCitation?: (meta: DocumentMetadata) => void; } -export function MessageList({ messages }: Props) { +export function MessageList({ messages, onOpenCitation }: Props) { if (messages.length === 0) { return (
@@ -71,12 +72,16 @@ export function MessageList({ messages }: Props) { {/* Citation Cards if metadata exists */} {msg.metadata && msg.metadata.length > 0 && Array.from(new Set(msg.metadata.map(m => m.book_info.title))).map((title, idx) => { - const meta = msg.metadata!.find(m => m.book_info.title === title)!; + const meta = msg.metadata?.find(m => m.book_info.title === title); + if (!meta) return null; return (
{meta.book_info.cover_url && !meta.book_info.cover_url.includes("dummy") ? ( - {title} + <> + {/* eslint-disable-next-line @next/next/no-img-element */} + {title} + ) : ( {meta.scholar.charAt(0)} )} @@ -87,13 +92,16 @@ export function MessageList({ messages }: Props) { {meta.scholar} - {meta.school}

- + {onOpenCitation && ( + + )}
) })} diff --git a/frontend/components/sidebar/ActivePhilosophers.tsx b/frontend/components/sidebar/ActivePhilosophers.tsx index 3fc1c37..10ea5b0 100644 --- a/frontend/components/sidebar/ActivePhilosophers.tsx +++ b/frontend/components/sidebar/ActivePhilosophers.tsx @@ -1,14 +1,15 @@ -import { BrainCircuit, CheckCircle, Circle } from "lucide-react"; +import { BrainCircuit, CheckCircle } from "lucide-react"; import { DocumentMetadata } from "../../types/chat"; interface Props { metadata: DocumentMetadata[]; + onPhilosopherClick?: (scholar: string) => void; } -export function ActivePhilosophers({ metadata }: Props) { - // extract unique philosophers +export function ActivePhilosophers({ metadata, onPhilosopherClick }: Props) { const uniquePhilosophers = Array.from(new Set(metadata.map(m => m.scholar))) - .map(scholar => metadata.find(m => m.scholar === scholar)!); + .map(scholar => metadata.find(m => m.scholar === scholar)) + .filter((m): m is DocumentMetadata => m !== undefined); return (
@@ -20,10 +21,11 @@ export function ActivePhilosophers({ metadata }: Props) {

현재 참조 중인 철학자가 없습니다.

) : (
- {uniquePhilosophers.map((meta, i) => ( + {uniquePhilosophers.map((meta) => ( +
))} diff --git a/frontend/components/sidebar/Sidebar.tsx b/frontend/components/sidebar/Sidebar.tsx index d1f7024..595687f 100644 --- a/frontend/components/sidebar/Sidebar.tsx +++ b/frontend/components/sidebar/Sidebar.tsx @@ -26,7 +26,7 @@ export function Sidebar({ messages = [] }: SidebarProps) { {/* Scrollable Content */}
- + console.log('Philosopher clicked:', scholar)} />
@@ -44,13 +44,13 @@ export function Sidebar({ messages = [] }: SidebarProps) { {/* Bottom Controls */}
- - -
diff --git a/frontend/next.config.ts b/frontend/next.config.ts index 52b5aa3..02af782 100644 --- a/frontend/next.config.ts +++ b/frontend/next.config.ts @@ -6,6 +6,8 @@ const nextConfig: NextConfig = { { protocol: "https", hostname: "image.aladin.co.kr", + port: "", + pathname: "/**", } ] }