From 85aa2e31be1e3d70e6c2ae3919b31750b6bcfbcb Mon Sep 17 00:00:00 2001
From: Default
Date: Fri, 29 Aug 2025 15:36:37 -0500
Subject: [PATCH 01/10] feat: Create modern Next.js frontend for OpenAI Chat API

- Set up complete Next.js project with TypeScript and Tailwind CSS
- Implement beautiful chat interface with streaming response support
- Add secure API key input with password-style field and validation
- Create responsive design with proper contrast and modern UX
- Include settings panel for model selection and system message configuration
- Add comprehensive documentation and setup instructions
- Configure for Vercel deployment with environment variables
- Implement real-time streaming chat with typing indicators
- Add proper error handling and loading states
---
 frontend/.gitignore                   |  35 ++++
 frontend/README.md                    | 197 +++++++++++++++++++-
 frontend/components/ApiKeySetup.tsx   | 124 +++++++++++++
 frontend/components/ChatInterface.tsx | 254 ++++++++++++++++++++++++++
 frontend/components/MessageBubble.tsx |  48 +++++
 frontend/next.config.js               |   7 +
 frontend/package.json                 |  28 +++
 frontend/pages/_app.tsx               |  17 ++
 frontend/pages/_document.tsx          |  13 ++
 frontend/pages/index.tsx              |  46 +++++
 frontend/postcss.config.js            |   6 +
 frontend/styles/globals.css           |  80 ++++++++
 frontend/tailwind.config.js           |  53 ++++++
 frontend/tsconfig.json                |  27 +++
 frontend/types/index.ts               |   8 +
 frontend/vercel.json                  |   9 +
 16 files changed, 950 insertions(+), 2 deletions(-)
 create mode 100644 frontend/.gitignore
 create mode 100644 frontend/components/ApiKeySetup.tsx
 create mode 100644 frontend/components/ChatInterface.tsx
 create mode 100644 frontend/components/MessageBubble.tsx
 create mode 100644 frontend/next.config.js
 create mode 100644 frontend/package.json
 create mode 100644 frontend/pages/_app.tsx
 create mode 100644 frontend/pages/_document.tsx
 create mode 100644 frontend/pages/index.tsx
 create mode 100644 frontend/postcss.config.js
 create mode 100644 frontend/styles/globals.css
 create mode 100644 frontend/tailwind.config.js
 create mode 100644 frontend/tsconfig.json
 create mode 100644 frontend/types/index.ts
 create mode 100644 frontend/vercel.json

diff --git a/frontend/.gitignore b/frontend/.gitignore
new file mode 100644
index 000000000..8f322f0d8
--- /dev/null
+++ b/frontend/.gitignore
@@ -0,0 +1,35 @@
+# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
+
+# dependencies
+/node_modules
+/.pnp
+.pnp.js
+
+# testing
+/coverage
+
+# next.js
+/.next/
+/out/
+
+# production
+/build
+
+# misc
+.DS_Store
+*.pem
+
+# debug
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+
+# local env files
+.env*.local
+
+# vercel
+.vercel
+
+# typescript
+*.tsbuildinfo
+next-env.d.ts
diff --git a/frontend/README.md b/frontend/README.md
index 56347bab6..ff0d7c561 100644
--- a/frontend/README.md
+++ b/frontend/README.md
@@ -1,3 +1,196 @@
-### Front End
+# OpenAI Chat Frontend
 
-Please populate this README with instructions on how to run the application!
\ No newline at end of file
+A beautiful, modern chat interface built with Next.js and TypeScript for interacting with the OpenAI Chat API backend.
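+
+The UI talks to the FastAPI backend through two routes. As a rough sketch of the contract assumed by the components in this patch (the `ChatRequest` name is illustrative; the field names are taken from `ChatInterface.tsx`):
+
+```ts
+// GET  `${NEXT_PUBLIC_API_URL}/api/health` : reachability check used by ApiKeySetup.tsx
+// POST `${NEXT_PUBLIC_API_URL}/api/chat`   : chat endpoint; the response body is streamed text
+interface ChatRequest {
+  developer_message: string // system/developer prompt
+  user_message: string      // the user's latest message
+  model: string             // e.g. 'gpt-4.1-mini'
+  api_key: string           // the user's OpenAI key, sent with every request
+}
+```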
+
+## Features
+
+- 🎨 **Modern UI**: Beautiful, responsive design with dark/light theme support
+- πŸ” **Secure**: API key input with password-style field and client-side validation
+- ⚑ **Real-time Streaming**: Live streaming responses from OpenAI's GPT models
+- πŸ“± **Responsive**: Works well on desktop, tablet, and mobile devices
+- βš™οΈ **Configurable**: Adjustable system messages and model selection
+- 🎯 **User-friendly**: Intuitive chat interface with typing indicators and message history
+
+## Prerequisites
+
+- Node.js 18.0 or higher
+- npm or yarn package manager
+- The FastAPI backend server running (see `../api/README.md`)
+- An OpenAI API key
+
+## Installation
+
+1. **Navigate to the frontend directory:**
+   ```bash
+   cd frontend
+   ```
+
+2. **Install dependencies:**
+   ```bash
+   npm install
+   ```
+
+3. **Set up environment variables:**
+   ```bash
+   cp .env.example .env.local
+   ```
+
+   (If `.env.example` is not present in your checkout, create `.env.local` directly.)
+
+   Edit `.env.local` and update the API URL if needed:
+   ```
+   NEXT_PUBLIC_API_URL=http://localhost:8000
+   ```
+
+## Running the Application
+
+### Development Mode
+
+Start the development server:
+```bash
+npm run dev
+```
+
+The application will be available at `http://localhost:3000`.
+
+### Production Build
+
+1. **Build the application:**
+   ```bash
+   npm run build
+   ```
+
+2. **Start the production server:**
+   ```bash
+   npm start
+   ```
+
+## Usage
+
+1. **Start the Backend**: Make sure the FastAPI backend is running on `http://localhost:8000`.
+
+2. **Open the Frontend**: Navigate to `http://localhost:3000` in your browser.
+
+3. **Enter API Key**: On first visit, you'll be prompted to enter your OpenAI API key.
+   - Get your API key from the [OpenAI Platform](https://platform.openai.com/api-keys)
+   - The key is stored in your browser session only
+
+4. **Start Chatting**:
+   - Type your message in the input field
+   - Press Enter to send (Shift+Enter for new lines)
+   - Watch the AI's response stream in as it is generated
+
+5. **Configure Settings** (optional):
+   - Click the settings icon to adjust the system message
+   - Select a different GPT model (gpt-4.1-mini, gpt-4, gpt-3.5-turbo)
+   - Customize how the AI should behave
+
+## Deployment
+
+### Vercel Deployment
+
+This frontend is optimized for deployment on Vercel:
+
+1. **Connect your repository** to Vercel
+2. **Set environment variables** in the Vercel dashboard:
+   - `NEXT_PUBLIC_API_URL`: Your deployed backend API URL
+3. **Deploy**: Vercel will automatically build and deploy your application
+
+### Manual Deployment
+
+1. **Build the application:**
+   ```bash
+   npm run build
+   ```
+
+2. **Export static files** (optional):
+   ```bash
+   npm run export
+   ```
+
+3. **Deploy the `out/` or `.next/` directory** to your hosting provider (see the verification sketch below)
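+
+After deploying, it is worth confirming that the site can actually reach the backend configured in `NEXT_PUBLIC_API_URL`. A minimal sketch that probes the same `/api/health` route `ApiKeySetup.tsx` uses (the script and its file name are illustrative, not part of this patch):
+
+```ts
+// check-backend.ts (hypothetical helper): run with Node 18+, e.g. `npx tsx check-backend.ts`
+const base = process.env.NEXT_PUBLIC_API_URL ?? 'http://localhost:8000'
+
+const res = await fetch(`${base}/api/health`)
+console.log(res.ok ? `Backend at ${base} is reachable` : `Backend returned HTTP ${res.status}`)
+```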
+
+## Project Structure
+
+```
+frontend/
+β”œβ”€β”€ components/           # React components
+β”‚   β”œβ”€β”€ ApiKeySetup.tsx   # API key input component
+β”‚   β”œβ”€β”€ ChatInterface.tsx # Main chat interface
+β”‚   └── MessageBubble.tsx # Individual message component
+β”œβ”€β”€ pages/                # Next.js pages
+β”‚   β”œβ”€β”€ _app.tsx          # App wrapper
+β”‚   β”œβ”€β”€ _document.tsx     # HTML document structure
+β”‚   └── index.tsx         # Main page
+β”œβ”€β”€ styles/               # Global styles
+β”‚   └── globals.css       # Tailwind CSS and custom styles
+β”œβ”€β”€ types/                # TypeScript type definitions
+β”‚   └── index.ts          # Shared types
+└── public/               # Static assets
+```
+
+## Configuration
+
+### Environment Variables
+
+- `NEXT_PUBLIC_API_URL`: The URL of your FastAPI backend (default: `http://localhost:8000`)
+
+### Styling
+
+The application uses Tailwind CSS with a custom design system:
+- Proper contrast ratios for accessibility
+- Responsive breakpoints for mobile-first design
+- Dark/light theme variables
+- Custom animations and transitions
+
+## Troubleshooting
+
+### Common Issues
+
+1. **"Backend server is not accessible"**
+   - Ensure the FastAPI backend is running on the correct port
+   - Check that CORS is properly configured in the backend
+   - Verify the `NEXT_PUBLIC_API_URL` environment variable
+
+2. **API Key Issues**
+   - Ensure your OpenAI API key starts with `sk-`
+   - Check that you have sufficient credits in your OpenAI account
+   - Verify the key has the necessary permissions
+
+3. **Build Errors**
+   - Run `npm install` to ensure all dependencies are installed
+   - Check for TypeScript errors with `npm run lint`
+   - Ensure your Node.js version is 18.0 or higher
+
+4. **Streaming Not Working**
+   - Check the browser console for JavaScript errors
+   - Verify the backend is returning a streaming response
+   - Test the backend API directly to isolate the issue (see the sketch below)
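+
+To tell backend problems apart from UI problems, a small script can exercise the chat endpoint directly. This is only a sketch: the request fields mirror `ChatInterface.tsx`, while the file name and key handling are up to you.
+
+```ts
+// chat-smoke-test.ts: run with Node 18+, e.g. `npx tsx chat-smoke-test.ts`
+// Set OPENAI_API_KEY in your environment first.
+const base = process.env.NEXT_PUBLIC_API_URL ?? 'http://localhost:8000'
+
+const res = await fetch(`${base}/api/chat`, {
+  method: 'POST',
+  headers: { 'Content-Type': 'application/json' },
+  body: JSON.stringify({
+    developer_message: 'You are a helpful AI assistant.',
+    user_message: 'Say hello in one sentence.',
+    model: 'gpt-4.1-mini',
+    api_key: process.env.OPENAI_API_KEY, // your own key
+  }),
+})
+
+if (!res.ok || !res.body) throw new Error(`HTTP ${res.status}`)
+
+// Print the streamed chunks as they arrive, the same way the UI consumes them.
+const reader = res.body.getReader()
+const decoder = new TextDecoder()
+for (;;) {
+  const { done, value } = await reader.read()
+  if (done) break
+  process.stdout.write(decoder.decode(value))
+}
+```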
+
+### Performance Tips
+
+- The application automatically handles message history and scrolling
+- Streaming responses provide immediate feedback instead of waiting for the complete reply
+- API keys are kept in browser session storage
+- Messages are optimized for mobile viewing with a responsive design
+
+## Security Notes
+
+- API keys are stored only in browser session storage and never sent to our servers
+- All communication with OpenAI happens through your own API key
+- HTTPS is recommended for production deployments
+- Consider implementing rate limiting for production use
+
+## Contributing
+
+1. Fork the repository
+2. Create a feature branch
+3. Make your changes
+4. Test thoroughly
+5. Submit a pull request
+
+## Support
+
+For issues and questions:
+1. Check the troubleshooting section above
+2. Verify the backend API is working correctly
+3. Check the browser console for error messages
+4. Ensure environment variables are set correctly
\ No newline at end of file
diff --git a/frontend/components/ApiKeySetup.tsx b/frontend/components/ApiKeySetup.tsx
new file mode 100644
index 000000000..87181a8d3
--- /dev/null
+++ b/frontend/components/ApiKeySetup.tsx
@@ -0,0 +1,124 @@
+import { useState } from 'react'
+import { Eye, EyeOff, Key, AlertCircle } from 'lucide-react'
+
+interface ApiKeySetupProps {
+  onApiKeySubmit: (apiKey: string) => void
+}
+
+export default function ApiKeySetup({ onApiKeySubmit }: ApiKeySetupProps) {
+  const [apiKey, setApiKey] = useState('')
+  const [showApiKey, setShowApiKey] = useState(false)
+  const [error, setError] = useState('')
+  const [isLoading, setIsLoading] = useState(false)
+
+  const handleSubmit = async (e: React.FormEvent) => {
+    e.preventDefault()
+    setError('')
+
+    if (!apiKey.trim()) {
+      setError('Please enter your OpenAI API key')
+      return
+    }
+
+    if (!apiKey.startsWith('sk-')) {
+      setError('Invalid API key format. OpenAI API keys start with "sk-"')
+      return
+    }
+
+    setIsLoading(true)
+
+    try {
+      // Make a health check to the backend before accepting the key
+      const response = await fetch(`${process.env.NEXT_PUBLIC_API_URL || 'http://localhost:8000'}/api/health`)
+
+      if (!response.ok) {
+        throw new Error('Backend server is not accessible. Please ensure the API server is running.')
+      }
+
+      onApiKeySubmit(apiKey.trim())
+    } catch (err) {
+      setError(err instanceof Error ? err.message : 'Failed to validate API key')
+    } finally {
+      setIsLoading(false)
+    }
+  }
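+
+  // Note: handleSubmit checks the key format locally and verifies that the backend is
+  // reachable, but it does not confirm the key itself is valid; an invalid key will only
+  // surface as an error once a chat request is made.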
+  return (
+    <div className="min-h-screen flex items-center justify-center bg-background p-4">
+      <div className="w-full max-w-md bg-card border rounded-lg shadow-sm p-6 space-y-6">
+        <div className="text-center space-y-2">
+          <div className="mx-auto w-12 h-12 rounded-full bg-primary/10 flex items-center justify-center">
+            <Key className="w-6 h-6 text-primary" />
+          </div>
+          <h1 className="text-2xl font-semibold">Welcome!</h1>
+          <p className="text-sm text-muted-foreground">
+            Enter your OpenAI API key to get started with the chat assistant.
+          </p>
+        </div>
+
+        <form onSubmit={handleSubmit} className="space-y-4">
+          <div>
+            <label htmlFor="api-key" className="block text-sm font-medium mb-1">
+              OpenAI API Key
+            </label>
+            <div className="relative">
+              <input
+                id="api-key"
+                type={showApiKey ? 'text' : 'password'}
+                value={apiKey}
+                onChange={(e) => setApiKey(e.target.value)}
+                placeholder="sk-..."
+                className="w-full px-3 py-2 pr-10 border border-input rounded-md bg-background focus:ring-2 focus:ring-ring focus:border-transparent outline-none transition-colors"
+                disabled={isLoading}
+              />
+              <button
+                type="button"
+                onClick={() => setShowApiKey(!showApiKey)}
+                className="absolute right-2 top-1/2 -translate-y-1/2 text-muted-foreground hover:text-foreground"
+                aria-label={showApiKey ? 'Hide API key' : 'Show API key'}
+              >
+                {showApiKey ? <EyeOff className="w-4 h-4" /> : <Eye className="w-4 h-4" />}
+              </button>
+            </div>
+          </div>
+
+          {error && (
+            <div className="flex items-center gap-2 text-sm text-destructive">
+              <AlertCircle className="w-4 h-4" />
+              <span>{error}</span>
+            </div>
+          )}
+
+          <button
+            type="submit"
+            disabled={isLoading}
+            className="w-full py-2 rounded-md bg-primary text-primary-foreground hover:bg-primary/90 disabled:opacity-50 transition-colors"
+          >
+            {isLoading ? 'Checking connection...' : 'Continue'}
+          </button>
+        </form>
+
+        <div className="space-y-2 text-center text-xs text-muted-foreground">
+          <p>
+            Your API key is stored securely in your browser session and never sent to our servers.
+          </p>
+          <p>
+            Don't have an API key?{' '}
+            <a
+              href="https://platform.openai.com/api-keys"
+              target="_blank"
+              rel="noopener noreferrer"
+              className="text-primary underline"
+            >
+              Get one from OpenAI
+            </a>
+          </p>
+        </div>
+      </div>
+    </div>
+  )
+}
diff --git a/frontend/components/ChatInterface.tsx b/frontend/components/ChatInterface.tsx
new file mode 100644
index 000000000..5ac200fd9
--- /dev/null
+++ b/frontend/components/ChatInterface.tsx
@@ -0,0 +1,254 @@
+import { useState, useRef, useEffect } from 'react'
+import { Send, Settings, MessageSquare, User, Bot } from 'lucide-react'
+import MessageBubble from './MessageBubble'
+import { Message } from '@/types'
+
+interface ChatInterfaceProps {
+  apiKey: string
+  onApiKeyReset: () => void
+}
+
+export default function ChatInterface({ apiKey, onApiKeyReset }: ChatInterfaceProps) {
+  const [messages, setMessages] = useState<Message[]>([])
+  const [userMessage, setUserMessage] = useState('')
+  const [developerMessage, setDeveloperMessage] = useState('You are a helpful AI assistant. Please provide clear and helpful responses.')
+  const [model, setModel] = useState('gpt-4.1-mini')
+  const [isLoading, setIsLoading] = useState(false)
+  const [showSettings, setShowSettings] = useState(false)
+  const messagesEndRef = useRef<HTMLDivElement>(null)
+  const textareaRef = useRef<HTMLTextAreaElement>(null)
+
+  const scrollToBottom = () => {
+    messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' })
+  }
+
+  useEffect(() => {
+    scrollToBottom()
+  }, [messages])
+
+  const handleSubmit = async (e: React.FormEvent) => {
+    e.preventDefault()
+
+    if (!userMessage.trim() || isLoading) return
+
+    const newUserMessage: Message = {
+      id: Date.now().toString(),
+      role: 'user',
+      content: userMessage,
+      timestamp: new Date()
+    }
+
+    setMessages(prev => [...prev, newUserMessage])
+    setUserMessage('')
+    setIsLoading(true)
+
+    // Create assistant message with streaming content
+    const assistantMessage: Message = {
+      id: (Date.now() + 1).toString(),
+      role: 'assistant',
+      content: '',
+      timestamp: new Date(),
+      isStreaming: true
+    }
+
+    setMessages(prev => [...prev, assistantMessage])
+
+    try {
+      const response = await fetch(`${process.env.NEXT_PUBLIC_API_URL || 'http://localhost:8000'}/api/chat`, {
+        method: 'POST',
+        headers: {
+          'Content-Type': 'application/json',
+        },
+        body: JSON.stringify({
+          developer_message: developerMessage,
+          user_message: userMessage,
+          model,
+          api_key: apiKey
+        })
+      })
+
+      if (!response.ok) {
+        throw new Error(`HTTP error! status: ${response.status}`)
+      }
+
+      if (!response.body) {
+        throw new Error('No response body')
+      }
+
+      const reader = response.body.getReader()
+      const decoder = new TextDecoder()
+
+      while (true) {
+        const { done, value } = await reader.read()
+
+        if (done) break
+
+        const chunk = decoder.decode(value)
+
+        setMessages(prev => prev.map(msg =>
+          msg.id === assistantMessage.id
+            ? { ...msg, content: msg.content + chunk }
+            : msg
+        ))
+      }
+
+      // Mark streaming as complete
+      setMessages(prev => prev.map(msg =>
+        msg.id === assistantMessage.id
+          ? { ...msg, isStreaming: false }
+          : msg
+      ))
+
+    } catch (error) {
+      console.error('Chat error:', error)
+      setMessages(prev => prev.map(msg =>
+        msg.id === assistantMessage.id
+          ? {
+              ...msg,
+              content: 'Sorry, there was an error processing your request. Please try again.',
+              isStreaming: false,
+              isError: true
+            }
+          : msg
+      ))
+    } finally {
+      setIsLoading(false)
+    }
+  }
+
+  const handleKeyDown = (e: React.KeyboardEvent) => {
+    if (e.key === 'Enter' && !e.shiftKey) {
+      e.preventDefault()
+      handleSubmit(e as any)
+    }
+  }
+
+  const handleTextareaResize = () => {
+    if (textareaRef.current) {
+      textareaRef.current.style.height = 'auto'
+      textareaRef.current.style.height = Math.min(textareaRef.current.scrollHeight, 120) + 'px'
+    }
+  }
+
+  return (
+    <div className="flex flex-col h-screen bg-background">
+      {/* Header */}
+      <div className="flex items-center justify-between border-b bg-card px-4 py-3">
+        <div className="flex items-center gap-3">
+          <div className="w-9 h-9 rounded-full bg-primary/10 flex items-center justify-center">
+            <MessageSquare className="w-5 h-5 text-primary" />
+          </div>
+          <div>
+            <h1 className="text-sm font-semibold">AI Assistant</h1>
+            <p className="text-xs text-muted-foreground">{model}</p>
+          </div>
+        </div>
+
+        <div className="flex items-center gap-2">
+          <button
+            type="button"
+            onClick={() => setShowSettings(!showSettings)}
+            className="p-2 rounded-md hover:bg-muted transition-colors"
+            aria-label="Toggle settings"
+          >
+            <Settings className="w-4 h-4" />
+          </button>
+          <button
+            type="button"
+            onClick={onApiKeyReset}
+            className="px-2 py-1 text-xs rounded-md border hover:bg-muted transition-colors"
+          >
+            Change API key
+          </button>
+        </div>
+      </div>
+
+      {/* Settings Panel */}
+      {showSettings && (
+        <div className="border-b bg-card px-4 py-3 space-y-3">