diff --git a/.agent/AGENTS.md b/.agent/AGENTS.md index 106d64be..4b1713ec 100644 --- a/.agent/AGENTS.md +++ b/.agent/AGENTS.md @@ -379,6 +379,7 @@ Subagents provide specialized capabilities. Read them when tasks require domain | `tools/build-agent/` | Agent design - composing efficient agents, reviewing agent instructions | build-agent, agent-review | | `tools/build-mcp/` | MCP development - creating Model Context Protocol servers and tools | build-mcp, api-wrapper, server-patterns, transports, deployment | | `tools/ai-assistants/` | AI tool integration - configuring assistants, CAPTCHA solving, multi-modal agents | agno, capsolver, windsurf, configuration, status | +| `tools/ai-orchestration/` | AI orchestration frameworks - visual builders, multi-agent teams, workflow automation | overview, langflow, crewai, autogen, packaging | | `tools/browser/` | Browser automation - web scraping, testing, screenshots, form filling | stagehand, playwright, playwriter, crawl4ai, dev-browser, pagespeed, chrome-devtools | | `tools/ui/` | UI components - component libraries, design systems, interface constraints | shadcn, ui-skills | | `tools/code-review/` | Code quality - linting, security scanning, style enforcement, PR reviews | code-standards, code-simplifier, codacy, coderabbit, qlty, snyk, secretlint, auditing | diff --git a/.agent/scripts/autogen-helper.sh b/.agent/scripts/autogen-helper.sh new file mode 100755 index 00000000..0ebb6f21 --- /dev/null +++ b/.agent/scripts/autogen-helper.sh @@ -0,0 +1,512 @@ +#!/bin/bash +# ============================================================================= +# AutoGen Helper Script +# ============================================================================= +# Microsoft AutoGen agentic AI framework setup and management +# +# Usage: +# bash .agent/scripts/autogen-helper.sh [action] +# +# Actions: +# setup Complete setup of AutoGen +# start Start AutoGen Studio +# stop Stop AutoGen Studio +# status Check AutoGen status +# check Check prerequisites +# help Show this help message +# ============================================================================= + +set -euo pipefail + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +# Configuration +AUTOGEN_DIR="$HOME/.aidevops/autogen" +AUTOGEN_STUDIO_PORT="${AUTOGEN_STUDIO_PORT:-8081}" +SCRIPTS_DIR="$HOME/.aidevops/scripts" +LOCALHOST_HELPER="$SCRIPTS_DIR/localhost-helper.sh" + +# Helper functions +print_info() { + echo -e "${BLUE}[INFO]${NC} $1" +} + +print_success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" +} + +print_warning() { + echo -e "${YELLOW}[WARNING]${NC} $1" +} + +print_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +# Port management integration with localhost-helper.sh +# Returns available port (original if free, or next available) +get_available_port() { + local desired_port="$1" + + # Use localhost-helper.sh if available + if [[ -x "$LOCALHOST_HELPER" ]]; then + if "$LOCALHOST_HELPER" check-port "$desired_port" >/dev/null 2>&1; then + echo "$desired_port" + return 0 + else + # Port in use, find alternative + local suggested + suggested=$("$LOCALHOST_HELPER" find-port "$((desired_port + 1))" 2>/dev/null) + if [[ -n "$suggested" ]]; then + print_warning "Port $desired_port in use, using $suggested instead" + echo "$suggested" + return 0 + fi + fi + fi + + # Fallback: basic port check using lsof + if ! 
lsof -i :"$desired_port" >/dev/null 2>&1; then + echo "$desired_port" + return 0 + fi + + # Find next available port + local port="$desired_port" + while lsof -i :"$port" >/dev/null 2>&1 && [[ $port -lt 65535 ]]; do + ((port++)) + done + + if [[ $port -lt 65535 ]]; then + print_warning "Port $desired_port in use, using $port instead" + echo "$port" + return 0 + fi + + print_error "No available ports found" + return 1 +} + +# Check prerequisites +check_prerequisites() { + local missing=0 + + print_info "Checking prerequisites..." + + # Check Python + if command -v python3 &> /dev/null; then + local python_version + python_version=$(python3 --version 2>&1 | cut -d' ' -f2) + local major minor + major=$(echo "$python_version" | cut -d. -f1) + minor=$(echo "$python_version" | cut -d. -f2) + + if [[ $major -ge 3 ]] && [[ $minor -ge 10 ]]; then + print_success "Python $python_version found (3.10+ required)" + else + print_error "Python 3.10+ required, found $python_version" + missing=1 + fi + else + print_error "Python 3 not found" + missing=1 + fi + + # Check pip + if command -v pip3 &> /dev/null || python3 -m pip --version &> /dev/null; then + print_success "pip found" + else + print_error "pip not found" + missing=1 + fi + + # Check for uv (preferred) + if command -v uv &> /dev/null; then + print_success "uv found (preferred package manager)" + else + print_warning "uv not found, will use pip" + fi + + if [[ $missing -eq 1 ]]; then + print_error "Missing prerequisites. Please install them first." + return 1 + fi + + print_success "All prerequisites met" + return 0 +} + +# Setup AutoGen +setup_autogen() { + print_info "Setting up AutoGen..." + + # Create directories + mkdir -p "$AUTOGEN_DIR" + mkdir -p "$SCRIPTS_DIR" + + cd "$AUTOGEN_DIR" || exit 1 + + # Create virtual environment + if [[ ! -d "venv" ]]; then + print_info "Creating virtual environment..." + python3 -m venv venv + fi + + # Activate venv + # shellcheck source=/dev/null + source venv/bin/activate + + # Install AutoGen + print_info "Installing AutoGen..." + if command -v uv &> /dev/null; then + uv pip install autogen-agentchat -U + uv pip install 'autogen-ext[openai]' -U + uv pip install autogenstudio -U + else + pip install autogen-agentchat -U + pip install 'autogen-ext[openai]' -U + pip install autogenstudio -U + fi + + # Create environment template + if [[ ! -f ".env.example" ]]; then + cat > .env.example << 'EOF' +# AutoGen Configuration for AI DevOps Framework +# Copy this file to .env and configure your API keys + +# OpenAI Configuration (Required for most agents) +OPENAI_API_KEY=your_openai_api_key_here + +# Anthropic Configuration (Optional) +ANTHROPIC_API_KEY=your_anthropic_key_here + +# Azure OpenAI Configuration (Optional) +AZURE_OPENAI_API_KEY=your_azure_key_here +AZURE_OPENAI_ENDPOINT=https://your-resource.openai.azure.com/ + +# Google Configuration (Optional) +GOOGLE_API_KEY=your_google_key_here + +# Local LLM Configuration (Ollama) +OLLAMA_BASE_URL=http://localhost:11434 + +# AutoGen Studio Configuration +AUTOGEN_STUDIO_PORT=8081 +AUTOGEN_STUDIO_APPDIR=./studio-data + +# Security Note: All processing runs locally +# No data is sent to external services unless you configure external LLMs +EOF + print_success "Created environment template" + fi + + # Copy template to .env if not exists + if [[ ! 
-f ".env" ]]; then + cp .env.example .env + print_info "Created .env file - please configure your API keys" + fi + + # Create example script + create_example_script + + # Create management scripts + create_management_scripts + + print_success "AutoGen setup complete" + print_info "Directory: $AUTOGEN_DIR" + print_info "Configure your API keys in .env file" + return 0 +} + +# Create example script +create_example_script() { + print_info "Creating example script..." + + cat > "$AUTOGEN_DIR/hello_autogen.py" << 'EXAMPLEEOF' +""" +AutoGen Hello World Example +AI DevOps Framework Integration +""" +import asyncio +import os +from dotenv import load_dotenv + +# Load environment variables +load_dotenv() + +async def main(): + from autogen_agentchat.agents import AssistantAgent + from autogen_ext.models.openai import OpenAIChatCompletionClient + + # Create model client + model_client = OpenAIChatCompletionClient(model="gpt-4o-mini") + + # Create agent + agent = AssistantAgent( + "assistant", + model_client=model_client, + system_message="You are a helpful AI assistant." + ) + + # Run a simple task + print("Running AutoGen agent...") + result = await agent.run(task="Say 'Hello from AutoGen!' and explain what AutoGen is in one sentence.") + print(f"\nResult: {result}") + + # Clean up + await model_client.close() + +if __name__ == "__main__": + asyncio.run(main()) +EXAMPLEEOF + + print_success "Created example script: hello_autogen.py" + return 0 +} + +# Create management scripts +create_management_scripts() { + print_info "Creating management scripts..." + + mkdir -p "$SCRIPTS_DIR" + + # Create start script + cat > "$SCRIPTS_DIR/start-autogen-studio.sh" << 'EOF' +#!/bin/bash +# AI DevOps Framework - AutoGen Studio Startup Script + +AUTOGEN_DIR="$HOME/.aidevops/autogen" +SCRIPTS_DIR="$HOME/.aidevops/scripts" +LOCALHOST_HELPER="$SCRIPTS_DIR/localhost-helper.sh" +DESIRED_PORT="${AUTOGEN_STUDIO_PORT:-8081}" +APPDIR="${AUTOGEN_STUDIO_APPDIR:-$AUTOGEN_DIR/studio-data}" + +echo "Starting AutoGen Studio..." + +# Check port availability using localhost-helper.sh +if [[ -x "$LOCALHOST_HELPER" ]]; then + if ! "$LOCALHOST_HELPER" check-port "$DESIRED_PORT" >/dev/null 2>&1; then + echo "[WARNING] Port $DESIRED_PORT is in use" + SUGGESTED=$("$LOCALHOST_HELPER" find-port "$((DESIRED_PORT + 1))" 2>/dev/null) + if [[ -n "$SUGGESTED" ]]; then + echo "[INFO] Using alternative port: $SUGGESTED" + DESIRED_PORT="$SUGGESTED" + fi + fi +else + # Fallback port check + if lsof -i :"$DESIRED_PORT" >/dev/null 2>&1; then + echo "[WARNING] Port $DESIRED_PORT is in use, finding alternative..." + while lsof -i :"$DESIRED_PORT" >/dev/null 2>&1 && [[ $DESIRED_PORT -lt 65535 ]]; do + ((DESIRED_PORT++)) + done + echo "[INFO] Using port: $DESIRED_PORT" + fi +fi + +if [[ -d "$AUTOGEN_DIR/venv" ]]; then + cd "$AUTOGEN_DIR" || exit 1 + + # Activate venv + source venv/bin/activate + + # Load environment + if [[ -f .env ]]; then + set -a + source .env + set +a + fi + + # Create app directory + mkdir -p "$APPDIR" + + # Start AutoGen Studio with available port + autogenstudio ui --port "$DESIRED_PORT" --appdir "$APPDIR" & + STUDIO_PID=$! + echo "$STUDIO_PID" > /tmp/autogen_studio_pid + echo "$DESIRED_PORT" > /tmp/autogen_studio_port + + sleep 5 + + echo "" + echo "AutoGen Studio started!" + echo "URL: http://localhost:$DESIRED_PORT" + echo "" + echo "Use 'stop-autogen-studio.sh' to stop" +else + echo "AutoGen not set up. 
Run setup first:" + echo " bash .agent/scripts/autogen-helper.sh setup" + exit 1 +fi +EOF + chmod +x "$SCRIPTS_DIR/start-autogen-studio.sh" + + # Create stop script + cat > "$SCRIPTS_DIR/stop-autogen-studio.sh" << 'EOF' +#!/bin/bash +# AI DevOps Framework - AutoGen Studio Stop Script + +echo "Stopping AutoGen Studio..." + +if [[ -f /tmp/autogen_studio_pid ]]; then + STUDIO_PID=$(cat /tmp/autogen_studio_pid) + if kill -0 "$STUDIO_PID" 2>/dev/null; then + kill "$STUDIO_PID" + echo "Stopped AutoGen Studio (PID: $STUDIO_PID)" + fi + rm -f /tmp/autogen_studio_pid +fi + +# Fallback: kill autogenstudio processes +pkill -f "autogenstudio" 2>/dev/null || true + +echo "AutoGen Studio stopped" +EOF + chmod +x "$SCRIPTS_DIR/stop-autogen-studio.sh" + + # Create status script + cat > "$SCRIPTS_DIR/autogen-status.sh" << 'EOF' +#!/bin/bash +# AI DevOps Framework - AutoGen Status Script + +# Get actual port (from saved file or default) +if [[ -f /tmp/autogen_studio_port ]]; then + PORT=$(cat /tmp/autogen_studio_port) +else + PORT="${AUTOGEN_STUDIO_PORT:-8081}" +fi + +echo "AutoGen Status" +echo "==============" + +# Check if Studio is running +if curl -s "http://localhost:$PORT" >/dev/null 2>&1; then + echo "AutoGen Studio: Running" + echo "URL: http://localhost:$PORT" +else + echo "AutoGen Studio: Not running" +fi + +echo "" +echo "Process Information:" +pgrep -f "autogenstudio" && ps aux | grep -E "autogenstudio" | grep -v grep || echo "No AutoGen Studio processes found" + +# Check AutoGen packages +echo "" +echo "Installed Packages:" +AUTOGEN_DIR="$HOME/.aidevops/autogen" +if [[ -d "$AUTOGEN_DIR/venv" ]]; then + source "$AUTOGEN_DIR/venv/bin/activate" + pip list 2>/dev/null | grep -E "autogen" || echo "AutoGen packages not found" +else + echo "AutoGen venv not found" +fi +EOF + chmod +x "$SCRIPTS_DIR/autogen-status.sh" + + print_success "Management scripts created in $SCRIPTS_DIR" + return 0 +} + +# Start AutoGen Studio +start_studio() { + if [[ -f "$SCRIPTS_DIR/start-autogen-studio.sh" ]]; then + "$SCRIPTS_DIR/start-autogen-studio.sh" + else + print_error "AutoGen not set up. Run 'setup' first." + return 1 + fi + return 0 +} + +# Stop AutoGen Studio +stop_studio() { + if [[ -f "$SCRIPTS_DIR/stop-autogen-studio.sh" ]]; then + "$SCRIPTS_DIR/stop-autogen-studio.sh" + else + pkill -f "autogenstudio" 2>/dev/null || true + fi + return 0 +} + +# Check status +check_status() { + if [[ -f "$SCRIPTS_DIR/autogen-status.sh" ]]; then + "$SCRIPTS_DIR/autogen-status.sh" + else + if curl -s "http://localhost:$AUTOGEN_STUDIO_PORT" >/dev/null 2>&1; then + print_success "AutoGen Studio is running at http://localhost:$AUTOGEN_STUDIO_PORT" + else + print_warning "AutoGen Studio is not running" + fi + fi + return 0 +} + +# Show usage +show_usage() { + echo "AI DevOps Framework - AutoGen Helper" + echo "" + echo "Usage: $0 [action]" + echo "" + echo "Actions:" + echo " setup Complete setup of AutoGen" + echo " start Start AutoGen Studio" + echo " stop Stop AutoGen Studio" + echo " status Check AutoGen status" + echo " check Check prerequisites" + echo " help Show this help message" + echo "" + echo "Examples:" + echo " $0 setup # Full setup" + echo " $0 start # Start Studio" + echo " $0 status # Check status" + echo "" + echo "URLs (after start):" + echo " AutoGen Studio: http://localhost:8081" + return 0 +} + +# Main function +main() { + local action="${1:-help}" + shift || true + + case "$action" in + "setup") + if check_prerequisites; then + setup_autogen + echo "" + print_success "AutoGen setup complete!" 
+ echo "" + echo "Next Steps:" + echo "1. Configure API keys in $AUTOGEN_DIR/.env" + echo "2. Start AutoGen Studio: $SCRIPTS_DIR/start-autogen-studio.sh" + echo "3. Or run example: cd $AUTOGEN_DIR && source venv/bin/activate && python hello_autogen.py" + fi + ;; + "start") + start_studio + ;; + "stop") + stop_studio + ;; + "status") + check_status + ;; + "check") + check_prerequisites + ;; + "help"|*) + show_usage + ;; + esac + return 0 +} + +main "$@" diff --git a/.agent/scripts/crewai-helper.sh b/.agent/scripts/crewai-helper.sh new file mode 100755 index 00000000..a7d25979 --- /dev/null +++ b/.agent/scripts/crewai-helper.sh @@ -0,0 +1,681 @@ +#!/bin/bash +# ============================================================================= +# CrewAI Helper Script +# ============================================================================= +# Multi-agent orchestration framework setup and management +# +# Usage: +# bash .agent/scripts/crewai-helper.sh [action] +# +# Actions: +# setup Complete setup of CrewAI +# start Start CrewAI Studio +# stop Stop CrewAI Studio +# status Check CrewAI status +# check Check prerequisites +# create Create a new crew project +# run Run a crew +# help Show this help message +# ============================================================================= + +set -euo pipefail + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +# Configuration +CREWAI_DIR="$HOME/.aidevops/crewai" +CREWAI_STUDIO_PORT="${CREWAI_STUDIO_PORT:-8501}" +SCRIPTS_DIR="$HOME/.aidevops/scripts" +LOCALHOST_HELPER="$SCRIPTS_DIR/localhost-helper.sh" + +# Helper functions +print_info() { + echo -e "${BLUE}[INFO]${NC} $1" +} + +print_success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" +} + +print_warning() { + echo -e "${YELLOW}[WARNING]${NC} $1" +} + +print_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +# Port management integration with localhost-helper.sh +# Returns available port (original if free, or next available) +get_available_port() { + local desired_port="$1" + + # Use localhost-helper.sh if available + if [[ -x "$LOCALHOST_HELPER" ]]; then + if "$LOCALHOST_HELPER" check-port "$desired_port" >/dev/null 2>&1; then + echo "$desired_port" + return 0 + else + # Port in use, find alternative + local suggested + suggested=$("$LOCALHOST_HELPER" find-port "$((desired_port + 1))" 2>/dev/null) + if [[ -n "$suggested" ]]; then + print_warning "Port $desired_port in use, using $suggested instead" + echo "$suggested" + return 0 + fi + fi + fi + + # Fallback: basic port check using lsof + if ! lsof -i :"$desired_port" >/dev/null 2>&1; then + echo "$desired_port" + return 0 + fi + + # Find next available port + local port="$desired_port" + while lsof -i :"$port" >/dev/null 2>&1 && [[ $port -lt 65535 ]]; do + ((port++)) + done + + if [[ $port -lt 65535 ]]; then + print_warning "Port $desired_port in use, using $port instead" + echo "$port" + return 0 + fi + + print_error "No available ports found" + return 1 +} + +# Check prerequisites +check_prerequisites() { + local missing=0 + + print_info "Checking prerequisites..." + + # Check Python + if command -v python3 &> /dev/null; then + local python_version + python_version=$(python3 --version 2>&1 | cut -d' ' -f2) + local major minor + major=$(echo "$python_version" | cut -d. -f1) + minor=$(echo "$python_version" | cut -d. 
-f2) + + if [[ $major -ge 3 ]] && [[ $minor -ge 10 ]]; then + print_success "Python $python_version found (3.10+ required)" + else + print_error "Python 3.10+ required, found $python_version" + missing=1 + fi + else + print_error "Python 3 not found" + missing=1 + fi + + # Check pip + if command -v pip3 &> /dev/null || python3 -m pip --version &> /dev/null; then + print_success "pip found" + else + print_error "pip not found" + missing=1 + fi + + # Check for uv (preferred) + if command -v uv &> /dev/null; then + print_success "uv found (preferred package manager)" + else + print_warning "uv not found, will use pip" + fi + + if [[ $missing -eq 1 ]]; then + print_error "Missing prerequisites. Please install them first." + return 1 + fi + + print_success "All prerequisites met" + return 0 +} + +# Setup CrewAI +setup_crewai() { + print_info "Setting up CrewAI..." + + # Create directories + mkdir -p "$CREWAI_DIR" + mkdir -p "$SCRIPTS_DIR" + + cd "$CREWAI_DIR" || exit 1 + + # Create virtual environment + if [[ ! -d "venv" ]]; then + print_info "Creating virtual environment..." + python3 -m venv venv + fi + + # Activate venv + # shellcheck source=/dev/null + source venv/bin/activate + + # Install CrewAI + print_info "Installing CrewAI..." + if command -v uv &> /dev/null; then + uv pip install crewai -U + uv pip install 'crewai[tools]' -U + uv pip install streamlit -U + else + pip install crewai -U + pip install 'crewai[tools]' -U + pip install streamlit -U + fi + + # Create environment template + if [[ ! -f ".env.example" ]]; then + cat > .env.example << 'EOF' +# CrewAI Configuration for AI DevOps Framework +# Copy this file to .env and configure your API keys + +# OpenAI Configuration (Required for most crews) +OPENAI_API_KEY=your_openai_api_key_here + +# Anthropic Configuration (Optional) +ANTHROPIC_API_KEY=your_anthropic_key_here + +# Serper API for web search (Optional) +SERPER_API_KEY=your_serper_key_here + +# Google Configuration (Optional) +GOOGLE_API_KEY=your_google_key_here + +# Local LLM Configuration (Ollama) +OLLAMA_BASE_URL=http://localhost:11434 + +# CrewAI Configuration +CREWAI_TELEMETRY=false + +# CrewAI Studio Port +CREWAI_STUDIO_PORT=8501 + +# Security Note: All processing runs locally +# No data is sent to external services unless you configure external LLMs +EOF + print_success "Created environment template" + fi + + # Copy template to .env if not exists + if [[ ! -f ".env" ]]; then + cp .env.example .env + print_info "Created .env file - please configure your API keys" + fi + + # Create a simple studio app + create_studio_app + + # Create management scripts + create_management_scripts + + print_success "CrewAI setup complete" + print_info "Directory: $CREWAI_DIR" + print_info "Configure your API keys in .env file" + return 0 +} + +# Create a simple CrewAI Studio app +create_studio_app() { + print_info "Creating CrewAI Studio app..." 
+ + cat > "$CREWAI_DIR/studio_app.py" << 'STUDIOEOF' +""" +CrewAI Studio - Simple Streamlit Interface +AI DevOps Framework Integration +""" +import streamlit as st +import os +from dotenv import load_dotenv + +# Load environment variables +load_dotenv() + +st.set_page_config( + page_title="CrewAI Studio", + page_icon="🤖", + layout="wide" +) + +st.title("🤖 CrewAI Studio") +st.markdown("*AI DevOps Framework - Multi-Agent Orchestration*") + +# Sidebar configuration +st.sidebar.header("Configuration") + +# API Key status +openai_key = os.getenv("OPENAI_API_KEY", "") +if openai_key and openai_key != "your_openai_api_key_here": + st.sidebar.success("✅ OpenAI API Key configured") +else: + st.sidebar.warning("⚠️ OpenAI API Key not configured") + +# Model selection +model = st.sidebar.selectbox( + "Select Model", + ["gpt-4o-mini", "gpt-4o", "gpt-4-turbo", "ollama/llama3.2"] +) + +# Main content +tab1, tab2, tab3 = st.tabs(["Quick Crew", "Custom Crew", "Documentation"]) + +with tab1: + st.header("Quick Crew Builder") + + topic = st.text_input("Research Topic", placeholder="Enter a topic to research...") + + col1, col2 = st.columns(2) + with col1: + num_agents = st.slider("Number of Agents", 1, 5, 2) + with col2: + process_type = st.selectbox("Process Type", ["sequential", "hierarchical"]) + + if st.button("Run Crew", type="primary"): + if topic: + with st.spinner("Running crew..."): + try: + from crewai import Agent, Crew, Task, Process + + # Create agents + researcher = Agent( + role="Senior Researcher", + goal=f"Research {topic} thoroughly", + backstory="Expert researcher with deep knowledge.", + verbose=True + ) + + writer = Agent( + role="Content Writer", + goal="Create engaging content", + backstory="Skilled writer who makes complex topics accessible.", + verbose=True + ) + + # Create tasks + research_task = Task( + description=f"Research the topic: {topic}", + expected_output="Comprehensive research summary", + agent=researcher + ) + + writing_task = Task( + description="Write a report based on the research", + expected_output="Well-written report in markdown", + agent=writer + ) + + # Create crew + crew = Crew( + agents=[researcher, writer], + tasks=[research_task, writing_task], + process=Process.sequential if process_type == "sequential" else Process.hierarchical, + verbose=True + ) + + result = crew.kickoff() + + st.success("Crew completed!") + st.markdown("### Result") + st.markdown(str(result)) + + except Exception as e: + st.error(f"Error: {str(e)}") + else: + st.warning("Please enter a topic") + +with tab2: + st.header("Custom Crew Configuration") + st.info("For advanced crews, use the CrewAI CLI:") + st.code(""" +# Create a new crew project +crewai create crew my-project + +# Navigate to project +cd my-project + +# Edit configuration +# - src/my_project/config/agents.yaml +# - src/my_project/config/tasks.yaml + +# Run the crew +crewai run + """, language="bash") + +with tab3: + st.header("Documentation") + st.markdown(""" + ### Quick Links + - [CrewAI Documentation](https://docs.crewai.com) + - [CrewAI GitHub](https://github.com/crewAIInc/crewAI) + - [CrewAI Examples](https://github.com/crewAIInc/crewAI-examples) + + ### Key Concepts + + **Agents**: AI entities with roles, goals, and backstories + + **Tasks**: Specific assignments with descriptions and expected outputs + + **Crews**: Teams of agents working together on tasks + + **Flows**: Event-driven workflows for complex orchestration + + ### Process Types + + - **Sequential**: Tasks executed one after another + - **Hierarchical**: 
Manager agent delegates to workers + """) + +# Footer +st.markdown("---") +st.markdown("*Part of the [AI DevOps Framework](https://github.com/marcusquinn/aidevops)*") +STUDIOEOF + + print_success "Created CrewAI Studio app" + return 0 +} + +# Create management scripts +create_management_scripts() { + print_info "Creating management scripts..." + + mkdir -p "$SCRIPTS_DIR" + + # Create start script + cat > "$SCRIPTS_DIR/start-crewai-studio.sh" << 'EOF' +#!/bin/bash +# AI DevOps Framework - CrewAI Studio Startup Script + +CREWAI_DIR="$HOME/.aidevops/crewai" +SCRIPTS_DIR="$HOME/.aidevops/scripts" +LOCALHOST_HELPER="$SCRIPTS_DIR/localhost-helper.sh" +DESIRED_PORT="${CREWAI_STUDIO_PORT:-8501}" + +echo "Starting CrewAI Studio..." + +# Check port availability using localhost-helper.sh +if [[ -x "$LOCALHOST_HELPER" ]]; then + if ! "$LOCALHOST_HELPER" check-port "$DESIRED_PORT" >/dev/null 2>&1; then + echo "[WARNING] Port $DESIRED_PORT is in use" + SUGGESTED=$("$LOCALHOST_HELPER" find-port "$((DESIRED_PORT + 1))" 2>/dev/null) + if [[ -n "$SUGGESTED" ]]; then + echo "[INFO] Using alternative port: $SUGGESTED" + DESIRED_PORT="$SUGGESTED" + fi + fi +else + # Fallback port check + if lsof -i :"$DESIRED_PORT" >/dev/null 2>&1; then + echo "[WARNING] Port $DESIRED_PORT is in use, finding alternative..." + while lsof -i :"$DESIRED_PORT" >/dev/null 2>&1 && [[ $DESIRED_PORT -lt 65535 ]]; do + ((DESIRED_PORT++)) + done + echo "[INFO] Using port: $DESIRED_PORT" + fi +fi + +if [[ -f "$CREWAI_DIR/studio_app.py" ]]; then + cd "$CREWAI_DIR" || exit 1 + + # Activate venv + source venv/bin/activate + + # Load environment + if [[ -f .env ]]; then + set -a + source .env + set +a + fi + + # Start Streamlit with available port + streamlit run studio_app.py --server.port "$DESIRED_PORT" --server.headless true & + STUDIO_PID=$! + echo "$STUDIO_PID" > /tmp/crewai_studio_pid + echo "$DESIRED_PORT" > /tmp/crewai_studio_port + + sleep 3 + + echo "" + echo "CrewAI Studio started!" + echo "URL: http://localhost:$DESIRED_PORT" + echo "" + echo "Use 'stop-crewai-studio.sh' to stop" +else + echo "CrewAI Studio not set up. Run setup first:" + echo " bash .agent/scripts/crewai-helper.sh setup" + exit 1 +fi +EOF + chmod +x "$SCRIPTS_DIR/start-crewai-studio.sh" + + # Create stop script + cat > "$SCRIPTS_DIR/stop-crewai-studio.sh" << 'EOF' +#!/bin/bash +# AI DevOps Framework - CrewAI Studio Stop Script + +echo "Stopping CrewAI Studio..." 
+ +if [[ -f /tmp/crewai_studio_pid ]]; then + STUDIO_PID=$(cat /tmp/crewai_studio_pid) + if kill -0 "$STUDIO_PID" 2>/dev/null; then + kill "$STUDIO_PID" + echo "Stopped CrewAI Studio (PID: $STUDIO_PID)" + fi + rm -f /tmp/crewai_studio_pid +fi + +# Fallback: kill streamlit processes +pkill -f "streamlit run studio_app.py" 2>/dev/null || true + +echo "CrewAI Studio stopped" +EOF + chmod +x "$SCRIPTS_DIR/stop-crewai-studio.sh" + + # Create status script + cat > "$SCRIPTS_DIR/crewai-status.sh" << 'EOF' +#!/bin/bash +# AI DevOps Framework - CrewAI Status Script + +# Get actual port (from saved file or default) +if [[ -f /tmp/crewai_studio_port ]]; then + PORT=$(cat /tmp/crewai_studio_port) +else + PORT="${CREWAI_STUDIO_PORT:-8501}" +fi + +echo "CrewAI Status" +echo "=============" + +# Check if Studio is running +if curl -s "http://localhost:$PORT" >/dev/null 2>&1; then + echo "CrewAI Studio: Running" + echo "URL: http://localhost:$PORT" +else + echo "CrewAI Studio: Not running" +fi + +echo "" +echo "Process Information:" +pgrep -f "streamlit.*studio_app" && ps aux | grep -E "streamlit.*studio_app" | grep -v grep || echo "No CrewAI Studio processes found" + +# Check CrewAI CLI +echo "" +echo "CrewAI CLI:" +if command -v crewai &> /dev/null; then + crewai --version 2>/dev/null || echo "CrewAI CLI available" +else + echo "CrewAI CLI not in PATH (activate venv first)" +fi +EOF + chmod +x "$SCRIPTS_DIR/crewai-status.sh" + + print_success "Management scripts created in $SCRIPTS_DIR" + return 0 +} + +# Create a new crew project +create_crew() { + local project_name="${1:-my-crew}" + + print_info "Creating new crew project: $project_name" + + if [[ ! -d "$CREWAI_DIR/venv" ]]; then + print_error "CrewAI not set up. Run 'setup' first." + return 1 + fi + + cd "$CREWAI_DIR" || exit 1 + # shellcheck source=/dev/null + source venv/bin/activate + + crewai create crew "$project_name" + + print_success "Created crew project: $project_name" + print_info "Next steps:" + echo " cd $CREWAI_DIR/$project_name" + echo " crewai install" + echo " crewai run" + + return 0 +} + +# Run a crew +run_crew() { + local project_dir="${1:-.}" + + if [[ ! -d "$CREWAI_DIR/venv" ]]; then + print_error "CrewAI not set up. Run 'setup' first." + return 1 + fi + + # shellcheck source=/dev/null + source "$CREWAI_DIR/venv/bin/activate" + + cd "$project_dir" || exit 1 + + if [[ -f "pyproject.toml" ]]; then + crewai run + else + print_error "Not a CrewAI project directory (no pyproject.toml found)" + return 1 + fi + + return 0 +} + +# Start CrewAI Studio +start_studio() { + if [[ -f "$SCRIPTS_DIR/start-crewai-studio.sh" ]]; then + "$SCRIPTS_DIR/start-crewai-studio.sh" + else + print_error "CrewAI not set up. Run 'setup' first." 
+ return 1 + fi + return 0 +} + +# Stop CrewAI Studio +stop_studio() { + if [[ -f "$SCRIPTS_DIR/stop-crewai-studio.sh" ]]; then + "$SCRIPTS_DIR/stop-crewai-studio.sh" + else + pkill -f "streamlit run studio_app.py" 2>/dev/null || true + fi + return 0 +} + +# Check status +check_status() { + if [[ -f "$SCRIPTS_DIR/crewai-status.sh" ]]; then + "$SCRIPTS_DIR/crewai-status.sh" + else + if curl -s "http://localhost:$CREWAI_STUDIO_PORT" >/dev/null 2>&1; then + print_success "CrewAI Studio is running at http://localhost:$CREWAI_STUDIO_PORT" + else + print_warning "CrewAI Studio is not running" + fi + fi + return 0 +} + +# Show usage +show_usage() { + echo "AI DevOps Framework - CrewAI Helper" + echo "" + echo "Usage: $0 [action] [options]" + echo "" + echo "Actions:" + echo " setup Complete setup of CrewAI" + echo " start Start CrewAI Studio" + echo " stop Stop CrewAI Studio" + echo " status Check CrewAI status" + echo " check Check prerequisites" + echo " create Create a new crew project" + echo " run Run a crew (in current directory)" + echo " help Show this help message" + echo "" + echo "Examples:" + echo " $0 setup # Full setup" + echo " $0 start # Start Studio" + echo " $0 create my-research-crew # Create new project" + echo " $0 run # Run crew in current dir" + echo "" + echo "URLs (after start):" + echo " CrewAI Studio: http://localhost:8501" + return 0 +} + +# Main function +main() { + local action="${1:-help}" + shift || true + + case "$action" in + "setup") + if check_prerequisites; then + setup_crewai + echo "" + print_success "CrewAI setup complete!" + echo "" + echo "Next Steps:" + echo "1. Configure API keys in $CREWAI_DIR/.env" + echo "2. Start CrewAI Studio: $SCRIPTS_DIR/start-crewai-studio.sh" + echo "3. Or create a project: crewai create crew my-project" + fi + ;; + "start") + start_studio + ;; + "stop") + stop_studio + ;; + "status") + check_status + ;; + "check") + check_prerequisites + ;; + "create") + create_crew "$@" + ;; + "run") + run_crew "$@" + ;; + "help"|*) + show_usage + ;; + esac + return 0 +} + +main "$@" diff --git a/.agent/scripts/langflow-helper.sh b/.agent/scripts/langflow-helper.sh new file mode 100755 index 00000000..65078a94 --- /dev/null +++ b/.agent/scripts/langflow-helper.sh @@ -0,0 +1,544 @@ +#!/bin/bash +# ============================================================================= +# Langflow Helper Script +# ============================================================================= +# Visual AI workflow builder setup and management +# +# Usage: +# bash .agent/scripts/langflow-helper.sh [action] +# +# Actions: +# setup Complete setup of Langflow +# start Start Langflow server +# stop Stop Langflow server +# status Check Langflow status +# check Check prerequisites +# export Export flows to JSON +# import Import flows from JSON +# help Show this help message +# ============================================================================= + +set -euo pipefail + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +# Configuration +LANGFLOW_DIR="$HOME/.aidevops/langflow" +LANGFLOW_PORT="${LANGFLOW_PORT:-7860}" +SCRIPTS_DIR="$HOME/.aidevops/scripts" +FLOWS_DIR="$LANGFLOW_DIR/flows" +LOCALHOST_HELPER="$SCRIPTS_DIR/localhost-helper.sh" + +# Helper functions +print_info() { + echo -e "${BLUE}[INFO]${NC} $1" +} + +print_success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" +} + +print_warning() { + echo -e "${YELLOW}[WARNING]${NC} $1" +} + +print_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +# Port 
management integration with localhost-helper.sh +# Returns available port (original if free, or next available) +get_available_port() { + local desired_port="$1" + + # Use localhost-helper.sh if available + if [[ -x "$LOCALHOST_HELPER" ]]; then + if "$LOCALHOST_HELPER" check-port "$desired_port" >/dev/null 2>&1; then + echo "$desired_port" + return 0 + else + # Port in use, find alternative + local suggested + suggested=$("$LOCALHOST_HELPER" find-port "$((desired_port + 1))" 2>/dev/null) + if [[ -n "$suggested" ]]; then + print_warning "Port $desired_port in use, using $suggested instead" + echo "$suggested" + return 0 + fi + fi + fi + + # Fallback: basic port check using lsof + if ! lsof -i :"$desired_port" >/dev/null 2>&1; then + echo "$desired_port" + return 0 + fi + + # Find next available port + local port="$desired_port" + while lsof -i :"$port" >/dev/null 2>&1 && [[ $port -lt 65535 ]]; do + ((port++)) + done + + if [[ $port -lt 65535 ]]; then + print_warning "Port $desired_port in use, using $port instead" + echo "$port" + return 0 + fi + + print_error "No available ports found" + return 1 +} + +# Check prerequisites +check_prerequisites() { + local missing=0 + + print_info "Checking prerequisites..." + + # Check Python + if command -v python3 &> /dev/null; then + local python_version + python_version=$(python3 --version 2>&1 | cut -d' ' -f2) + print_success "Python 3 found: $python_version" + else + print_error "Python 3 not found" + missing=1 + fi + + # Check pip + if command -v pip3 &> /dev/null || python3 -m pip --version &> /dev/null; then + print_success "pip found" + else + print_error "pip not found" + missing=1 + fi + + # Check for uv (preferred) or pip + if command -v uv &> /dev/null; then + print_success "uv found (preferred package manager)" + else + print_warning "uv not found, will use pip (consider installing uv for faster installs)" + fi + + if [[ $missing -eq 1 ]]; then + print_error "Missing prerequisites. Please install them first." + return 1 + fi + + print_success "All prerequisites met" + return 0 +} + +# Setup Langflow +setup_langflow() { + print_info "Setting up Langflow..." + + # Create directories + mkdir -p "$LANGFLOW_DIR" + mkdir -p "$FLOWS_DIR" + mkdir -p "$SCRIPTS_DIR" + + cd "$LANGFLOW_DIR" || exit 1 + + # Create virtual environment + if [[ ! -d "venv" ]]; then + print_info "Creating virtual environment..." + python3 -m venv venv + fi + + # Activate venv + # shellcheck source=/dev/null + source venv/bin/activate + + # Install Langflow + print_info "Installing Langflow..." + if command -v uv &> /dev/null; then + uv pip install langflow -U + else + pip install langflow -U + fi + + # Create environment template + if [[ ! 
-f ".env.example" ]]; then + cat > .env.example << 'EOF' +# Langflow Configuration for AI DevOps Framework +# Copy this file to .env and configure your API keys + +# OpenAI Configuration (Required for most flows) +OPENAI_API_KEY=your_openai_api_key_here + +# Anthropic Configuration (Optional) +ANTHROPIC_API_KEY=your_anthropic_key_here + +# Google Configuration (Optional) +GOOGLE_API_KEY=your_google_key_here + +# Langflow Server Configuration +LANGFLOW_HOST=0.0.0.0 +LANGFLOW_PORT=7860 +LANGFLOW_WORKERS=1 +LANGFLOW_LOG_LEVEL=INFO + +# Database Configuration (default: SQLite) +# For production, use PostgreSQL: +# LANGFLOW_DATABASE_URL=postgresql://user:password@localhost:5432/langflow + +# Local LLM Configuration (Ollama) +OLLAMA_BASE_URL=http://localhost:11434 + +# MCP Server (enable to expose flows as MCP tools) +LANGFLOW_MCP_ENABLED=false + +# Security Note: All processing runs locally +# No data is sent to external services unless you configure external LLMs +EOF + print_success "Created environment template" + fi + + # Copy template to .env if not exists + if [[ ! -f ".env" ]]; then + cp .env.example .env + print_info "Created .env file - please configure your API keys" + fi + + # Create startup script + cat > start_langflow.sh << 'EOF' +#!/bin/bash +cd "$(dirname "$0")" || exit +source venv/bin/activate + +# Load environment variables +if [[ -f .env ]]; then + set -a + # shellcheck source=/dev/null + source .env + set +a +fi + +# Start Langflow +langflow run --host "${LANGFLOW_HOST:-0.0.0.0}" --port "${LANGFLOW_PORT:-7860}" +EOF + chmod +x start_langflow.sh + + # Create management scripts + create_management_scripts + + print_success "Langflow setup complete" + print_info "Directory: $LANGFLOW_DIR" + print_info "Configure your API keys in .env file" + return 0 +} + +# Create management scripts +create_management_scripts() { + print_info "Creating management scripts..." + + mkdir -p "$SCRIPTS_DIR" + + # Create start script + cat > "$SCRIPTS_DIR/start-langflow.sh" << 'EOF' +#!/bin/bash +# AI DevOps Framework - Langflow Startup Script + +LANGFLOW_DIR="$HOME/.aidevops/langflow" +SCRIPTS_DIR="$HOME/.aidevops/scripts" +LOCALHOST_HELPER="$SCRIPTS_DIR/localhost-helper.sh" +DESIRED_PORT="${LANGFLOW_PORT:-7860}" + +echo "Starting Langflow..." + +# Check port availability using localhost-helper.sh +if [[ -x "$LOCALHOST_HELPER" ]]; then + if ! "$LOCALHOST_HELPER" check-port "$DESIRED_PORT" >/dev/null 2>&1; then + echo "[WARNING] Port $DESIRED_PORT is in use" + SUGGESTED=$("$LOCALHOST_HELPER" find-port "$((DESIRED_PORT + 1))" 2>/dev/null) + if [[ -n "$SUGGESTED" ]]; then + echo "[INFO] Using alternative port: $SUGGESTED" + DESIRED_PORT="$SUGGESTED" + fi + fi +else + # Fallback port check + if lsof -i :"$DESIRED_PORT" >/dev/null 2>&1; then + echo "[WARNING] Port $DESIRED_PORT is in use, finding alternative..." + while lsof -i :"$DESIRED_PORT" >/dev/null 2>&1 && [[ $DESIRED_PORT -lt 65535 ]]; do + ((DESIRED_PORT++)) + done + echo "[INFO] Using port: $DESIRED_PORT" + fi +fi + +if [[ -f "$LANGFLOW_DIR/start_langflow.sh" ]]; then + cd "$LANGFLOW_DIR" || exit 1 + + # Export port for the startup script + export LANGFLOW_PORT="$DESIRED_PORT" + + ./start_langflow.sh & + LANGFLOW_PID=$! + echo "$LANGFLOW_PID" > /tmp/langflow_pid + echo "$DESIRED_PORT" > /tmp/langflow_port + + # Wait for startup + sleep 5 + + if curl -s "http://localhost:$DESIRED_PORT/health" >/dev/null 2>&1; then + echo "" + echo "Langflow started successfully!" 
+ echo "URL: http://localhost:$DESIRED_PORT" + echo "API Docs: http://localhost:$DESIRED_PORT/docs" + echo "" + echo "Use 'stop-langflow.sh' to stop the server" + else + echo "Langflow may still be starting. Check http://localhost:$DESIRED_PORT" + fi +else + echo "Langflow not set up. Run setup first:" + echo " bash .agent/scripts/langflow-helper.sh setup" + exit 1 +fi +EOF + chmod +x "$SCRIPTS_DIR/start-langflow.sh" + + # Create stop script + cat > "$SCRIPTS_DIR/stop-langflow.sh" << 'EOF' +#!/bin/bash +# AI DevOps Framework - Langflow Stop Script + +echo "Stopping Langflow..." + +if [[ -f /tmp/langflow_pid ]]; then + LANGFLOW_PID=$(cat /tmp/langflow_pid) + if kill -0 "$LANGFLOW_PID" 2>/dev/null; then + kill "$LANGFLOW_PID" + echo "Stopped Langflow (PID: $LANGFLOW_PID)" + fi + rm -f /tmp/langflow_pid +fi + +# Fallback: kill by port +pkill -f "langflow run" 2>/dev/null || true + +echo "Langflow stopped" +EOF + chmod +x "$SCRIPTS_DIR/stop-langflow.sh" + + # Create status script + cat > "$SCRIPTS_DIR/langflow-status.sh" << 'EOF' +#!/bin/bash +# AI DevOps Framework - Langflow Status Script + +# Get actual port (from saved file or default) +if [[ -f /tmp/langflow_port ]]; then + PORT=$(cat /tmp/langflow_port) +else + PORT="${LANGFLOW_PORT:-7860}" +fi + +echo "Langflow Status" +echo "===============" + +# Check if running +if curl -s "http://localhost:$PORT/health" >/dev/null 2>&1; then + echo "Status: Running" + echo "URL: http://localhost:$PORT" + echo "API Docs: http://localhost:$PORT/docs" +else + echo "Status: Not running" +fi + +echo "" +echo "Process Information:" +pgrep -f "langflow" && ps aux | grep -E "langflow" | grep -v grep || echo "No Langflow processes found" +EOF + chmod +x "$SCRIPTS_DIR/langflow-status.sh" + + print_success "Management scripts created in $SCRIPTS_DIR" + return 0 +} + +# Start Langflow +start_langflow() { + if [[ -f "$SCRIPTS_DIR/start-langflow.sh" ]]; then + "$SCRIPTS_DIR/start-langflow.sh" + else + print_error "Langflow not set up. Run 'setup' first." + return 1 + fi + return 0 +} + +# Stop Langflow +stop_langflow() { + if [[ -f "$SCRIPTS_DIR/stop-langflow.sh" ]]; then + "$SCRIPTS_DIR/stop-langflow.sh" + else + print_warning "Stop script not found. Attempting to kill Langflow processes..." + pkill -f "langflow run" 2>/dev/null || true + fi + return 0 +} + +# Check status +check_status() { + if [[ -f "$SCRIPTS_DIR/langflow-status.sh" ]]; then + "$SCRIPTS_DIR/langflow-status.sh" + else + if curl -s "http://localhost:$LANGFLOW_PORT/health" >/dev/null 2>&1; then + print_success "Langflow is running at http://localhost:$LANGFLOW_PORT" + else + print_warning "Langflow is not running" + fi + fi + return 0 +} + +# Export flows +export_flows() { + local output_dir="${1:-$FLOWS_DIR}" + + print_info "Exporting flows to $output_dir..." + + if [[ ! -d "$LANGFLOW_DIR/venv" ]]; then + print_error "Langflow not set up. Run 'setup' first." + return 1 + fi + + cd "$LANGFLOW_DIR" || exit 1 + # shellcheck source=/dev/null + source venv/bin/activate + + mkdir -p "$output_dir" + + # Export all flows + if langflow export --all --output "$output_dir" 2>/dev/null; then + print_success "Flows exported to $output_dir" + else + print_warning "No flows to export or export failed" + fi + + return 0 +} + +# Import flows +import_flows() { + local input_dir="${1:-$FLOWS_DIR}" + + print_info "Importing flows from $input_dir..." + + if [[ ! -d "$LANGFLOW_DIR/venv" ]]; then + print_error "Langflow not set up. Run 'setup' first." + return 1 + fi + + if [[ ! 
-d "$input_dir" ]]; then + print_error "Directory not found: $input_dir" + return 1 + fi + + cd "$LANGFLOW_DIR" || exit 1 + # shellcheck source=/dev/null + source venv/bin/activate + + # Import all JSON files + local count=0 + for flow_file in "$input_dir"/*.json; do + if [[ -f "$flow_file" ]]; then + if langflow import --file "$flow_file" 2>/dev/null; then + print_success "Imported: $(basename "$flow_file")" + ((count++)) + else + print_warning "Failed to import: $(basename "$flow_file")" + fi + fi + done + + if [[ $count -eq 0 ]]; then + print_warning "No flows found to import" + else + print_success "Imported $count flows" + fi + + return 0 +} + +# Show usage +show_usage() { + echo "AI DevOps Framework - Langflow Helper" + echo "" + echo "Usage: $0 [action] [options]" + echo "" + echo "Actions:" + echo " setup Complete setup of Langflow" + echo " start Start Langflow server" + echo " stop Stop Langflow server" + echo " status Check Langflow status" + echo " check Check prerequisites" + echo " export Export flows to JSON (default: ~/.aidevops/langflow/flows/)" + echo " import Import flows from JSON (default: ~/.aidevops/langflow/flows/)" + echo " help Show this help message" + echo "" + echo "Examples:" + echo " $0 setup # Full setup" + echo " $0 start # Start server" + echo " $0 status # Check status" + echo " $0 export ./my-flows # Export to custom directory" + echo " $0 import ./my-flows # Import from custom directory" + echo "" + echo "URLs (after start):" + echo " Web UI: http://localhost:7860" + echo " API Docs: http://localhost:7860/docs" + echo " Health: http://localhost:7860/health" + return 0 +} + +# Main function +main() { + local action="${1:-help}" + shift || true + + case "$action" in + "setup") + if check_prerequisites; then + setup_langflow + echo "" + print_success "Langflow setup complete!" + echo "" + echo "Next Steps:" + echo "1. Configure API keys in $LANGFLOW_DIR/.env" + echo "2. Start Langflow: $SCRIPTS_DIR/start-langflow.sh" + echo "3. 
Open http://localhost:7860" + fi + ;; + "start") + start_langflow + ;; + "stop") + stop_langflow + ;; + "status") + check_status + ;; + "check") + check_prerequisites + ;; + "export") + export_flows "$@" + ;; + "import") + import_flows "$@" + ;; + "help"|*) + show_usage + ;; + esac + return 0 +} + +main "$@" diff --git a/.agent/tools/ai-orchestration/autogen.md b/.agent/tools/ai-orchestration/autogen.md new file mode 100644 index 00000000..bd9ea3fe --- /dev/null +++ b/.agent/tools/ai-orchestration/autogen.md @@ -0,0 +1,406 @@ +--- +description: Microsoft AutoGen multi-agent framework - setup, usage, and integration +mode: subagent +tools: + read: true + write: true + edit: true + bash: true + glob: true + grep: true + webfetch: true +--- + +# AutoGen - Agentic AI Framework + + + +## Quick Reference + +- **Purpose**: Programming framework for agentic AI with multi-language support +- **License**: MIT (code) / CC-BY-4.0 (docs) +- **Setup**: `bash .agent/scripts/autogen-helper.sh setup` +- **Start**: `~/.aidevops/scripts/start-autogen-studio.sh` +- **Stop**: `~/.aidevops/scripts/stop-autogen-studio.sh` +- **Status**: `~/.aidevops/scripts/autogen-status.sh` +- **URL**: http://localhost:8081 (AutoGen Studio) +- **Config**: `~/.aidevops/autogen/.env` +- **Install**: `pip install autogen-agentchat autogen-ext[openai]` + +**Key Features**: + +- Multi-language support (Python, .NET) +- MCP server integration +- Human-in-the-loop workflows +- AgentChat for rapid prototyping +- Core API for advanced control + + + +## Overview + +AutoGen is a framework from Microsoft for creating multi-agent AI applications that can act autonomously or work alongside humans. It provides both high-level APIs for rapid prototyping and low-level control for production systems. + +## Architecture + +AutoGen uses a layered design: + +- **Core API**: Message passing, event-driven agents, distributed runtime +- **AgentChat API**: Simpler API for rapid prototyping +- **Extensions API**: First and third-party extensions + +## Installation + +### Automated Setup (Recommended) + +```bash +# Run the setup script +bash .agent/scripts/autogen-helper.sh setup + +# Configure API keys +nano ~/.aidevops/autogen/.env + +# Start AutoGen Studio +~/.aidevops/scripts/start-autogen-studio.sh +``` + +### Manual Installation + +```bash +# Create directory and virtual environment +mkdir -p ~/.aidevops/autogen +cd ~/.aidevops/autogen +python3 -m venv venv +source venv/bin/activate + +# Install AutoGen +pip install autogen-agentchat autogen-ext[openai] + +# Install AutoGen Studio +pip install autogenstudio +``` + +### Quick Start + +```bash +# Export your API key +export OPENAI_API_KEY="sk-..." 
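+# (Keys can also be stored persistently in ~/.aidevops/autogen/.env; see Configuration below)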
+ +# Run AutoGen Studio +autogenstudio ui --port 8081 +``` + +## Configuration + +### Environment Variables + +Create `~/.aidevops/autogen/.env`: + +```bash +# OpenAI Configuration (Required) +OPENAI_API_KEY=your_openai_api_key_here + +# Anthropic Configuration (Optional) +ANTHROPIC_API_KEY=your_anthropic_key_here + +# Azure OpenAI (Optional) +AZURE_OPENAI_API_KEY=your_azure_key_here +AZURE_OPENAI_ENDPOINT=https://your-resource.openai.azure.com/ + +# Local LLM (Ollama) +OLLAMA_BASE_URL=http://localhost:11434 + +# AutoGen Studio Configuration +AUTOGEN_STUDIO_PORT=8081 +``` + +## Usage + +### Hello World + +```python +import asyncio +from autogen_agentchat.agents import AssistantAgent +from autogen_ext.models.openai import OpenAIChatCompletionClient + +async def main(): + model_client = OpenAIChatCompletionClient(model="gpt-4.1") + agent = AssistantAgent("assistant", model_client=model_client) + print(await agent.run(task="Say 'Hello World!'")) + await model_client.close() + +asyncio.run(main()) +``` + +### MCP Server Integration + +```python +import asyncio +from autogen_agentchat.agents import AssistantAgent +from autogen_agentchat.ui import Console +from autogen_ext.models.openai import OpenAIChatCompletionClient +from autogen_ext.tools.mcp import McpWorkbench, StdioServerParams + +async def main(): + model_client = OpenAIChatCompletionClient(model="gpt-4.1") + + # Connect to MCP server + server_params = StdioServerParams( + command="npx", + args=["@playwright/mcp@latest", "--headless"] + ) + + async with McpWorkbench(server_params) as mcp: + agent = AssistantAgent( + "web_assistant", + model_client=model_client, + workbench=mcp, + model_client_stream=True, + max_tool_iterations=10 + ) + await Console(agent.run_stream(task="Search for AutoGen documentation")) + +asyncio.run(main()) +``` + +### Multi-Agent Orchestration + +```python +import asyncio +from autogen_agentchat.agents import AssistantAgent +from autogen_agentchat.tools import AgentTool +from autogen_agentchat.ui import Console +from autogen_ext.models.openai import OpenAIChatCompletionClient + +async def main(): + model_client = OpenAIChatCompletionClient(model="gpt-4.1") + + # Create specialist agents + math_agent = AssistantAgent( + "math_expert", + model_client=model_client, + system_message="You are a math expert.", + description="A math expert assistant.", + model_client_stream=True + ) + math_tool = AgentTool(math_agent, return_value_as_last_message=True) + + chemistry_agent = AssistantAgent( + "chemistry_expert", + model_client=model_client, + system_message="You are a chemistry expert.", + description="A chemistry expert assistant.", + model_client_stream=True + ) + chemistry_tool = AgentTool(chemistry_agent, return_value_as_last_message=True) + + # Create orchestrator agent + orchestrator = AssistantAgent( + "assistant", + system_message="You are a general assistant. 
Use expert tools when needed.",
+        model_client=model_client,
+        model_client_stream=True,
+        tools=[math_tool, chemistry_tool],
+        max_tool_iterations=10
+    )
+
+    await Console(orchestrator.run_stream(task="What is the integral of x^2?"))
+    await Console(orchestrator.run_stream(task="What is the molecular weight of water?"))
+
+asyncio.run(main())
+```
+
+### AutoGen Studio (GUI)
+
+```bash
+# Start AutoGen Studio
+autogenstudio ui --port 8081 --appdir ./my-app
+
+# Access at http://localhost:8081
+```
+
+AutoGen Studio provides a no-code GUI for:
+
+- Building multi-agent workflows
+- Testing agent interactions
+- Prototyping without writing code
+
+## Local LLM Support
+
+### Using Ollama
+
+```python
+from autogen_ext.models.ollama import OllamaChatCompletionClient
+
+model_client = OllamaChatCompletionClient(
+    model="llama3.2",
+    base_url="http://localhost:11434"
+)
+```
+
+### Using Azure OpenAI
+
+```python
+from autogen_ext.models.openai import AzureOpenAIChatCompletionClient
+
+model_client = AzureOpenAIChatCompletionClient(
+    model="gpt-4",
+    azure_endpoint="https://your-resource.openai.azure.com/",
+    api_version="2024-02-15-preview"
+)
+```
+
+## .NET Support
+
+AutoGen also supports .NET for cross-language development:
+
+```csharp
+using Microsoft.AutoGen.Contracts;
+using Microsoft.AutoGen.Core;
+
+// Create an agent in .NET
+var agent = new AssistantAgent("assistant", modelClient);
+var result = await agent.RunAsync("Hello from .NET!");
+```
+
+## Git Integration
+
+### Project Structure
+
+```text
+my-autogen-project/
+├── .env                 # gitignored
+├── agents/
+│   ├── researcher.py
+│   └── writer.py
+├── workflows/
+│   └── research_flow.py
+└── config/
+    └── settings.json    # version controlled
+```
+
+### Version Control Best Practices
+
+```bash
+# .gitignore
+.env
+__pycache__/
+*.pyc
+.venv/
+venv/
+*.log
+.autogen/
+```
+
+## Deployment
+
+### Docker
+
+```dockerfile
+FROM python:3.11-slim
+
+WORKDIR /app
+COPY . .
+
+RUN pip install autogen-agentchat 'autogen-ext[openai]'
+
+CMD ["python", "main.py"]
+```
+
+### FastAPI Integration
+
+```python
+from fastapi import FastAPI
+from autogen_agentchat.agents import AssistantAgent
+from autogen_ext.models.openai import OpenAIChatCompletionClient
+
+app = FastAPI()
+
+@app.post("/chat")
+async def chat(message: str):
+    model_client = OpenAIChatCompletionClient(model="gpt-4o-mini")
+    agent = AssistantAgent("assistant", model_client=model_client)
+    result = await agent.run(task=message)
+    await model_client.close()
+    return {"response": str(result)}
+```
+
+## Integration Examples
+
+### With aidevops Workflows
+
+```python
+# DevOps automation with AutoGen
+from autogen_agentchat.agents import AssistantAgent
+from autogen_agentchat.tools import AgentTool
+from autogen_ext.models.openai import OpenAIChatCompletionClient
+
+model_client = OpenAIChatCompletionClient(model="gpt-4o-mini")
+
+# Create specialized DevOps agents
+code_reviewer = AssistantAgent(
+    "code_reviewer",
+    model_client=model_client,
+    system_message="You review code for quality and security issues."
+)
+
+deployment_agent = AssistantAgent(
+    "deployment_agent",
+    model_client=model_client,
+    system_message="You handle deployment tasks and CI/CD."
+)
+
+# Orchestrate DevOps workflow
+devops_orchestrator = AssistantAgent(
+    "devops_lead",
+    model_client=model_client,
+    tools=[
+        AgentTool(code_reviewer),
+        AgentTool(deployment_agent)
+    ]
+)
+```
+
+### With Langflow
+
+AutoGen agents can be wrapped as Langflow custom components.
+
+### With CrewAI
+
+Both frameworks can be used together - AutoGen for conversational flows, CrewAI for role-based teams.
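+
+As an illustrative sketch (not an official integration), a CrewAI crew can be handed to an AutoGen agent as a plain Python tool function, since AgentChat accepts callables as tools. The crew below is hypothetical and assumes `OPENAI_API_KEY` is configured for both frameworks:
+
+```python
+from autogen_agentchat.agents import AssistantAgent
+from autogen_ext.models.openai import OpenAIChatCompletionClient
+from crewai import Agent, Crew, Process, Task
+
+def run_research_crew(topic: str) -> str:
+    """Run a single-agent CrewAI crew and return its final output."""
+    researcher = Agent(
+        role="Researcher",
+        goal=f"Research {topic}",
+        backstory="Expert researcher."
+    )
+    research = Task(
+        description=f"Summarize the latest developments in {topic}",
+        expected_output="A bullet-point summary",
+        agent=researcher
+    )
+    crew = Crew(agents=[researcher], tasks=[research], process=Process.sequential)
+    return str(crew.kickoff())
+
+model_client = OpenAIChatCompletionClient(model="gpt-4o-mini")
+assistant = AssistantAgent(
+    "assistant",
+    model_client=model_client,
+    tools=[run_research_crew]  # the CrewAI crew is exposed as a callable tool
+)
+```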
+ +## Troubleshooting + +### Common Issues + +**Import errors**: + +```bash +# Ensure packages are installed +pip install autogen-agentchat autogen-ext[openai] +``` + +**Async errors**: + +```python +# Always use asyncio.run() for async code +import asyncio +asyncio.run(main()) +``` + +**Model client not closing**: + +```python +# Always close model clients +await model_client.close() + +# Or use context manager +async with model_client: + # ... use client +``` + +### Migration from v0.2 + +If upgrading from AutoGen v0.2, see the [Migration Guide](https://microsoft.github.io/autogen/stable/user-guide/agentchat-user-guide/migration-guide.html). + +## Resources + +- **Documentation**: https://microsoft.github.io/autogen/ +- **GitHub**: https://github.com/microsoft/autogen +- **Discord**: https://aka.ms/autogen-discord +- **Blog**: https://devblogs.microsoft.com/autogen/ +- **PyPI**: https://pypi.org/project/autogen-agentchat/ diff --git a/.agent/tools/ai-orchestration/crewai.md b/.agent/tools/ai-orchestration/crewai.md new file mode 100644 index 00000000..bc25d7b6 --- /dev/null +++ b/.agent/tools/ai-orchestration/crewai.md @@ -0,0 +1,445 @@ +--- +description: CrewAI multi-agent orchestration - setup, usage, and integration +mode: subagent +tools: + read: true + write: true + edit: true + bash: true + glob: true + grep: true + webfetch: true +--- + +# CrewAI - Multi-Agent Orchestration Framework + + + +## Quick Reference + +- **Purpose**: Role-playing autonomous AI agents working as teams +- **License**: MIT (fully open-source, commercial use permitted) +- **Setup**: `bash .agent/scripts/crewai-helper.sh setup` +- **Start**: `~/.aidevops/scripts/start-crewai-studio.sh` +- **Stop**: `~/.aidevops/scripts/stop-crewai-studio.sh` +- **Status**: `~/.aidevops/scripts/crewai-status.sh` +- **URL**: http://localhost:8501 (CrewAI Studio) +- **Config**: `~/.aidevops/crewai/.env` +- **Install**: `pip install crewai` in venv at `~/.aidevops/crewai/venv/` + +**Key Features**: + +- Role-based autonomous agents +- Hierarchical task delegation +- YAML-based configuration +- Flows for event-driven control +- Sequential and parallel processes + + + +## Overview + +CrewAI is a lean, lightning-fast Python framework for orchestrating role-playing, autonomous AI agents. It empowers agents to work together seamlessly, tackling complex tasks through collaborative intelligence. + +## Key Concepts + +### Crews + +Teams of AI agents with defined roles, goals, and backstories working together on tasks. + +### Agents + +Individual AI entities with: + +- **Role**: Job title/function (e.g., "Senior Data Researcher") +- **Goal**: What the agent aims to achieve +- **Backstory**: Context that shapes behavior +- **Tools**: Capabilities the agent can use + +### Tasks + +Specific assignments with: + +- **Description**: What needs to be done +- **Expected Output**: Format/content of results +- **Agent**: Who performs the task + +### Flows + +Event-driven workflows for precise control over complex automations. 
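+
+In a scaffolded project, these concepts are wired together in a `crew.py` class. A minimal sketch, assuming the decorator API from `crewai.project` and the YAML files shown under Configuration below:
+
+```python
+from crewai import Agent, Crew, Process, Task
+from crewai.project import CrewBase, agent, crew, task
+
+@CrewBase
+class ResearchCrew:
+    """Loads agent and task definitions from the YAML config files."""
+    agents_config = "config/agents.yaml"
+    tasks_config = "config/tasks.yaml"
+
+    @agent
+    def researcher(self) -> Agent:
+        return Agent(config=self.agents_config["researcher"], verbose=True)
+
+    @task
+    def research_task(self) -> Task:
+        return Task(config=self.tasks_config["research_task"])
+
+    @crew
+    def crew(self) -> Crew:
+        # self.agents and self.tasks are collected from the decorated methods
+        return Crew(agents=self.agents, tasks=self.tasks, process=Process.sequential)
+```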
+ +## Installation + +### Automated Setup (Recommended) + +```bash +# Run the setup script +bash .agent/scripts/crewai-helper.sh setup + +# Configure API keys +nano ~/.aidevops/crewai/.env + +# Start CrewAI Studio +~/.aidevops/scripts/start-crewai-studio.sh +``` + +### Manual Installation + +```bash +# Create directory and virtual environment +mkdir -p ~/.aidevops/crewai +cd ~/.aidevops/crewai +python3 -m venv venv +source venv/bin/activate + +# Install CrewAI +pip install crewai + +# Install with tools +pip install 'crewai[tools]' +``` + +### Create a New Project + +```bash +# Create a new CrewAI project +crewai create crew my-project +cd my-project + +# Install dependencies +crewai install + +# Run the crew +crewai run +``` + +## Configuration + +### Environment Variables + +Create `~/.aidevops/crewai/.env`: + +```bash +# OpenAI Configuration (Required) +OPENAI_API_KEY=your_openai_api_key_here + +# Anthropic Configuration (Optional) +ANTHROPIC_API_KEY=your_anthropic_key_here + +# Serper API for web search (Optional) +SERPER_API_KEY=your_serper_key_here + +# Local LLM (Ollama) +OLLAMA_BASE_URL=http://localhost:11434 + +# CrewAI Configuration +CREWAI_TELEMETRY=false +``` + +### YAML Configuration + +**agents.yaml**: + +```yaml +researcher: + role: > + {topic} Senior Data Researcher + goal: > + Uncover cutting-edge developments in {topic} + backstory: > + You're a seasoned researcher with a knack for uncovering the latest + developments in {topic}. Known for your ability to find the most relevant + information and present it in a clear and concise manner. + +analyst: + role: > + {topic} Reporting Analyst + goal: > + Create detailed reports based on {topic} data analysis + backstory: > + You're a meticulous analyst with a keen eye for detail. You're known for + your ability to turn complex data into clear and concise reports. +``` + +**tasks.yaml**: + +```yaml +research_task: + description: > + Conduct thorough research about {topic}. + Make sure you find any interesting and relevant information. + expected_output: > + A list with 10 bullet points of the most relevant information about {topic} + agent: researcher + +reporting_task: + description: > + Review the context and expand each topic into a full section for a report. + expected_output: > + A fully fledged report with main topics, each with a full section. + Formatted as markdown. 
  agent: analyst
  output_file: report.md
```

## Usage

### Basic Crew Example

```python
from crewai import Agent, Crew, Process, Task

# Define agents
researcher = Agent(
    role="Senior Researcher",
    goal="Uncover groundbreaking technologies",
    backstory="You are an expert researcher with deep knowledge of AI.",
    verbose=True
)

writer = Agent(
    role="Tech Writer",
    goal="Create engaging content about technology",
    backstory="You are a skilled writer who makes complex topics accessible.",
    verbose=True
)

# Define tasks ({topic} is interpolated from the kickoff inputs)
research_task = Task(
    description="Research the latest developments in {topic}",
    expected_output="A comprehensive summary of {topic} trends",
    agent=researcher
)

writing_task = Task(
    description="Write an article about {topic} based on the research",
    expected_output="A well-written article about {topic}",
    agent=writer
)

# Create and run crew
crew = Crew(
    agents=[researcher, writer],
    tasks=[research_task, writing_task],
    process=Process.sequential,
    verbose=True
)

result = crew.kickoff(inputs={"topic": "AI Agents"})
print(result)
```

### Using Flows

```python
from crewai.flow.flow import Flow, listen, start, router
from crewai import Crew, Agent, Task
from pydantic import BaseModel

class MarketState(BaseModel):
    sentiment: str = "neutral"
    confidence: float = 0.0

class AnalysisFlow(Flow[MarketState]):
    @start()
    def fetch_data(self):
        return {"sector": "tech", "timeframe": "1W"}

    @listen(fetch_data)
    def analyze_with_crew(self, data):
        analyst = Agent(
            role="Market Analyst",
            goal="Analyze market data",
            backstory="Expert in market analysis"
        )

        task = Task(
            description="Analyze {sector} sector for {timeframe}",
            expected_output="Market analysis report",
            agent=analyst
        )

        crew = Crew(agents=[analyst], tasks=[task])
        # A real flow would also update self.state.confidence here,
        # since the router below branches on it (it defaults to 0.0)
        return crew.kickoff(inputs=data)

    @router(analyze_with_crew)
    def route_result(self):
        # Each label needs a matching @listen("high_confidence") /
        # @listen("low_confidence") step to continue the flow
        if self.state.confidence > 0.8:
            return "high_confidence"
        return "low_confidence"

# Run the flow
flow = AnalysisFlow()
result = flow.kickoff()
```

### CrewAI Studio (GUI)

```bash
# Start CrewAI Studio
~/.aidevops/scripts/start-crewai-studio.sh

# Access at http://localhost:8501
```

## Local LLM Support

### Using Ollama

```python
from crewai import Agent, LLM

# Configure Ollama
llm = LLM(
    model="ollama/llama3.2",
    base_url="http://localhost:11434"
)

agent = Agent(
    role="Local AI Assistant",
    goal="Help with tasks using local LLM",
    backstory="You run entirely locally for privacy.",
    llm=llm
)
```

### Using LM Studio

```python
from crewai import LLM

llm = LLM(
    model="openai/local-model",
    base_url="http://localhost:1234/v1",
    api_key="not-needed"
)
```

## Git Integration

### Project Structure

```text
my-crew/
├── .gitignore
├── pyproject.toml
├── README.md
├── .env                   # gitignored
└── src/
    └── my_crew/
        ├── __init__.py
        ├── main.py
        ├── crew.py
        ├── tools/
        │   └── custom_tool.py
        └── config/
            ├── agents.yaml    # version controlled
            └── tasks.yaml     # version controlled
```

### Version Control Best Practices

```bash
# .gitignore
.env
__pycache__/
*.pyc
.venv/
venv/
*.log
```

Track these files:

- `config/agents.yaml` - Agent definitions
- `config/tasks.yaml` - Task definitions
- `crew.py` - Crew orchestration logic
- `tools/*.py` - Custom tools

## Deployment

### Docker

```dockerfile
FROM python:3.11-slim

WORKDIR /app
COPY . .

RUN pip install crewai 'crewai[tools]'

CMD ["crewai", "run"]
```

### FastAPI Integration

```python
from fastapi import FastAPI

app = FastAPI()

@app.post("/run-crew")
async def run_crew(topic: str):
    # create_my_crew() is your project-specific factory returning a configured Crew
    crew = create_my_crew()
    result = crew.kickoff(inputs={"topic": topic})
    return {"result": str(result)}
```

## Integration Examples

### With Langflow

Use CrewAI agents as custom components in Langflow flows.

### With aidevops Workflows

```bash
# Create a DevOps automation crew
crewai create crew devops-automation

# Configure agents for:
# - Code review
# - Documentation
# - Testing
# - Deployment
```

## Troubleshooting

### Common Issues

**Import errors**:

```bash
# Ensure crewai is installed
pip install crewai 'crewai[tools]'
```

**API key errors**:

```bash
# Check environment
echo $OPENAI_API_KEY

# Or load and export the .env file
set -a; source .env; set +a
```

**Memory issues with multiple agents**:

```python
# Reduce verbosity
agent = Agent(..., verbose=False)

# Use smaller models
llm = LLM(model="gpt-4o-mini")
```

## Resources

- **Documentation**: https://docs.crewai.com
- **GitHub**: https://github.com/crewAIInc/crewAI
- **Community**: https://community.crewai.com
- **Examples**: https://github.com/crewAIInc/crewAI-examples
- **Courses**: https://learn.crewai.com

diff --git a/.agent/tools/ai-orchestration/langflow.md b/.agent/tools/ai-orchestration/langflow.md
new file mode 100644
index 00000000..93280fb4
--- /dev/null
+++ b/.agent/tools/ai-orchestration/langflow.md
@@ -0,0 +1,405 @@
---
description: Langflow visual AI workflow builder - setup, usage, and integration
mode: subagent
tools:
  read: true
  write: true
  edit: true
  bash: true
  glob: true
  grep: true
  webfetch: true
---

# Langflow - Visual AI Workflow Builder

## Quick Reference

- **Purpose**: Visual drag-and-drop builder for AI-powered agents and workflows
- **License**: MIT (fully open-source, commercial use permitted)
- **Setup**: `bash .agent/scripts/langflow-helper.sh setup`
- **Start**: `~/.aidevops/scripts/start-langflow.sh`
- **Stop**: `~/.aidevops/scripts/stop-langflow.sh`
- **Status**: `~/.aidevops/scripts/langflow-status.sh`
- **URL**: http://localhost:7860
- **Config**: `~/.aidevops/langflow/.env`
- **Install**: `pip install langflow` in venv at `~/.aidevops/langflow/venv/`
- **Privacy**: Flows stored locally, optional cloud sync

**Key Features**:

- Drag-and-drop visual flow builder
- Export flows to Python code
- Built-in MCP server support
- LangChain ecosystem integration
- Local LLM support (Ollama)

## Overview

Langflow is a visual tool for building and deploying AI-powered agents and workflows. It combines a visual authoring experience with built-in API and MCP servers that turn every workflow into a callable tool.
## Installation

### Automated Setup (Recommended)

```bash
# Run the setup script
bash .agent/scripts/langflow-helper.sh setup

# Configure API keys
nano ~/.aidevops/langflow/.env

# Start Langflow
~/.aidevops/scripts/start-langflow.sh
```

### Manual Installation

```bash
# Create directory and virtual environment
mkdir -p ~/.aidevops/langflow
cd ~/.aidevops/langflow
python3 -m venv venv
source venv/bin/activate

# Install Langflow
pip install langflow

# Run Langflow
langflow run
```

### Docker Installation

```bash
# Run with Docker
docker run -p 7860:7860 langflowai/langflow:latest

# With persistent storage
docker run -p 7860:7860 -v langflow_data:/app/langflow langflowai/langflow:latest
```

### Desktop App

Download Langflow Desktop from https://www.langflow.org/desktop for Windows/macOS.

## Configuration

### Environment Variables

Create `~/.aidevops/langflow/.env`:

```bash
# OpenAI Configuration
OPENAI_API_KEY=your_openai_api_key_here

# Anthropic Configuration (optional)
ANTHROPIC_API_KEY=your_anthropic_key_here

# Langflow Configuration
LANGFLOW_HOST=0.0.0.0
LANGFLOW_PORT=7860
LANGFLOW_WORKERS=1

# Database (default: SQLite)
LANGFLOW_DATABASE_URL=sqlite:///./langflow.db

# Local LLM (Ollama)
OLLAMA_BASE_URL=http://localhost:11434
```

### Custom Components

Create custom components in Python:

```python
# ~/.aidevops/langflow/components/my_component.py
from langflow.custom import CustomComponent
from langflow.schema import Data

class MyCustomComponent(CustomComponent):
    display_name = "My Custom Component"
    description = "A custom component for aidevops"

    def build(self, input_text: str) -> Data:
        # Your custom logic here
        result = input_text.upper()
        return Data(text=result)
```

Load custom components:

```bash
langflow run --components-path ~/.aidevops/langflow/components/
```

## Usage

### Starting Services

```bash
# Start Langflow
~/.aidevops/scripts/start-langflow.sh

# Check status
~/.aidevops/scripts/langflow-status.sh

# Stop Langflow
~/.aidevops/scripts/stop-langflow.sh
```

### Accessing the Interface

- **Web UI**: http://localhost:7860
- **API Docs**: http://localhost:7860/docs
- **Health Check**: http://localhost:7860/health

### Building Your First Flow

1. Open http://localhost:7860
2. Click "New Flow" or use a template
3. Drag components from the sidebar
4. Connect components by dragging edges
5. Configure each component's parameters
6. Click "Run" to test the flow

### Common Flow Patterns

**RAG Pipeline**:

```text
[Document Loader] → [Text Splitter] → [Embeddings] → [Vector Store]
                                                          ↓
[User Input] → [Retriever] → [LLM] → [Output]
```

**Multi-Agent Chat**:

```text
[User Input] → [Router Agent] → [Specialist Agent 1]
                              → [Specialist Agent 2]
                              → [Aggregator] → [Output]
```

## API Integration

### REST API

```python
import requests

# Run a flow; replace <flow-id> with the ID shown in the flow's URL
response = requests.post(
    "http://localhost:7860/api/v1/run/<flow-id>",
    json={
        "input_value": "Hello, world!",
        "output_type": "chat",
        "input_type": "chat"
    }
)
print(response.json())
```

### MCP Server

Langflow can expose flows as MCP tools:

```bash
# Start with MCP server enabled
langflow run --mcp

# Or configure in .env
LANGFLOW_MCP_ENABLED=true
```

Then connect from AI assistants that support MCP.
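For quick checks from the shell, the same run endpoint can be exercised with `curl`. A minimal sketch, assuming a flow published at `<flow-id>`, default settings, and no authentication:

```bash
# Run a flow from the shell; replace <flow-id> with your flow's ID
curl -s -X POST "http://localhost:7860/api/v1/run/<flow-id>" \
  -H "Content-Type: application/json" \
  -d '{"input_value": "Hello, world!", "output_type": "chat", "input_type": "chat"}'
```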
## Git Integration

### Exporting Flows

```bash
# Export a flow to JSON; replace <flow-id> with the flow's ID
langflow export --flow-id <flow-id> --output flows/my-flow.json

# Export all flows
langflow export --all --output flows/
```

### Importing Flows

```bash
# Import a flow from JSON
langflow import --file flows/my-flow.json

# Import all flows from directory
langflow import --directory flows/
```

### Version Control Best Practices

```bash
# .gitignore additions
langflow.db
*.log
__pycache__/
.env

# Track these
flows/*.json
components/*.py
```

### Bi-directional Sync

Use file watchers for automatic sync:

```python
# sync_flows.py - run with: python sync_flows.py
import subprocess
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler

class FlowSyncHandler(FileSystemEventHandler):
    def on_modified(self, event):
        if event.src_path.endswith('.json'):
            subprocess.run(['langflow', 'import', '--file', event.src_path])

# Watch flows/ and re-import on change; stop with Ctrl-C
observer = Observer()
observer.schedule(FlowSyncHandler(), path='flows/')
observer.start()
while True:
    time.sleep(1)
```

## Local LLM Support

### Using Ollama

```bash
# Install Ollama
curl -fsSL https://ollama.com/install.sh | sh

# Pull a model
ollama pull llama3.2
ollama pull codellama

# Configure in Langflow:
# add an Ollama component and set its base URL to http://localhost:11434
```

### Using LM Studio

1. Download LM Studio from https://lmstudio.ai
2. Load a model and start the local server
3. In Langflow, use the OpenAI-compatible endpoint: http://localhost:1234/v1

## Deployment

### Docker Compose

```yaml
# docker-compose.yml
version: '3.8'
services:
  langflow:
    image: langflowai/langflow:latest
    ports:
      - "7860:7860"
    volumes:
      - langflow_data:/app/langflow
      - ./flows:/app/flows
    environment:
      - OPENAI_API_KEY=${OPENAI_API_KEY}
    restart: unless-stopped

volumes:
  langflow_data:
```

### Production Considerations

- Use PostgreSQL instead of SQLite for production
- Enable authentication for multi-user deployments
- Use a reverse proxy (nginx/traefik) for HTTPS
- Set up monitoring and logging

## Troubleshooting

### Common Issues

**Port already in use**:

```bash
# Find the process, then kill it by PID
lsof -i :7860
kill -9 <PID>
```

**Database errors**:

```bash
# Reset database
rm ~/.aidevops/langflow/langflow.db
langflow run
```

**Component not loading**:

```bash
# Check component syntax
python -c "from components.my_component import MyCustomComponent"
```

### Logs

```bash
# View logs
tail -f ~/.aidevops/langflow/langflow.log

# Debug mode
LANGFLOW_LOG_LEVEL=DEBUG langflow run
```

## Integration Examples

### With aidevops Workflows

```bash
# Export flow for version control; replace <flow-id> with the flow's ID
langflow export --flow-id <flow-id> --output flows/devops-automation.json

# Commit to git
git add flows/devops-automation.json
git commit -m "feat: add DevOps automation flow"
```

### With CrewAI

Langflow can orchestrate CrewAI agents:

1. Create a custom component that imports CrewAI
2. Define agents and tasks in the component
3. Connect to other Langflow components

### With OpenCode

Use Langflow flows as tools in OpenCode via MCP:

```json
{
  "mcpServers": {
    "langflow": {
      "command": "langflow",
      "args": ["run", "--mcp"]
    }
  }
}
```

## Resources

- **Documentation**: https://docs.langflow.org
- **GitHub**: https://github.com/langflow-ai/langflow
- **Discord**: https://discord.gg/EqksyE2EX9
- **Templates**: https://www.langflow.org/templates

diff --git a/.agent/tools/ai-orchestration/overview.md b/.agent/tools/ai-orchestration/overview.md
new file mode 100644
index 00000000..2e26ec15
--- /dev/null
+++ b/.agent/tools/ai-orchestration/overview.md
@@ -0,0 +1,323 @@
---
description: AI orchestration framework comparison and selection guide
mode: subagent
tools:
  read: true
  write: false
  edit: false
  bash: true
  glob: true
  grep: true
  webfetch: true
---

# AI Orchestration Frameworks - Overview

## Quick Reference

- **Purpose**: Build and deploy AI-powered agents and multi-agent workflows
- **Frameworks**: Langflow, CrewAI, AutoGen, Agno
- **Common Pattern**: `~/.aidevops/{tool}/` with venv, .env, start scripts
- **All MIT Licensed**: Full commercial use permitted

**Quick Setup**:

```bash
# Langflow (visual flow builder)
bash .agent/scripts/langflow-helper.sh setup

# CrewAI (multi-agent teams)
bash .agent/scripts/crewai-helper.sh setup

# AutoGen (conversational agents)
bash .agent/scripts/autogen-helper.sh setup

# Agno (enterprise agent OS)
bash .agent/scripts/agno-setup.sh setup
```

**Port Allocation** (auto-managed via `localhost-helper.sh`):

| Tool | Default Port | Health Check | Port File |
|------|--------------|--------------|-----------|
| Langflow | 7860 | /health | /tmp/langflow_port |
| CrewAI Studio | 8501 | / | /tmp/crewai_studio_port |
| AutoGen Studio | 8081 | / | /tmp/autogen_studio_port |
| Agno | 7777 (API), 3000 (UI) | /health | - |

**Port Conflict Resolution**: All helper scripts integrate with `localhost-helper.sh` for automatic port management. If a port is in use, an alternative is automatically selected.
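Scripts that need to talk to a running tool can resolve the active port from the port file instead of assuming the default. A minimal sketch, assuming the helper wrote the active port to `/tmp/langflow_port` at startup:

```bash
# Resolve the active Langflow port, falling back to the default
PORT=$(cat /tmp/langflow_port 2>/dev/null || echo 7860)
curl -fsS "http://localhost:${PORT}/health" && echo "Langflow healthy on port ${PORT}"
```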
## Decision Matrix

Use this matrix to select the right framework for your use case:

| Objective | Recommended | Why | Alternatives |
|-----------|-------------|-----|--------------|
| **Rapid Prototyping (Visual)** | Langflow | Drag-and-drop GUI, exports to code, MCP server support | CrewAI Studio |
| **Multi-Agent Teams** | CrewAI | Hierarchical roles/tasks, sequential/parallel orchestration | AutoGen |
| **Conversational/Iterative** | AutoGen | Group chats, human-in-loop, code execution | CrewAI Flows |
| **Complex Orchestration** | Langflow | Stateful workflows, branching, LangGraph integration | CrewAI Flows |
| **Enterprise Agent OS** | Agno | Production-ready runtime, specialized DevOps agents | - |
| **Code-First Development** | CrewAI | YAML configs, Python decorators, minimal boilerplate | AutoGen |
| **Microsoft Ecosystem** | AutoGen | .NET support, Azure integration | - |
| **Local LLM Priority** | All | All support Ollama/local models | - |

## Framework Comparison

### Langflow

**Best for**: Visual prototyping, RAG pipelines, quick iterations

- **License**: MIT
- **Stars**: 143k+
- **GUI**: Native web UI (localhost:7860)
- **Install**: `pip install langflow`
- **Run**: `langflow run`

**Strengths**:

- Drag-and-drop visual flow builder
- Exports flows to Python code
- Built-in MCP server support
- LangChain ecosystem integration
- Desktop app available

**Use Cases**:

- RAG applications
- Chatbot prototypes
- API workflow automation
- Visual debugging of agent flows

### CrewAI

**Best for**: Role-based multi-agent teams, production workflows

- **License**: MIT
- **Stars**: 42.5k+
- **GUI**: CrewAI Studio (Streamlit-based)
- **Install**: `pip install crewai`
- **Run**: `crewai run`

**Strengths**:

- Role-playing autonomous agents
- Hierarchical task delegation
- YAML-based configuration
- Flows for event-driven control
- Strong community (100k+ certified developers)

**Use Cases**:

- Content generation teams
- Research automation
- Sales/marketing workflows
- Code review pipelines

### AutoGen

**Best for**: Conversational agents, research tasks, Microsoft integration

- **License**: MIT (code) / CC-BY-4.0 (docs)
- **Stars**: 53.4k+
- **GUI**: AutoGen Studio
- **Install**: `pip install autogen-agentchat 'autogen-ext[openai]'`
- **Run**: `autogenstudio ui`

**Strengths**:

- Multi-language support (Python, .NET)
- MCP server integration
- Human-in-the-loop workflows
- AgentChat for rapid prototyping
- Core API for advanced control

**Use Cases**:

- Code generation/review
- Research assistants
- Interactive debugging
- Enterprise .NET integration

### Agno

**Best for**: Enterprise DevOps, production agent runtime

- **License**: MIT
- **GUI**: Agent-UI (localhost:3000)
- **Install**: `pip install "agno[all]"`
- **Run**: `~/.aidevops/scripts/start-agno-stack.sh`

**Strengths**:

- Complete local processing (privacy)
- Specialized DevOps agents
- Knowledge base support
- Production-ready runtime

**Use Cases**:

- Infrastructure automation
- Code review workflows
- Documentation generation
- DevOps task automation

## Common Design Patterns

All AI orchestration tools in aidevops follow these patterns:

### Directory Structure

```text
~/.aidevops/{tool}/
├── venv/              # Python virtual environment
├── .env               # API keys and configuration
├── .env.example       # Template for .env
├── start_{tool}.sh    # Startup script
└── {tool-specific}/   # Tool-specific files
```

### Helper Script Pattern

Each tool has a helper script at `.agent/scripts/{tool}-helper.sh` with:

```bash
# Standard commands
setup    # Install and configure
start    # Start services
stop     # Stop services
status   # Check health
check    # Verify prerequisites
help     # Show usage
```

### Configuration Template

Each tool has a config template at `configs/{tool}-config.json.txt` with:

- Default ports and URLs
- Agent definitions
- Model configuration
- Security settings
- Integration options

### Management Scripts

After setup, management scripts are created at `~/.aidevops/scripts/`:

- `start-{tool}-stack.sh` - Start all services
- `stop-{tool}-stack.sh` - Stop all services
- `{tool}-status.sh` - Check service health

## Integration with aidevops

### Git Version Control

All frameworks support exporting configurations for Git:

| Framework | Export Format | Location |
|-----------|---------------|----------|
| Langflow | JSON flows | `flows/*.json` |
| CrewAI | YAML configs | `config/agents.yaml`, `config/tasks.yaml` |
| AutoGen | Python/JSON | `agents/*.py`, `*.json` |
| Agno | Python | `agent_os.py` |

### Bi-directional Sync

For Langflow, use the JSON bridge pattern:

```bash
# Export flow to JSON; replace <flow-id> with the flow's ID
langflow export --flow-id <flow-id> --output flows/my-flow.json

# Import JSON to Langflow
langflow import --file flows/my-flow.json
```

### Local LLM Support

All frameworks support Ollama for local LLMs:

```bash
# Install Ollama
curl -fsSL https://ollama.com/install.sh | sh

# Pull a model
ollama pull llama3.2

# Configure in .env
OLLAMA_BASE_URL=http://localhost:11434
```

## Packaging for Production

See `packaging.md` for detailed deployment guides:

- **Web/SaaS**: FastAPI + Docker + Kubernetes
- **Desktop**: PyInstaller executables
- **Mobile**: React Native/Flutter wrappers

## Related Documentation

| Document | Purpose |
|----------|---------|
| `langflow.md` | Langflow setup and usage |
| `crewai.md` | CrewAI setup and usage |
| `autogen.md` | AutoGen setup and usage |
| `agno.md` | Agno setup and usage |
| `packaging.md` | Deployment and packaging |

## Troubleshooting

### Common Issues

**Port conflicts**:

All AI orchestration helper scripts integrate with `localhost-helper.sh` for automatic port management. If a default port is in use, an alternative is automatically selected and saved to `/tmp/{tool}_port`.
+ +```bash +# Check port availability (uses localhost-helper.sh if available) +~/.aidevops/scripts/localhost-helper.sh check-port 7860 + +# Find next available port +~/.aidevops/scripts/localhost-helper.sh find-port 7860 + +# List all dev ports in use +~/.aidevops/scripts/localhost-helper.sh list-ports + +# Kill process on port +~/.aidevops/scripts/localhost-helper.sh kill-port 7860 + +# Manual fallback +lsof -i :7860 +kill -9 $(lsof -t -i:7860) +``` + +**Virtual environment issues**: + +```bash +# Recreate venv +rm -rf ~/.aidevops/{tool}/venv +bash .agent/scripts/{tool}-helper.sh setup +``` + +**API key errors**: + +```bash +# Verify .env file +cat ~/.aidevops/{tool}/.env + +# Check environment +env | grep -E "(OPENAI|ANTHROPIC|OLLAMA)" +``` + +### Getting Help + +- Langflow: https://github.com/langflow-ai/langflow/discussions +- CrewAI: https://community.crewai.com +- AutoGen: https://github.com/microsoft/autogen/discussions +- Agno: https://github.com/agno-ai/agno/discussions diff --git a/.agent/tools/ai-orchestration/packaging.md b/.agent/tools/ai-orchestration/packaging.md new file mode 100644 index 00000000..58267981 --- /dev/null +++ b/.agent/tools/ai-orchestration/packaging.md @@ -0,0 +1,647 @@ +--- +description: Packaging AI orchestration automations into deployable services +mode: subagent +tools: + read: true + write: true + edit: true + bash: true + glob: true + grep: true + webfetch: true +--- + +# Packaging AI Automations for Deployment + + + +## Quick Reference + +- **Purpose**: Turn AI orchestration workflows into deployable services +- **Targets**: Web/SaaS, Desktop apps, Mobile backends, APIs +- **Principle**: Zero lock-in, standard Python dependencies, exportable + +**Deployment Options**: + +| Target | Technology | Best For | +|--------|------------|----------| +| Web API | FastAPI + Docker | SaaS, microservices | +| Desktop | PyInstaller | Offline tools | +| Mobile Backend | FastAPI + Cloud | App backends | +| Serverless | Vercel/AWS Lambda | Event-driven | + +**Quick Commands**: + +```bash +# Build Docker image +docker build -t my-agent-api . + +# Create executable +pyinstaller --onefile main.py + +# Deploy to Vercel +vercel deploy +``` + + + +## Overview + +This guide covers packaging AI orchestration automations (Langflow, CrewAI, AutoGen, Agno) into production-ready services. The focus is on zero lock-in approaches using standard Python dependencies. + +## Web/SaaS Deployment + +### FastAPI Backend + +Create a REST API for your AI agents: + +```python +# api/main.py +from fastapi import FastAPI, HTTPException +from pydantic import BaseModel +from typing import Optional +import asyncio + +app = FastAPI( + title="AI Agent API", + description="AI DevOps Framework - Agent Service", + version="1.0.0" +) + +class AgentRequest(BaseModel): + task: str + context: Optional[dict] = None + +class AgentResponse(BaseModel): + result: str + status: str + +# CrewAI endpoint +@app.post("/crew/run", response_model=AgentResponse) +async def run_crew(request: AgentRequest): + from crewai import Crew, Agent, Task + + try: + agent = Agent( + role="Assistant", + goal="Complete the requested task", + backstory="You are a helpful AI assistant." 
        )

        task = Task(
            description=request.task,
            expected_output="Task completion result",
            agent=agent
        )

        crew = Crew(agents=[agent], tasks=[task])
        result = crew.kickoff()

        return AgentResponse(result=str(result), status="success")
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

# AutoGen endpoint
@app.post("/autogen/chat", response_model=AgentResponse)
async def autogen_chat(request: AgentRequest):
    from autogen_agentchat.agents import AssistantAgent
    from autogen_ext.models.openai import OpenAIChatCompletionClient

    try:
        model_client = OpenAIChatCompletionClient(model="gpt-4o-mini")
        agent = AssistantAgent("assistant", model_client=model_client)
        result = await agent.run(task=request.task)
        await model_client.close()

        return AgentResponse(result=str(result), status="success")
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

# Health check
@app.get("/health")
async def health_check():
    return {"status": "healthy"}
```

### Docker Deployment

```dockerfile
# Dockerfile
FROM python:3.11-slim

WORKDIR /app

# Install dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy application
COPY . .

# Expose port
EXPOSE 8000

# Run with uvicorn
CMD ["uvicorn", "api.main:app", "--host", "0.0.0.0", "--port", "8000"]
```

**requirements.txt**:

```text
fastapi>=0.100.0
uvicorn>=0.23.0
crewai>=0.1.0
autogen-agentchat>=0.4.0
autogen-ext[openai]>=0.4.0
python-dotenv>=1.0.0
```

### Docker Compose

```yaml
# docker-compose.yml
version: '3.8'

services:
  agent-api:
    build: .
    ports:
      - "8000:8000"
    environment:
      - OPENAI_API_KEY=${OPENAI_API_KEY}
    volumes:
      - ./data:/app/data
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3

  # Optional: Redis for caching
  redis:
    image: redis:alpine
    ports:
      - "6379:6379"
```

### Kubernetes Deployment

```yaml
# k8s/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: agent-api
spec:
  replicas: 3
  selector:
    matchLabels:
      app: agent-api
  template:
    metadata:
      labels:
        app: agent-api
    spec:
      containers:
      - name: agent-api
        image: your-registry/agent-api:latest
        ports:
        - containerPort: 8000
        env:
        - name: OPENAI_API_KEY
          valueFrom:
            secretKeyRef:
              name: api-secrets
              key: openai-key
        resources:
          requests:
            memory: "512Mi"
            cpu: "250m"
          limits:
            memory: "2Gi"
            cpu: "1000m"
---
apiVersion: v1
kind: Service
metadata:
  name: agent-api-service
spec:
  selector:
    app: agent-api
  ports:
  - port: 80
    targetPort: 8000
  type: LoadBalancer
```

### SaaS Boilerplate

Add authentication and billing:

```python
# api/auth.py
from fastapi import Depends, HTTPException, Security
from fastapi.security import APIKeyHeader

api_key_header = APIKeyHeader(name="X-API-Key")

async def verify_api_key(api_key: str = Security(api_key_header)):
    # Verify API key against database (is_valid_key is app-specific)
    if not is_valid_key(api_key):
        raise HTTPException(status_code=403, detail="Invalid API key")
    return api_key

# api/billing.py
import os
import time

import stripe

stripe.api_key = os.getenv("STRIPE_SECRET_KEY")

def create_usage_record(customer_id: str, quantity: int):
    """Record API usage for billing"""
    # get_subscription_item() is your app-specific customer lookup
    stripe.SubscriptionItem.create_usage_record(
        subscription_item_id=get_subscription_item(customer_id),
        quantity=quantity,
        timestamp=int(time.time())
    )
```
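Once the container is up, a quick smoke test confirms that the endpoints defined in `api/main.py` respond. A sketch, assuming the service is running locally on port 8000:

```bash
# Health check
curl -s http://localhost:8000/health

# Exercise the CrewAI endpoint (payload matches the AgentRequest model)
curl -s -X POST http://localhost:8000/crew/run \
  -H "Content-Type: application/json" \
  -d '{"task": "Summarize the latest release notes"}'
```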
## Desktop Application

### PyInstaller Executable

```python
# desktop/main.py
import tkinter as tk
from tkinter import ttk, scrolledtext
import threading

class AgentApp:
    def __init__(self, root):
        self.root = root
        self.root.title("AI Agent Desktop")
        self.root.geometry("800x600")

        # Input frame
        input_frame = ttk.Frame(root, padding="10")
        input_frame.pack(fill=tk.X)

        ttk.Label(input_frame, text="Task:").pack(side=tk.LEFT)
        self.task_entry = ttk.Entry(input_frame, width=60)
        self.task_entry.pack(side=tk.LEFT, padx=5)

        self.run_btn = ttk.Button(input_frame, text="Run", command=self.run_agent)
        self.run_btn.pack(side=tk.LEFT)

        # Output area
        self.output = scrolledtext.ScrolledText(root, height=30)
        self.output.pack(fill=tk.BOTH, expand=True, padx=10, pady=10)

    def run_agent(self):
        task = self.task_entry.get()
        if not task:
            return

        self.run_btn.config(state=tk.DISABLED)
        self.output.insert(tk.END, f"\n> Running: {task}\n")

        # Run in background thread so the UI stays responsive
        thread = threading.Thread(target=self._execute_agent, args=(task,))
        thread.start()

    def _execute_agent(self, task):
        try:
            from crewai import Crew, Agent, Task

            agent = Agent(
                role="Assistant",
                goal="Help with tasks",
                backstory="Helpful AI assistant"
            )

            crew_task = Task(
                description=task,
                expected_output="Result",
                agent=agent
            )

            crew = Crew(agents=[agent], tasks=[crew_task])
            result = crew.kickoff()

            self.root.after(0, lambda: self._show_result(str(result)))
        except Exception as e:
            # Capture the message now: the exception variable is cleared
            # when the except block exits, so a bare lambda would fail later
            message = f"Error: {e}"
            self.root.after(0, lambda: self._show_result(message))

    def _show_result(self, result):
        self.output.insert(tk.END, f"\nResult:\n{result}\n")
        self.run_btn.config(state=tk.NORMAL)

if __name__ == "__main__":
    root = tk.Tk()
    app = AgentApp(root)
    root.mainloop()
```

**Build executable**:

```bash
# Install PyInstaller
pip install pyinstaller

# Build single executable
pyinstaller --onefile --windowed desktop/main.py

# Output in dist/main.exe (Windows) or dist/main (macOS/Linux)
```

### Electron Wrapper

For a more polished desktop experience:

```javascript
// electron/main.js
const { app, BrowserWindow } = require('electron');
const { spawn } = require('child_process');
const path = require('path');

let mainWindow;
let pythonProcess;

function createWindow() {
  mainWindow = new BrowserWindow({
    width: 1200,
    height: 800,
    webPreferences: {
      nodeIntegration: true,
      contextIsolation: false
    }
  });

  // Start Python backend
  pythonProcess = spawn('python', [
    path.join(__dirname, 'backend', 'server.py')
  ]);

  // Load frontend
  mainWindow.loadFile('index.html');
}

app.whenReady().then(createWindow);

app.on('window-all-closed', () => {
  if (pythonProcess) pythonProcess.kill();
  if (process.platform !== 'darwin') app.quit();
});
```

## Mobile Backend

### API for Mobile Apps

```python
# mobile_api/main.py
from fastapi import FastAPI, BackgroundTasks, HTTPException
from pydantic import BaseModel
from typing import Optional
import uuid

app = FastAPI()

# In-memory task storage (use Redis in production)
tasks = {}

class MobileRequest(BaseModel):
    task: str
    user_id: str

class TaskStatus(BaseModel):
    task_id: str
    status: str
    result: Optional[str] = None

@app.post("/tasks/create")
async def create_task(request: MobileRequest, background_tasks: BackgroundTasks):
    task_id = str(uuid.uuid4())
    tasks[task_id] = {"status": "pending", "result": None}

    # Run in background
    background_tasks.add_task(process_task, task_id, request.task)

    return {"task_id": task_id}

@app.get("/tasks/{task_id}", response_model=TaskStatus)
async def get_task_status(task_id: str):
    if task_id not in tasks:
        raise HTTPException(status_code=404, detail="Task not found")

    return TaskStatus(
        task_id=task_id,
        status=tasks[task_id]["status"],
        result=tasks[task_id]["result"]
    )

async def process_task(task_id: str, task: str):
    tasks[task_id]["status"] = "processing"

    try:
        # run_agent() is your framework-specific entry point
        # (e.g., a Crew kickoff or an AutoGen agent.run)
        result = await run_agent(task)
        tasks[task_id]["result"] = result
        tasks[task_id]["status"] = "completed"
    except Exception as e:
        tasks[task_id]["result"] = str(e)
        tasks[task_id]["status"] = "failed"
```

### React Native Integration

```javascript
// mobile/AgentService.js
const API_URL = 'https://your-api.com';

export async function createTask(task, userId) {
  const response = await fetch(`${API_URL}/tasks/create`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${getToken()}`
    },
    body: JSON.stringify({ task, user_id: userId })
  });
  return response.json();
}

export async function pollTaskStatus(taskId) {
  const response = await fetch(`${API_URL}/tasks/${taskId}`);
  return response.json();
}
```

## Serverless Deployment

### Vercel Functions

```python
# api/agent.py (Vercel serverless function)
from http.server import BaseHTTPRequestHandler
import json

class handler(BaseHTTPRequestHandler):
    def do_POST(self):
        content_length = int(self.headers['Content-Length'])
        post_data = json.loads(self.rfile.read(content_length))

        task = post_data.get('task', '')

        # run_lightweight_agent() is your own function;
        # keep it lightweight to fit serverless time limits
        result = run_lightweight_agent(task)

        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()
        self.wfile.write(json.dumps({'result': result}).encode())
```

### AWS Lambda

```python
# lambda_function.py
import json

def lambda_handler(event, context):
    body = json.loads(event.get('body', '{}'))
    task = body.get('task', '')

    # run_agent() is your framework-specific entry point
    result = run_agent(task)

    return {
        'statusCode': 200,
        'body': json.dumps({'result': result})
    }
```

## Export Patterns

### Langflow to Standalone

```bash
# Export flow to Python; replace <flow-id> with the flow's ID
langflow export --flow-id <flow-id> --output my_flow.py

# The exported file can run independently
python my_flow.py
```

### CrewAI Project Export

```bash
# Create standalone project
crewai create crew my-project

# Package for distribution
cd my-project
pip freeze > requirements.txt
```

### AutoGen Workflow Export

```python
# Save workflow configuration
import json

workflow_config = {
    "agents": [
        {"name": "researcher", "role": "Research specialist"},
        {"name": "writer", "role": "Content writer"}
    ],
    "tasks": [
        {"description": "Research topic", "agent": "researcher"},
        {"description": "Write report", "agent": "writer"}
    ]
}

with open("workflow.json", "w") as f:
    json.dump(workflow_config, f, indent=2)
```

## CI/CD Integration

### GitHub Actions

```yaml
# .github/workflows/deploy.yml
name: Deploy Agent API

on:
  push:
    branches: [main]

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install dependencies
        run: pip install -r requirements.txt

      - name: Run tests
        run: pytest tests/

      - name: Build Docker image
        run: docker build -t agent-api .

      - name: Push to registry
        run: |
          docker tag agent-api ${{ secrets.REGISTRY }}/agent-api:${{ github.sha }}
          docker push ${{ secrets.REGISTRY }}/agent-api:${{ github.sha }}

      - name: Deploy to Kubernetes
        run: |
          kubectl set image deployment/agent-api \
            agent-api=${{ secrets.REGISTRY }}/agent-api:${{ github.sha }}
```

## Best Practices

### Zero Lock-in

1. Use standard Python dependencies
2. Export configurations to JSON/YAML
3. Avoid proprietary formats
4. Document all external dependencies

### Security

1. Never hardcode API keys
2. Use environment variables or secret managers
3. Implement rate limiting
4. Add authentication for production APIs

### Performance

1. Use async/await for I/O operations
2. Implement caching where appropriate
3. Consider connection pooling for databases
4. Monitor memory usage with multiple agents

### Monitoring

```python
# Add observability with Prometheus metrics
from prometheus_client import Counter, Histogram

agent_requests = Counter('agent_requests_total', 'Total agent requests')
agent_latency = Histogram('agent_latency_seconds', 'Agent request latency')

@agent_latency.time()
async def run_agent_with_metrics(task):
    # run_agent() is your framework-specific entry point
    agent_requests.inc()
    return await run_agent(task)
```

diff --git a/README.md b/README.md
index b30bcef5..caba97eb 100644
--- a/README.md
+++ b/README.md
@@ -439,6 +439,12 @@ ssh-keygen -t ed25519 -C "your-email@domain.com"
 - **[Agno](https://agno.com/)**: Local AI agent operating system for DevOps automation
 - **[Pandoc](https://pandoc.org/)**: Document conversion to markdown for AI processing

+### **AI Orchestration Frameworks**
+
+- **[Langflow](https://langflow.org/)**: Visual drag-and-drop builder for AI workflows (MIT, localhost:7860)
+- **[CrewAI](https://crewai.com/)**: Multi-agent teams with role-based orchestration (MIT, localhost:8501)
+- **[AutoGen](https://microsoft.github.io/autogen/)**: Microsoft's agentic AI framework with MCP support (MIT, localhost:8081)
+
 ### **WordPress Development**

 - **[LocalWP](https://localwp.com)**: WordPress development environment with MCP database access

diff --git a/configs/autogen-config.json.txt b/configs/autogen-config.json.txt
new file mode 100644
index 00000000..41459a03
--- /dev/null
+++ b/configs/autogen-config.json.txt
@@ -0,0 +1,118 @@
{
  "autogen_config": {
    "description": "Microsoft AutoGen configuration for AI DevOps Framework",
    "version": "1.0.0",
    "framework_integration": true,
    "setup": {
      "autogen_directory": "~/.aidevops/autogen",
      "scripts_directory": "~/.aidevops/scripts",
      "default_studio_port": 8081
    },
    "studio": {
      "enabled": true,
      "port": 8081,
      "appdir": "~/.aidevops/autogen/studio-data"
    },
    "agent_defaults": {
      "model_client_stream": true,
      "max_tool_iterations": 10
    },
    "model_providers": {
      "openai": {
        "enabled": true,
        "api_key_env": "OPENAI_API_KEY",
        "default_model": "gpt-4o-mini"
      },
      "anthropic": {
        "enabled": true,
        "api_key_env": "ANTHROPIC_API_KEY"
      },
      "azure_openai": {
        "enabled": false,
        "api_key_env": "AZURE_OPENAI_API_KEY",
        "endpoint_env": "AZURE_OPENAI_ENDPOINT"
      },
      "ollama": {
        "enabled": true,
        "base_url": "http://localhost:11434",
        "default_model": "llama3.2"
      }
    },
    "extensions": {
      "openai": {
        "package": "autogen-ext[openai]",
        "enabled": true
      },
      "mcp": {
        "package": "autogen-ext[mcp]",
        "enabled": true,
        "description": "MCP server integration for tool access"
      },
      "docker": {
        "package":
"autogen-ext[docker]", + "enabled": false + } + }, + "mcp_integration": { + "enabled": true, + "supported_servers": [ + "@playwright/mcp", + "@anthropic/mcp-server-filesystem", + "custom MCP servers" + ] + }, + "integration": { + "git_version_control": { + "enabled": true, + "track_files": [ + "agents/*.py", + "workflows/*.py", + "config/*.json" + ] + }, + "aidevops_workflows": { + "enabled": true, + "helper_script": ".agent/scripts/autogen-helper.sh" + }, + "dotnet_support": { + "enabled": false, + "packages": [ + "Microsoft.AutoGen.Contracts", + "Microsoft.AutoGen.Core" + ] + } + } + }, + "setup_requirements": { + "python": { + "version": "3.10+", + "packages": [ + "autogen-agentchat", + "autogen-ext[openai]", + "autogenstudio" + ], + "virtual_environment": "required" + }, + "system": { + "memory": "4GB minimum, 8GB recommended", + "disk": "2GB for installation", + "network": "Internet for initial setup and cloud LLM APIs" + } + }, + "usage_examples": { + "setup": "bash .agent/scripts/autogen-helper.sh setup", + "start_studio": "~/.aidevops/scripts/start-autogen-studio.sh", + "stop_studio": "~/.aidevops/scripts/stop-autogen-studio.sh", + "status": "~/.aidevops/scripts/autogen-status.sh", + "run_example": "python hello_autogen.py", + "access_studio": "http://localhost:8081" + }, + "ai_devops_benefits": { + "multi_language": "Python and .NET support for cross-platform development", + "mcp_native": "Built-in MCP server integration for tool access", + "human_in_loop": "Support for human oversight in agent workflows", + "layered_api": "Core API for control, AgentChat for rapid prototyping", + "microsoft_ecosystem": "Azure OpenAI and enterprise integration", + "local_llm": "Full support for Ollama and local models" + } +} diff --git a/configs/crewai-config.json.txt b/configs/crewai-config.json.txt new file mode 100644 index 00000000..4437c5dc --- /dev/null +++ b/configs/crewai-config.json.txt @@ -0,0 +1,110 @@ +{ + "crewai_config": { + "description": "CrewAI configuration for AI DevOps Framework", + "version": "1.0.0", + "framework_integration": true, + "setup": { + "crewai_directory": "~/.aidevops/crewai", + "scripts_directory": "~/.aidevops/scripts", + "default_studio_port": 8501 + }, + "studio": { + "enabled": true, + "port": 8501, + "headless": true + }, + "crew_defaults": { + "process": "sequential", + "verbose": true, + "memory": false, + "cache": true, + "max_rpm": 10 + }, + "agent_defaults": { + "verbose": true, + "allow_delegation": true, + "max_iter": 15, + "max_rpm": 10 + }, + "model_providers": { + "openai": { + "enabled": true, + "api_key_env": "OPENAI_API_KEY", + "default_model": "gpt-4o-mini" + }, + "anthropic": { + "enabled": true, + "api_key_env": "ANTHROPIC_API_KEY", + "default_model": "claude-3-5-sonnet-20241022" + }, + "ollama": { + "enabled": true, + "base_url": "http://localhost:11434", + "default_model": "llama3.2" + } + }, + "tools": { + "serper": { + "enabled": false, + "api_key_env": "SERPER_API_KEY" + }, + "browserbase": { + "enabled": false + }, + "file_tools": { + "enabled": true + }, + "code_tools": { + "enabled": true + } + }, + "telemetry": { + "enabled": false, + "env_var": "CREWAI_TELEMETRY" + }, + "integration": { + "git_version_control": { + "enabled": true, + "track_configs": true, + "config_files": [ + "config/agents.yaml", + "config/tasks.yaml", + "crew.py" + ] + }, + "aidevops_workflows": { + "enabled": true, + "helper_script": ".agent/scripts/crewai-helper.sh" + } + } + }, + "setup_requirements": { + "python": { + "version": "3.10-3.13", + "packages": 
["crewai", "crewai[tools]", "streamlit"], + "virtual_environment": "required" + }, + "system": { + "memory": "4GB minimum, 8GB recommended", + "disk": "2GB for installation", + "network": "Internet for initial setup and cloud LLM APIs" + } + }, + "usage_examples": { + "setup": "bash .agent/scripts/crewai-helper.sh setup", + "start_studio": "~/.aidevops/scripts/start-crewai-studio.sh", + "stop_studio": "~/.aidevops/scripts/stop-crewai-studio.sh", + "status": "~/.aidevops/scripts/crewai-status.sh", + "create_crew": "crewai create crew my-project", + "run_crew": "crewai run", + "access_studio": "http://localhost:8501" + }, + "ai_devops_benefits": { + "multi_agent": "Role-based autonomous agents working as teams", + "flexibility": "Sequential and hierarchical process types", + "code_first": "YAML configs with Python decorators", + "flows": "Event-driven workflows for complex orchestration", + "local_llm": "Full support for Ollama and local models", + "community": "100k+ certified developers, active support" + } +} diff --git a/configs/langflow-config.json.txt b/configs/langflow-config.json.txt new file mode 100644 index 00000000..3e9ff030 --- /dev/null +++ b/configs/langflow-config.json.txt @@ -0,0 +1,115 @@ +{ + "langflow_config": { + "description": "Langflow configuration for AI DevOps Framework", + "version": "1.0.0", + "framework_integration": true, + "setup": { + "langflow_directory": "~/.aidevops/langflow", + "flows_directory": "~/.aidevops/langflow/flows", + "scripts_directory": "~/.aidevops/scripts", + "default_port": 7860 + }, + "server": { + "host": "0.0.0.0", + "port": 7860, + "workers": 1, + "log_level": "INFO", + "database_url": "sqlite:///./langflow.db" + }, + "features": { + "mcp_server": { + "enabled": false, + "description": "Expose flows as MCP tools for AI assistants" + }, + "custom_components": { + "enabled": true, + "path": "~/.aidevops/langflow/components/" + }, + "flow_export": { + "format": "json", + "include_credentials": false + } + }, + "model_providers": { + "openai": { + "enabled": true, + "api_key_env": "OPENAI_API_KEY", + "default_model": "gpt-4o-mini" + }, + "anthropic": { + "enabled": true, + "api_key_env": "ANTHROPIC_API_KEY", + "default_model": "claude-3-5-sonnet-20241022" + }, + "ollama": { + "enabled": true, + "base_url": "http://localhost:11434", + "default_model": "llama3.2" + }, + "google": { + "enabled": false, + "api_key_env": "GOOGLE_API_KEY" + } + }, + "security": { + "authentication": false, + "api_key_storage": "environment_variables", + "cors_origins": ["http://localhost:*"], + "rate_limiting": false + }, + "integration": { + "git_version_control": { + "enabled": true, + "export_format": "json", + "auto_export": false, + "flows_directory": "flows/" + }, + "aidevops_workflows": { + "enabled": true, + "helper_script": ".agent/scripts/langflow-helper.sh" + } + }, + "deployment": { + "environment": "local", + "docker": { + "image": "langflowai/langflow:latest", + "port_mapping": "7860:7860" + }, + "production": { + "database": "postgresql", + "workers": 4, + "reverse_proxy": true + } + } + }, + "setup_requirements": { + "python": { + "version": "3.10+", + "packages": ["langflow"], + "virtual_environment": "required" + }, + "system": { + "memory": "4GB minimum, 8GB recommended", + "disk": "2GB for installation", + "network": "Internet for initial setup and cloud LLM APIs" + } + }, + "usage_examples": { + "setup": "bash .agent/scripts/langflow-helper.sh setup", + "start": "~/.aidevops/scripts/start-langflow.sh", + "stop": 
"~/.aidevops/scripts/stop-langflow.sh", + "status": "~/.aidevops/scripts/langflow-status.sh", + "export_flows": "bash .agent/scripts/langflow-helper.sh export ./flows", + "import_flows": "bash .agent/scripts/langflow-helper.sh import ./flows", + "access_ui": "http://localhost:7860", + "access_api": "http://localhost:7860/docs" + }, + "ai_devops_benefits": { + "visual_development": "Drag-and-drop interface for rapid prototyping", + "code_export": "Export flows to Python for production deployment", + "mcp_integration": "Expose flows as tools for AI assistants", + "version_control": "JSON export for Git-based workflow management", + "local_llm": "Full support for Ollama and local models", + "extensibility": "Custom Python components for specialized tasks" + } +} diff --git a/setup.sh b/setup.sh index 9e077ace..d33b2f20 100755 --- a/setup.sh +++ b/setup.sh @@ -1639,6 +1639,54 @@ setup_browser_tools() { print_info "Browser tools: dev-browser (stateful), Playwriter (extension), Stagehand (AI)" } +# Setup AI Orchestration Frameworks (Langflow, CrewAI, AutoGen) +setup_ai_orchestration() { + print_info "Setting up AI orchestration frameworks..." + + local has_python=false + + # Check Python + if command -v python3 &> /dev/null; then + local python_version + python_version=$(python3 --version 2>&1 | cut -d' ' -f2) + local major minor + major=$(echo "$python_version" | cut -d. -f1) + minor=$(echo "$python_version" | cut -d. -f2) + + if [[ $major -ge 3 ]] && [[ $minor -ge 10 ]]; then + has_python=true + print_success "Python $python_version found (3.10+ required)" + else + print_warning "Python 3.10+ required for AI orchestration, found $python_version" + fi + else + print_warning "Python 3 not found - AI orchestration frameworks unavailable" + return 0 + fi + + if [[ "$has_python" == "false" ]]; then + return 0 + fi + + # Create orchestration directory + mkdir -p "$HOME/.aidevops/orchestration" + + # Info about available frameworks + print_info "AI Orchestration Frameworks available:" + echo " - Langflow: Visual flow builder (localhost:7860)" + echo " - CrewAI: Multi-agent teams (localhost:8501)" + echo " - AutoGen: Microsoft agentic AI (localhost:8081)" + echo "" + print_info "Setup individual frameworks with:" + echo " bash .agent/scripts/langflow-helper.sh setup" + echo " bash .agent/scripts/crewai-helper.sh setup" + echo " bash .agent/scripts/autogen-helper.sh setup" + echo "" + print_info "See .agent/tools/ai-orchestration/overview.md for comparison" + + return 0 +} + # Setup OpenCode Plugins (Antigravity OAuth) # Helper function to add/update a single plugin in OpenCode config add_opencode_plugin() { @@ -2007,6 +2055,7 @@ main() { confirm_step "Setup Beads task management" && setup_beads confirm_step "Setup SEO MCP servers (DataForSEO, Serper)" && setup_seo_mcps confirm_step "Setup browser automation tools" && setup_browser_tools + confirm_step "Setup AI orchestration frameworks info" && setup_ai_orchestration confirm_step "Setup OpenCode plugins" && setup_opencode_plugins confirm_step "Setup Oh-My-OpenCode" && setup_oh_my_opencode