diff --git a/documentation/src/pages/recipes/data/recipes/openapi-to-locust.yaml b/documentation/src/pages/recipes/data/recipes/openapi-to-locust.yaml
new file mode 100644
index 000000000000..49e9beb46170
--- /dev/null
+++ b/documentation/src/pages/recipes/data/recipes/openapi-to-locust.yaml
@@ -0,0 +1,93 @@
+version: "1.0.0"
+title: "OpenAPI to Locust Load Test Generator"
+description: "Generate comprehensive Locust load tests from OpenAPI/Swagger specifications"
+author:
+  contact: Better-Boy
+
+instructions: |
+  You are an expert in API testing and load testing with Locust.
+  Your task is to generate production-ready Locust load test files from OpenAPI specifications.
+
+  Follow this workflow:
+  1. First, analyze the OpenAPI spec to understand the API structure
+  2. Generate Locust task sets for different endpoint groups
+  3. Create the main locustfile with proper configuration
+  4. Generate supporting files (requirements.txt, README, config)
+
+  Ensure the generated tests:
+  - Are well-structured, with proper task weighting
+  - Include realistic user behavior patterns
+  - Have proper error handling and assertions
+  - Use parameterized data where appropriate
+  - Follow Locust best practices
+
+parameters:
+  - key: openapi_spec_path
+    input_type: string
+    requirement: required
+    description: "Path to the OpenAPI specification file (JSON or YAML)"
+
+  - key: base_url
+    input_type: string
+    requirement: optional
+    default: "http://localhost:8000"
+    description: "Base URL for the API to test"
+
+  - key: output_dir
+    input_type: string
+    requirement: optional
+    default: "./load_tests"
+    description: "Directory where generated test files will be saved"
+
+  - key: test_complexity
+    input_type: string
+    requirement: optional
+    default: "standard"
+    description: "Test complexity level: basic, standard, or advanced"
+
+  - key: include_auth
+    input_type: string
+    requirement: optional
+    default: "true"
+    description: "Whether to include authentication handling in tests"
+
+sub_recipes:
+  - name: analyze_openapi
+    path: "./subrecipes/analyze-openapi.yaml"
+    values:
+      analysis_depth: "comprehensive"
+
+  - name: generate_task_sets
+    path: "./subrecipes/generate-task-sets.yaml"
+
+  - name: generate_locustfile
+    path: "./subrecipes/generate-locustfile.yaml"
+
+  - name: generate_support_files
+    path: "./subrecipes/generate-support-files.yaml"
+
+extensions:
+  - type: builtin
+    name: developer
+    timeout: 600
+    bundled: true
+
+prompt: |
+  Generate complete Locust load tests from the OpenAPI specification at {{ openapi_spec_path }}.
+
+  Configuration:
+  - Base URL: {{ base_url }}
+  - Output directory: {{ output_dir }}
+  - Test complexity: {{ test_complexity }}
+  - Include authentication: {{ include_auth }}
+
+  Use the sub-recipe tools in this order:
+  1. analyze_openapi - Parse and analyze the OpenAPI spec
+  2. generate_task_sets - Create Locust TaskSets for endpoint groups
+  3. generate_locustfile - Generate the main locustfile.py
+  4. generate_support_files - Create requirements.txt, README.md, and config files
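+
+  The finished output directory should look roughly like the sketch below
+  (the exact task file names depend on the spec's tags):
+
+  ```
+  load_tests/
+  ├── locustfile.py
+  ├── requirements.txt
+  ├── locust.conf
+  ├── run_tests.sh
+  ├── README.md
+  └── tasks/
+      ├── users_tasks.py
+      └── products_tasks.py
+  ```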
+
+  After all sub-recipes complete, verify all files were created successfully and provide:
+  - Summary of generated files
+  - Instructions for running the tests
+  - Example commands for different load scenarios
\ No newline at end of file
diff --git a/documentation/src/pages/recipes/data/recipes/subrecipes/analyze-openapi.yaml b/documentation/src/pages/recipes/data/recipes/subrecipes/analyze-openapi.yaml
new file mode 100644
index 000000000000..3fd108935aed
--- /dev/null
+++ b/documentation/src/pages/recipes/data/recipes/subrecipes/analyze-openapi.yaml
@@ -0,0 +1,71 @@
+version: "1.0.0"
+title: "OpenAPI Spec Analyzer"
+description: "Parse and analyze OpenAPI specification to extract endpoints, schemas, and auth details"
+instructions: |
+  You are an API specification expert. Parse the OpenAPI/Swagger specification file
+  and extract all relevant information for load test generation.
+
+  Your analysis should include:
+  - List of all endpoints grouped by tags/categories
+  - HTTP methods, paths, and parameters for each endpoint
+  - Request/response schemas and content types
+  - Authentication and security schemes
+  - Rate limits and other constraints, if defined
+
+  Output Format:
+  Create a structured analysis with:
+  1. API metadata (title, version, description)
+  2. Base servers/URLs
+  3. Authentication methods used
+  4. Endpoint groups with their operations
+  5. Common schemas and data models
+  6. Recommended test scenarios based on endpoint relationships
+
+parameters:
+  - key: output_dir
+    input_type: string
+    requirement: required
+    description: "Output directory"
+
+  - key: openapi_spec_path
+    input_type: string
+    requirement: required
+    description: "Path to the OpenAPI specification file"
+
+  - key: analysis_depth
+    input_type: string
+    requirement: optional
+    default: "standard"
+    description: "Depth of analysis: basic, standard, or comprehensive"
+
+extensions:
+  - type: builtin
+    name: developer
+    timeout: 300
+    bundled: true
+
+prompt: |
+  Analyze the OpenAPI specification at {{ openapi_spec_path }}.
+  Perform a {{ analysis_depth }} analysis.
+
+  Steps:
+  1. Read and parse the spec file (handle both JSON and YAML formats)
+  2. Extract API metadata and server information
+  3. Identify all authentication/security schemes
+  4. Group endpoints by tags or logical categories
+  5. For each endpoint, document:
+     - HTTP method and path
+     - Path/query/header parameters
+     - Request body schema (if applicable)
+     - Response schemas
+     - Security requirements
+  6. Identify endpoint relationships (e.g., POST /users before GET /users/{id})
+  7. Suggest realistic test scenarios and user flows
+
+  Save the analysis to {{ output_dir }}/analysis.json for use by other sub-recipes.
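+
+  A minimal sketch of the expected analysis.json shape is shown below; the
+  field names are suggestions rather than a fixed contract:
+
+  ```json
+  {
+    "metadata": {"title": "Example API", "version": "1.0"},
+    "servers": ["http://localhost:8000"],
+    "auth_schemes": ["bearer"],
+    "endpoint_groups": {
+      "users": [
+        {"method": "POST", "path": "/users"},
+        {"method": "GET", "path": "/users/{id}"}
+      ]
+    },
+    "suggested_flows": [["POST /users", "GET /users/{id}"]]
+  }
+  ```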
+
+  Present a summary with:
+  - Total number of endpoints
+  - Endpoints grouped by category
+  - Authentication types used
+  - Recommended test flow sequences
diff --git a/documentation/src/pages/recipes/data/recipes/subrecipes/generate-locustfile.yaml b/documentation/src/pages/recipes/data/recipes/subrecipes/generate-locustfile.yaml
new file mode 100644
index 000000000000..5cc3c4ed3756
--- /dev/null
+++ b/documentation/src/pages/recipes/data/recipes/subrecipes/generate-locustfile.yaml
@@ -0,0 +1,111 @@
+version: "1.0.0"
+title: "Main Locustfile Generator"
+description: "Generate the main locustfile.py that orchestrates all TaskSets"
+instructions: |
+  You are a Locust expert. Create the main locustfile.py that ties together
+  all the TaskSets and provides the entry point for load testing.
+
+  The main locustfile should:
+  - Import all TaskSet classes from the tasks directory
+  - Define HttpUser classes that use the TaskSets
+  - Configure wait times and user behavior
+  - Set up authentication and session handling
+  - Include environment configuration (host, users, spawn rate)
+  - Add event hooks for setup/teardown and custom stats
+  - Include command-line argument handling if needed
+
+  Support multiple user types/personas if the API has different access levels
+  (e.g., AdminUser, RegularUser, GuestUser).
+
+parameters:
+  - key: output_dir
+    input_type: string
+    requirement: required
+    description: "Output directory"
+
+  - key: base_url
+    input_type: string
+    requirement: required
+    description: "Base URL for the API"
+
+  - key: include_auth
+    input_type: string
+    requirement: required
+    description: "Whether to include authentication"
+
+  - key: test_complexity
+    input_type: string
+    requirement: required
+    description: "Test complexity level"
+
+extensions:
+  - type: builtin
+    name: developer
+    timeout: 300
+    bundled: true
+
+prompt: |
+  Generate the main locustfile.py that orchestrates the load tests.
+
+  Structure:
+  ```python
+  from locust import HttpUser, between, events
+  import os  # For reading credentials from environment variables
+  from tasks.users_tasks import UsersTaskSet
+  from tasks.products_tasks import ProductsTaskSet
+  # ... import other TaskSets
+
+  class APIUser(HttpUser):
+      wait_time = between(1, 3)
+      host = "{{ base_url }}"
+
+      tasks = [UsersTaskSet, ProductsTaskSet]  # Mix of all TaskSets
+
+      def on_start(self):
+          """Called when a simulated user starts"""
+          {% if include_auth == "true" %}
+          # Perform login and store the token
+          response = self.client.post("/auth/login", json={
+              "username": "test_user",
+              "password": "test_pass"
+          })
+          self.token = response.json().get("token")
+          self.client.headers.update({"Authorization": f"Bearer {self.token}"})
+          {% endif %}
+
+      def on_stop(self):
+          """Called when a simulated user stops"""
+          pass
+
+  # Event hooks for custom behavior
+  @events.test_start.add_listener
+  def on_test_start(environment, **kwargs):
+      print("Load test starting...")
+
+  @events.test_stop.add_listener
+  def on_test_stop(environment, **kwargs):
+      print("Load test complete!")
+  ```
+
+  {% if test_complexity == "advanced" %}
+  Include multiple user types with different behavior patterns:
+  - AdminUser: Has access to admin endpoints
+  - RegularUser: Normal user operations
+  - ReadOnlyUser: Only GET requests
+
+  Use task distribution weights for realistic traffic patterns.
+  {% endif %}
+
+  Add configuration for:
+  - Environment variables for credentials
+  - Custom headers
+  - Connection pooling settings
+  - Request/response hooks for logging
+
+  Save to {{ output_dir }}/locustfile.py
+
+  Also create a .env.example file with placeholder values for:
+  - API_BASE_URL
+  - AUTH_USERNAME
+  - AUTH_PASSWORD
+  - Any API keys or tokens needed
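+
+  As a sketch, the .env.example might contain (placeholder values only; the
+  API_TOKEN entry is illustrative):
+
+  ```
+  API_BASE_URL={{ base_url }}
+  AUTH_USERNAME=test_user
+  AUTH_PASSWORD=change_me
+  API_TOKEN=
+  ```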
diff --git a/documentation/src/pages/recipes/data/recipes/subrecipes/generate-support-files.yaml b/documentation/src/pages/recipes/data/recipes/subrecipes/generate-support-files.yaml
new file mode 100644
index 000000000000..939beb6a541d
--- /dev/null
+++ b/documentation/src/pages/recipes/data/recipes/subrecipes/generate-support-files.yaml
@@ -0,0 +1,168 @@
+version: "1.0.0"
+title: "Support Files Generator"
+description: "Generate requirements.txt, README.md, configuration files, and helper scripts"
+instructions: |
+  You are a DevOps and documentation expert. Create all supporting files needed
+  to make the load tests easy to use and maintain.
+
+  Generate the following files:
+  1. requirements.txt - All Python dependencies
+  2. README.md - Comprehensive documentation
+  3. locust.conf - Configuration file for Locust
+  4. docker-compose.yml - Optional Docker setup for distributed testing
+  5. run_tests.sh - Shell script with example commands
+  6. data/ - Sample data files for parameterized tests
+  7. .gitignore - Ignore patterns for test artifacts
+
+parameters:
+  - key: output_dir
+    input_type: string
+    requirement: required
+    description: "Output directory"
+
+  - key: base_url
+    input_type: string
+    requirement: required
+    description: "Base URL for API"
+
+extensions:
+  - type: builtin
+    name: developer
+    timeout: 300
+    bundled: true
+
+prompt: |
+  Generate all support files for the Locust load tests.
+
+  1. Create {{ output_dir }}/requirements.txt:
+  ```
+  locust>=2.15.0
+  pyyaml>=6.0
+  requests>=2.31.0
+  python-dotenv>=1.0.0
+  faker>=20.0.0  # For generating realistic test data
+  ```
+
+  2. Create {{ output_dir }}/README.md with:
+  - Project overview and purpose
+  - API being tested (from analysis)
+  - Prerequisites and installation steps
+  - Configuration instructions
+  - How to run tests:
+    * Single machine mode
+    * Distributed mode (master/workers)
+    * Headless mode
+    * Web UI mode
+  - Example commands for different scenarios:
+    * Quick smoke test: `locust --headless -u 10 -r 2 -t 1m`
+    * Load test: `locust --headless -u 100 -r 10 -t 10m`
+    * Web UI: `locust --web-port 8089`
+  - Interpreting results and metrics
+  - Troubleshooting common issues
+  - Project structure explanation
+
+  3. Create {{ output_dir }}/locust.conf:
+  ```conf
+  # Locust configuration file
+  host = {{ base_url }}
+  users = 100
+  spawn-rate = 10
+  run-time = 5m
+  headless = false
+  web-host = 127.0.0.1
+  web-port = 8089
+  loglevel = INFO
+  logfile = locust.log
+  html = reports/report.html
+  csv = reports/results
+  ```
+
+  4. Create {{ output_dir }}/run_tests.sh:
+  ```bash
+  #!/bin/bash
+  # Example test execution script
+
+  echo "Starting Locust load tests..."
+
+  # Ensure the reports directory exists before writing HTML output
+  mkdir -p reports
+
+  # Smoke test (10 users, 1 minute)
+  if [ "$1" == "smoke" ]; then
+      locust --headless -u 10 -r 2 -t 1m --html=reports/smoke_test.html
+
+  # Load test (100 users, 10 minutes)
+  elif [ "$1" == "load" ]; then
+      locust --headless -u 100 -r 10 -t 10m --html=reports/load_test.html
+
+  # Stress test (500 users, 30 minutes)
+  elif [ "$1" == "stress" ]; then
+      locust --headless -u 500 -r 50 -t 30m --html=reports/stress_test.html
+
+  # Web UI mode (default)
+  else
+      echo "Starting Locust web UI on http://localhost:8089"
+      locust
+  fi
+  ```
+
+  5. Create {{ output_dir }}/.gitignore:
+  ```
+  # Python
+  __pycache__/
+  *.py[cod]
+  *$py.class
+  .Python
+  venv/
+  env/
+
+  # Locust
+  *.log
+  reports/
+  .env
+
+  # IDE
+  .vscode/
+  .idea/
+  ```
+
+  6. Create {{ output_dir }}/data/sample_users.json with realistic test data:
+  ```json
+  [
+    {"username": "user1", "email": "user1@example.com"},
+    {"username": "user2", "email": "user2@example.com"}
+  ]
+  ```
+
+  7. Create {{ output_dir }}/docker-compose.yml for distributed testing:
+  ```yaml
+  version: '3'
+  services:
+    master:
+      image: locustio/locust
+      ports:
+        - "8089:8089"
+      volumes:
+        - ./:/mnt/locust
+      command: -f /mnt/locust/locustfile.py --master
+
+    worker:
+      image: locustio/locust
+      volumes:
+        - ./:/mnt/locust
+      command: -f /mnt/locust/locustfile.py --worker --master-host master
+      deploy:
+        replicas: 4
+  ```
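+
+  For reference, a sketch of how the distributed setup might be launched
+  (assumes the Docker Compose v2 CLI; scale the worker count as needed):
+
+  ```bash
+  docker compose up --scale worker=4
+  # The Locust web UI is then reachable at http://localhost:8089
+  ```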
+
+  8. Create {{ output_dir }}/pytest.ini for optional integration with pytest:
+  ```ini
+  [pytest]
+  testpaths = tests
+  python_files = test_*.py
+  python_functions = test_*
+  addopts = -v --tb=short
+  ```
+
+  Create a reports/ directory structure with a README explaining output files.
+
+  Make all shell scripts executable (chmod +x).
+
+  Provide a summary of all generated files and their purposes.
diff --git a/documentation/src/pages/recipes/data/recipes/subrecipes/generate-task-sets.yaml b/documentation/src/pages/recipes/data/recipes/subrecipes/generate-task-sets.yaml
new file mode 100644
index 000000000000..4cf99de2a79a
--- /dev/null
+++ b/documentation/src/pages/recipes/data/recipes/subrecipes/generate-task-sets.yaml
@@ -0,0 +1,100 @@
+version: "1.0.0"
+title: "Locust TaskSet Generator"
+description: "Generate Locust TaskSet classes for different API endpoint groups"
+instructions: |
+  You are a Locust performance testing expert. Generate TaskSet classes that
+  simulate realistic user behavior for different parts of the API.
+
+  For each endpoint group, create a TaskSet that:
+  - Uses appropriate task weights based on expected traffic patterns
+  - Includes proper request construction with headers, params, and body
+  - Handles authentication tokens/sessions
+  - Validates responses with assertions
+  - Uses realistic wait times between requests
+  - Extracts data from responses for subsequent requests (e.g., IDs)
+  - Includes error handling and logging
+
+  Follow Locust best practices:
+  - Use the @task decorator with weights
+  - Use self.client for making requests
+  - Store shared data in self attributes
+  - Use context managers for setup/teardown if needed
+  - Add descriptive names for better reporting
+
+parameters:
+  - key: output_dir
+    input_type: string
+    requirement: required
+    description: "Directory for output files"
+
+  - key: test_complexity
+    input_type: string
+    requirement: optional
+    default: "standard"
+    description: "Complexity level for tests"
+
+  - key: include_auth
+    input_type: string
+    requirement: required
+    description: "Whether to include auth handling"
+
+extensions:
+  - type: builtin
+    name: developer
+    timeout: 600
+    bundled: true
+
+prompt: |
+  Generate Locust TaskSet classes based on the API analysis.
+
+  Use the analysis data to understand endpoint groups and relationships.
+
+  For each endpoint group (e.g., Users, Products, Orders):
+  1. Create a separate TaskSet class
+  2. Implement tasks for each endpoint in that group
+  3. Set realistic task weights (common operations get higher weights)
+  4. Handle data dependencies between requests
+
+  Example structure for each TaskSet:
+  ```python
+  from locust import TaskSet, task, between
+  import json
+  import random
+
+  class UsersTaskSet(TaskSet):
+      wait_time = between(1, 3)
+
+      def on_start(self):
+          # Setup code, e.g., login
+          pass
+
+      @task(3)  # Higher weight for common operations
+      def list_users(self):
+          with self.client.get("/users", catch_response=True) as response:
+              if response.status_code == 200:
+                  response.success()
+              else:
+                  response.failure(f"Got status code {response.status_code}")
+
+      @task(2)
+      def get_user_details(self):
+          # Use data from previous requests
+          pass
+
+      @task(1)
+      def create_user(self):
+          # POST with realistic payload
+          pass
+  ```
+
+  Save each TaskSet to a separate file in {{ output_dir }}/tasks/
+  Files should be named: users_tasks.py, products_tasks.py, etc.
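+
+  To cover the data-dependency point above, a minimal sketch of the
+  get_user_details task (the endpoint path and the user_ids attribute are
+  illustrative):
+
+  ```python
+  @task(2)
+  def get_user_details(self):
+      # Reuse an ID captured from an earlier list/create response
+      if getattr(self, "user_ids", None):
+          user_id = random.choice(self.user_ids)
+          self.client.get(f"/users/{user_id}", name="/users/[id]")
+  ```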
+
+  Configuration:
+  - Test complexity: {{ test_complexity }}
+  - Include authentication: {{ include_auth }}
+
+  If test_complexity is "advanced", include:
+  - Data-driven tests with parameterized inputs
+  - Complex user flows with multiple dependent requests
+  - Performance-focused optimizations
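+
+  For the data-driven case, a sketch of the create_user task using Faker
+  (already pinned in the generated requirements.txt; the payload fields are
+  illustrative and live inside the TaskSet class):
+
+  ```python
+  from faker import Faker
+
+  fake = Faker()
+
+  @task(1)
+  def create_user(self):
+      # Generate a unique, realistic payload per request
+      self.client.post("/users", json={
+          "username": fake.user_name(),
+          "email": fake.email(),
+      })
+  ```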