# .github/workflows/e2e_test.yml
name: E2E Tests

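# NOTE: pull_request_target runs with repository secrets available even for PRs
# from forks; combined with checking out the PR head below, this lets untrusted
# PR code run while OPENAI_API_KEY is in the job environment.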
on: [push, pull_request_target]

jobs:
  e2e_tests:
    runs-on: ubuntu-latest
    env:
      OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}

    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ github.event.pull_request.head.sha }}

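      # Write the lightspeed-stack service configuration into the workspace root.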
      - uses: 1arp/create-a-file-action@0.4.5  # assumed: a file-creation action whose inputs match those below
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        with:
          path: '.'
          isAbsolutePath: false
          file: 'lightspeed-stack.yaml'
          content: |
            name: foo bar baz
            service:
              host: 0.0.0.0
              port: 8080
              auth_enabled: false
              workers: 1
              color_log: true
              access_log: true
            llama_stack:
              # Uses a remote llama-stack service
              # The instance would have already been started with a llama-stack-run.yaml file
              use_as_library_client: false
              # Alternative for "as library use"
              # use_as_library_client: true
              # library_client_config_path: <path-to-llama-stack-run.yaml-file>
              url: http://llama-stack:8321
              api_key: xyzzy
            user_data_collection:
              feedback_disabled: false
              feedback_storage: "/tmp/data/feedback"
              transcripts_disabled: false
              transcripts_storage: "/tmp/data/transcripts"
              data_collector:
                enabled: false
                ingress_server_url: null
                ingress_server_auth_token: null
                ingress_content_service_name: null
                collection_interval: 7200  # 2 hours in seconds
                cleanup_after_send: true
                connection_timeout_seconds: 30
            authentication:
              module: "noop"

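      # Write the llama-stack server configuration (run.yaml) for the llama-stack container.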
      - uses: 1arp/create-a-file-action@0.4.5  # assumed: same file-creation action as above
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        with:
          path: '.'
          isAbsolutePath: false
          file: 'run.yaml'
          content: |
            version: '2'
            image_name: simplest-llamastack-app
            apis:
              - agents
              - datasetio
              - eval
              - inference
              - post_training
              - safety
              - scoring
              - telemetry
              - tool_runtime
              - vector_io
            benchmarks: []
            container_image: null
            datasets: []
            external_providers_dir: null
            inference_store:
              db_path: /app-root/.llama/distributions/ollama/inference_store.db
              type: sqlite
            logging: null
            metadata_store:
              db_path: /app-root/.llama/distributions/ollama/registry.db
              namespace: null
              type: sqlite
            providers:
              agents:
                - config:
                    persistence_store:
                      db_path: /app-root/.llama/distributions/ollama/agents_store.db
                      namespace: null
                      type: sqlite
                    responses_store:
                      db_path: /app-root/.llama/distributions/ollama/responses_store.db
                      type: sqlite
                  provider_id: meta-reference
                  provider_type: inline::meta-reference
              datasetio:
                - config:
                    kvstore:
                      db_path: /app-root/.llama/distributions/ollama/huggingface_datasetio.db
                      namespace: null
                      type: sqlite
                  provider_id: huggingface
                  provider_type: remote::huggingface
                - config:
                    kvstore:
                      db_path: /app-root/.llama/distributions/ollama/localfs_datasetio.db
                      namespace: null
                      type: sqlite
                  provider_id: localfs
                  provider_type: inline::localfs
              eval:
                - config:
                    kvstore:
                      db_path: /app-root/.llama/distributions/ollama/meta_reference_eval.db
                      namespace: null
                      type: sqlite
                  provider_id: meta-reference
                  provider_type: inline::meta-reference
              inference:
                - provider_id: openai
                  provider_type: remote::openai
                  config:
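                    # The expression below is expanded by GitHub Actions when this file
                    # is written, so the real API key is stored in plaintext in run.yaml
                    # on the runner.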
                    api_key: ${{ env.OPENAI_API_KEY }}
              post_training:
                - config:
                    checkpoint_format: huggingface
                    device: cpu
                    distributed_backend: null
                  provider_id: huggingface
                  provider_type: inline::huggingface
              safety:
                - config:
                    excluded_categories: []
                  provider_id: llama-guard
                  provider_type: inline::llama-guard
              scoring:
                - config: {}
                  provider_id: basic
                  provider_type: inline::basic
                - config: {}
                  provider_id: llm-as-judge
                  provider_type: inline::llm-as-judge
                - config:
                    openai_api_key: '******'
                  provider_id: braintrust
                  provider_type: inline::braintrust
              telemetry:
                - config:
                    service_name: 'lightspeed-stack'
                    sinks: sqlite
                    sqlite_db_path: /app-root/.llama/distributions/ollama/trace_store.db
                  provider_id: meta-reference
                  provider_type: inline::meta-reference
              tool_runtime:
                - provider_id: model-context-protocol
                  provider_type: remote::model-context-protocol
                  config: {}
              vector_io:
                - config:
                    kvstore:
                      db_path: /app-root/.llama/distributions/ollama/faiss_store.db
                      namespace: null
                      type: sqlite
                  provider_id: faiss
                  provider_type: inline::faiss
            scoring_fns: []
            server:
              auth: null
              host: null
              port: 8321
              quota: null
              tls_cafile: null
              tls_certfile: null
              tls_keyfile: null
            shields: []
            vector_dbs: []

            models:
              - model_id: gpt-4o-mini
                provider_id: openai
                model_type: llm
                provider_model_id: gpt-4o-mini

      - name: List generated config files
        run: |
          ls
          cat lightspeed-stack.yaml
          cat run.yaml

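      # Assumes a docker-compose file in the repo defines the lightspeed service
      # (port 8080) and the llama-stack service (port 8321) referenced above.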
      - name: Run service manually
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        run: |
          # Debug: Check if environment variable is available for docker-compose
          echo "OPENAI_API_KEY is set: $([ -n "$OPENAI_API_KEY" ] && echo 'YES' || echo 'NO')"
          echo "OPENAI_API_KEY length: ${#OPENAI_API_KEY}"

          docker compose --version
          docker compose up -d

      - name: Wait for services
        run: |
          echo "Waiting for services to be healthy..."
          sleep 20  # adjust depending on boot time

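      # A fixed sleep can be flaky; a readiness poll would be more robust, e.g.:
      #   timeout 60 bash -c 'until curl -fs http://localhost:8080/v1/models >/dev/null; do sleep 2; done'
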
      - name: Quick connectivity test
        run: |
          echo "Testing basic connectivity before full test suite..."
          curl -f http://localhost:8080/v1/models || {
            echo "❌ Basic connectivity failed - showing logs before running full tests"
            docker compose logs --tail=30
            exit 1
          }

      - name: Run e2e tests
        run: |
          echo "Installing test dependencies..."
          pip install uv
          uv sync

          echo "Running comprehensive e2e test suite..."
          make test-e2e