diff --git a/scripts/ci_test.sh b/scripts/ci_test.sh
index 15bfac51..4b3f8103 100755
--- a/scripts/ci_test.sh
+++ b/scripts/ci_test.sh
@@ -13,7 +13,7 @@ oc process -p IMAGE_NAME="$ASSISTED_CHAT_TEST" -p SSL_CLIENT_SECRET_NAME=assiste
 sleep 5
 oc get pods -n "$NAMESPACE"
 
-POD_NAME=$(oc get pods | tr -s ' ' | cut -d ' ' -f1 | grep assisted-chat-eval-tes)
+POD_NAME=$(oc get pods -n "$NAMESPACE" | tr -s ' ' | cut -d ' ' -f1 | grep assisted-chat-eval-test)
 
 TIMEOUT=600
 ELAPSED=0
@@ -24,7 +24,7 @@ while [ $ELAPSED -lt $TIMEOUT ]; do
   CURRENT_RESTARTS=$(oc get pod "$POD_NAME" -n "$NAMESPACE" -o=jsonpath='{.status.containerStatuses[0].restartCount}')
   if [[ $CURRENT_RESTARTS -gt 0 ]]; then
     echo "Pod ${POD_NAME} was restarted, so the tests should run at least once, exiting"
-    oc logs -n "$NAMESPACE" "$POD_NAME"
+    oc logs -p -n "$NAMESPACE" "$POD_NAME" || true
     exit "$(oc get pod "$POD_NAME" -n "$NAMESPACE" -o=jsonpath='{.status.containerStatuses[0].lastState.terminated.exitCode}')"
   fi
   if [[ "$CURRENT_STATUS" == "Succeeded" ]]; then
diff --git a/template.yaml b/template.yaml
index 2b4156c1..ea6b25e4 100644
--- a/template.yaml
+++ b/template.yaml
@@ -294,10 +294,11 @@ objects:
         - vector_io
         providers:
           inference:
-          - provider_id: gemini
-            provider_type: remote::gemini
+          - provider_id: vertex_ai
+            provider_type: remote::vertexai
             config:
-              api_key: dummy-to-stop-llama-stack-from-complaining-even-though-we-use-vertex-and-not-gemini-directly
+              project: ""
+              location: us-central1
           vector_io: []
           files: []
           safety: []
@@ -358,7 +359,22 @@ objects:
             password: ${env.ASSISTED_CHAT_POSTGRES_PASSWORD}
             ssl_mode: ${LLAMA_STACK_POSTGRES_SSL_MODE}
             ca_cert_path: /etc/tls/ca-bundle.pem
-        models: []
+        models:
+        - metadata: {}
+          model_id: vertex_ai/gemini-2.0-flash
+          provider_id: vertex_ai
+          provider_model_id: gemini-2.0-flash
+          model_type: llm
+        - metadata: {}
+          model_id: vertex_ai/gemini-2.5-pro
+          provider_id: vertex_ai
+          provider_model_id: gemini-2.5-pro
+          model_type: llm
+        - metadata: {}
+          model_id: vertex_ai/gemini-2.5-flash
+          provider_id: vertex_ai
+          provider_model_id: gemini-2.5-flash
+          model_type: llm
         shields: []
         vector_dbs: []
         datasets: []
diff --git a/test/prow/entrypoint.sh b/test/prow/entrypoint.sh
index 88edba8f..798c3e07 100644
--- a/test/prow/entrypoint.sh
+++ b/test/prow/entrypoint.sh
@@ -9,10 +9,10 @@ OCM_TOKEN=$(curl -X POST https://sso.redhat.com/auth/realms/redhat-external/prot
   -H "Content-Type: application/x-www-form-urlencoded" \
   -d "grant_type=client_credentials" \
   -d "client_id=$CLIENT_ID" \
-  -d "client_secret=$CLIENT_SECRET" | jq '.access_token')
+  -d "client_secret=$CLIENT_SECRET" | jq -r '.access_token')
 
 echo "$OCM_TOKEN" > test/evals/ocm_token.txt
 
 cd test/evals
-#python eval.py --agent_endpoint "${AGENT_URL}:${AGENT_PORT}"
+python eval.py --agent_endpoint "${AGENT_URL}:${AGENT_PORT}"
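
Reviewer note: two of the shell fixes above are subtle enough to deserve a quick illustration, and the provider switch is easy to sanity-check. The sketch below is illustrative only, not part of the change; `LLAMA_STACK_URL` is a hypothetical stand-in for wherever the deployed llama-stack service is reachable.

```bash
#!/usr/bin/env bash
# Illustrative checks for the fixes above -- a sketch, not part of the change.

# Why `jq -r` in entrypoint.sh: without -r, jq emits the JSON-encoded value,
# surrounding quotes included, so ocm_token.txt would contain "abc123"
# rather than a usable bearer token.
echo '{"access_token":"abc123"}' | jq '.access_token'     # -> "abc123"
echo '{"access_token":"abc123"}' | jq -r '.access_token'  # -> abc123

# Why `oc logs -p ... || true` in ci_test.sh: after a restart, the test output
# lives in the *previous* container instance (-p). If those logs are
# unavailable, `oc logs -p` exits non-zero; `|| true` lets the script still
# reach the exit line that reports lastState.terminated.exitCode.

# Hypothetical smoke test for the gemini -> vertex_ai switch: LLAMA_STACK_URL
# is an assumed stand-in for the deployed llama-stack endpoint.
curl -s "${LLAMA_STACK_URL}/v1/models" | jq .
```

If the service came up with the new config, the models response should include the three `vertex_ai/gemini-*` entries registered in template.yaml.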