Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions .github/actions/process-test-results/action.yml
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,18 @@ runs:
testing/trino-product-tests/target/*
logs/*
retention-days: 5
- name: Upload test report
uses: actions/upload-artifact@v4
# Always upload the test report for the upload-test-results.yml workflow,
# but only the single XML file to keep the artifact small
with:
# Name prefix is checked in the `Upload test results` workflow
name: test report ${{ inputs.artifact-name }}
if-no-files-found: 'ignore'
path: |
**/surefire-reports/TEST-*.xml
testing/trino-product-tests/target/reports/**/testng-results.xml
retention-days: 5
- name: Upload heap dump
uses: actions/upload-artifact@v4
if: format('{0}', inputs.upload-heap-dump) == 'true'
Expand Down
93 changes: 93 additions & 0 deletions .github/workflows/upload-test-results.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,93 @@
# Collects the `test report *` artifacts produced by completed ci/docs runs
# (see .github/actions/process-test-results), converts the JUnit/TestNG XML
# to NDJSON, and uploads it to S3 for analysis.
name: Upload test results

on:
  workflow_run:
    workflows: ["ci", "docs"]
    types:
      - completed

defaults:
  run:
    shell: bash --noprofile --norc -euo pipefail {0}

jobs:
  upload-to-s3:
    # Skip runs without access to secrets (forks) and cancelled runs
    if: github.secret_source != 'None' && github.event.workflow_run.conclusion != 'cancelled'
    runs-on: ubuntu-latest
    steps:
      - name: 'Download artifact'
        uses: actions/github-script@v7
        with:
          script: |
            const fs = require('fs');
            const opts = github.rest.actions.listWorkflowRunArtifacts.endpoint.merge({
              owner: context.repo.owner,
              repo: context.repo.repo,
              run_id: ${{ github.event.workflow_run.id }},
            });
            const artifacts = await github.paginate(opts);
            for (const artifact of artifacts) {
              // Name prefix is set in the process-test-results action
              if (!artifact.name.startsWith('test report ')) {
                continue;
              }
              const download = await github.rest.actions.downloadArtifact({
                owner: context.repo.owner,
                repo: context.repo.repo,
                artifact_id: artifact.id,
                archive_format: 'zip',
              });
              fs.writeFileSync('${{ github.workspace }}/' + artifact.name + '.zip', Buffer.from(download.data));
            }
      - name: Extract archives
        run: |
          for archive in *.zip; do
            # $archive is literally *.zip if there are no zip files matching the glob expression
            [ -f "$archive" ] || continue
            name=$(basename "$archive" .zip)
            mkdir "$name"
            (cd "$name" && unzip ../"$archive")
          done
      - name: Upload test results to S3
        env:
          S3_BUCKET: ${{ vars.TEST_RESULTS_BUCKET }}
          AWS_ACCESS_KEY_ID: ${{ vars.TEST_RESULTS_AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.TEST_RESULTS_AWS_SECRET_ACCESS_KEY }}
          AWS_DEFAULT_REGION: us-east-2
        # Only attempt the upload when the bucket and credentials are configured
        if: env.S3_BUCKET != '' && env.AWS_ACCESS_KEY_ID != '' && env.AWS_SECRET_ACCESS_KEY != ''
        shell: bash --noprofile --norc -euo pipefail {0}
        run: |
          # 1. Don't prefix attributes, because +@ (the default prefix) is not a valid character in nested row field names in the Hive connector for JSON files.
          # 2. When converting to JSON, make sure 'testcase' is always an array: https://mikefarah.gitbook.io/yq/usage/xml#parse-xml-force-as-an-array
          # 3. Remove system-err and system-out, because they cannot be easily parsed and add significant bloat, making storing and processing the data much more costly.
          # 4. Remove properties, because they leak secret values.
          # 5. Truncate all strings to 1k characters to avoid having lines longer than 100MB.
          yq_opts=(
            --input-format=xml
            --output-format=json
            --xml-attribute-prefix=''
            --xml-content-name='content'
            --xml-skip-directives
            --xml-skip-proc-inst
            '.testsuite.testcase |= ([] + .) | .testsuite.testcase[] |= del(.system-err, .system-out) | .testsuite |= del(.properties) | .. |= select(tag == "!!str") |= sub("(.{0,1000}).*", "${1}")'
          )
          artifact_id='${{ github.event.workflow_run.id }}-${{ github.event.workflow_run.run_attempt }}.json.gz'

          # Convert every report into one JSON document per test suite,
          # then compact into NDJSON (one document per line)
          find . \
            -name TEST-\*.xml \
            -exec yq "${yq_opts[@]}" {} \; \
            | jq -c > surefire.ndjson
          find . \
            -name testng-results.xml \
            -exec yq "${yq_opts[@]}" {} \; \
            | jq -c > testng.ndjson

          for filename in *.ndjson; do
            # Skip empty outputs (no reports of this type were found)
            if [ ! -s "$filename" ]; then
              continue;
            fi
            # Enrich every record with run metadata; $artifact_id is reused
            # (overwritten) per report type, but it is uploaded before the
            # next iteration rewrites it
            jq -c \
              --argjson addObj '{"branch":"${{ github.event.workflow_run.head_branch }}","git_sha":"${{ github.event.workflow_run.head_sha }}","workflow_name":"${{ github.event.workflow.name }}","workflow_run":"${{ github.event.workflow_run.id }}","workflow_conclusion":"${{ github.event.workflow_run.conclusion }}","workflow_job":"","workflow_run_attempt":"${{ github.event.workflow_run.run_attempt }}","timestamp":""}' \
              --arg timestamp "$(date -u '+%F %T.%3NZ')" \
              '. + $addObj | .timestamp=$timestamp' "$filename" | gzip -c > "$artifact_id"

            aws s3 cp --no-progress "$artifact_id" "s3://$S3_BUCKET/tests/results/type=$(basename "$filename" .ndjson)/repo=$(basename "${{ github.repository }}")/date_created=$(date -u '+%Y-%m-%d')/$artifact_id"
          done