Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/actions/verify-metrics-snapshot/action.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,7 @@ runs:
shell: bash
run: |
python3 -m pip install prometheus-client
if python3 ./scripts/e2e/compare_metrics.py --file1 ./.metrics/${{ inputs.snapshot }}.txt --file2 ./.metrics/baseline_${{ inputs.snapshot }}.txt --output ./.metrics/diff_${{ inputs.snapshot }}.txt; then
if python3 ./scripts/e2e/compare_metrics.py --current ./.metrics/${{ inputs.snapshot }}.txt --baseline ./.metrics/baseline_${{ inputs.snapshot }}.txt --output ./.metrics/diff_${{ inputs.snapshot }}.txt; then
echo "No differences found in metrics"
else
echo "🛑 Differences found in metrics"
Expand Down
47 changes: 41 additions & 6 deletions .github/scripts/ci-summary-report-publish.js
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,12 @@ function sanitizeSnapshots(raw) {
if (clean) names.push(clean);
}
}
result.push({ snapshot, added, removed, modified, metric_names: names });
// Validate artifact_id (optional, positive integer for the diff artifact download link)
const artifactId = safeNum(entry.artifact_id);
// artifact_id must be a positive integer (GitHub artifact IDs are always > 0)
const sanitizedArtifactId = (artifactId !== null && Number.isInteger(artifactId) && artifactId > 0)
? artifactId : null;
result.push({ snapshot, added, removed, modified, metric_names: names, artifact_id: sanitizedArtifactId });
Comment on lines +100 to +105
Copy link

Copilot AI Apr 10, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The inline comment says artifact_id is a “non-negative integer”, but the code requires artifact_id > 0 (and the following comment notes GitHub IDs are always > 0). Update the comment to match the actual validation (positive integer) to avoid confusion during future security reviews.

Copilot uses AI. Check for mistakes.
}
return result.length > 0 ? result : null;
}
Expand Down Expand Up @@ -158,9 +163,14 @@ function computeCoverage(s) {
* All text is built from trusted templates; metric names have been validated
* through sanitizeMetricName() and are rendered in backtick-code spans.
* @param {Array|null} snapshots - Sanitized snapshots from computeMetrics
* @param {string} [ciRunUrl] - URL to the CI run for linking to step logs
* @param {string} [artifactUrlPrefix] - Base URL for artifact downloads:
* `https://github.com/{owner}/{repo}/actions/runs/{runId}/artifacts`.
* When provided, each snapshot with a valid artifact_id gets a direct
* download link appended to its header line.
* @returns {string} - Markdown detail block, or empty string if no data
*/
function formatMetricsDetail(snapshots) {
function formatMetricsDetail(snapshots, { ciRunUrl, artifactUrlPrefix } = {}) {
if (!snapshots || snapshots.length === 0) return '';

const lines = [
Expand All @@ -170,8 +180,18 @@ function formatMetricsDetail(snapshots) {
'',
];

if (ciRunUrl) {
Comment thread
yurishkuro marked this conversation as resolved.
lines.push(`_For label-level diff details, open the [CI run](${ciRunUrl}) and expand the "Compare metrics and generate summary" step logs._`);
lines.push('');
}

for (const snap of snapshots) {
lines.push(`**${snap.snapshot}**`);
// Build the snapshot header, optionally with a direct diff artifact download link.
let snapHeader = `**${snap.snapshot}**`;
if (artifactUrlPrefix && snap.artifact_id !== null && snap.artifact_id !== undefined) {
snapHeader += ` — [⬇️ download diff](${artifactUrlPrefix}/${snap.artifact_id})`;
}
lines.push(snapHeader);
const parts = [];
if (snap.added !== null && snap.added > 0) parts.push(`${snap.added} added`);
if (snap.removed !== null && snap.removed > 0) parts.push(`${snap.removed} removed`);
Expand Down Expand Up @@ -201,9 +221,11 @@ function formatMetricsDetail(snapshots) {
* @param {string} footer - links + timestamp line
* @param {object} [opts]
* @param {Array|null} [opts.metricsSnapshots] - sanitized snapshot data for detail rendering
* @param {string} [opts.ciRunUrl] - URL to the CI run, passed through to formatMetricsDetail
* @param {string} [opts.artifactUrlPrefix] - Base URL for artifact downloads, passed through
* @returns {string}
*/
function buildCommentBody(metricsText, coverageText, footer, { metricsSnapshots } = {}) {
function buildCommentBody(metricsText, coverageText, footer, { metricsSnapshots, ciRunUrl, artifactUrlPrefix } = {}) {
const parts = [
COMMENT_TAG,
'## CI Summary Report',
Expand All @@ -212,7 +234,7 @@ function buildCommentBody(metricsText, coverageText, footer, { metricsSnapshots
metricsText,
];

const detail = formatMetricsDetail(metricsSnapshots);
const detail = formatMetricsDetail(metricsSnapshots, { ciRunUrl, artifactUrlPrefix });
if (detail) {
parts.push(detail);
}
Expand Down Expand Up @@ -301,6 +323,8 @@ async function postOrUpdateComment(github, owner, repo, prNumber, body, core, {
* @param {string} opts.inputs.prNumber - raw string from step output
* @param {string} opts.inputs.ciRunUrl
* @param {string} opts.inputs.publishUrl
* @param {string} [opts.inputs.sourceRunId] - numeric run ID of the CI Orchestrator run;
* used to construct direct artifact download URLs (e.g. .../artifacts/{id})
*/
async function handler({ github, core, fs, inputs }) {
const { owner, repo, headSha, ciRunUrl, publishUrl } = inputs;
Expand All @@ -310,6 +334,13 @@ async function handler({ github, core, fs, inputs }) {
const ts = new Date().toISOString().replace('T', ' ').replace(/\.\d+Z$/, ' UTC');
const footer = `${links}\n_${ts}_`;

// Construct the artifact URL prefix for per-snapshot diff download links.
// All parts come from trusted sources: owner/repo from context, runId validated as integer.
const sourceRunId = parseInt(inputs.sourceRunId, 10) || null;
const artifactUrlPrefix = (owner && repo && sourceRunId)
? `https://github.com/${owner}/${repo}/actions/runs/${sourceRunId}/artifacts`
: null;

// Read structured data written by ci-summary-report.yml.
// All fields are primitives (enums, numbers, booleans) — no free-form text.
let s;
Expand Down Expand Up @@ -349,7 +380,11 @@ async function handler({ github, core, fs, inputs }) {
// after a green run. Only create a new comment when there is something to report.
const hasIssues = metrics.conclusion === 'failure' || coverage.conclusion === 'failure'
|| metrics.totalChanges > 0;
const body = buildCommentBody(metrics.text, coverage.text, footer, { metricsSnapshots: metrics.snapshots });
const body = buildCommentBody(metrics.text, coverage.text, footer, {
metricsSnapshots: metrics.snapshots,
ciRunUrl,
artifactUrlPrefix,
});
await postOrUpdateComment(github, owner, repo, prNumber, body, core, { createNew: hasIssues });
} else {
core.info('No PR number; skipping PR comment.');
Expand Down
114 changes: 108 additions & 6 deletions .github/scripts/ci-summary-report-publish.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -175,6 +175,44 @@ describe('sanitizeSnapshots', () => {
expect(result).toHaveLength(1);
expect(result[0].snapshot).toBe('valid');
});

test('sanitizes artifact_id as a positive integer', () => {
const input = [{
snapshot: 'test_snapshot',
added: 1, removed: 0, modified: 0,
metric_names: ['http_server_duration'],
artifact_id: 6359406281,
}];
const result = sanitizeSnapshots(input);
expect(result[0].artifact_id).toBe(6359406281);
});

test('rejects non-integer artifact_id', () => {
const input = [{
snapshot: 'test_snapshot',
added: 1, removed: 0, modified: 0,
metric_names: [],
artifact_id: 'evil_string',
}];
const result = sanitizeSnapshots(input);
expect(result[0].artifact_id).toBeNull();
});

test('rejects zero and negative artifact_id', () => {
const input = [
{ snapshot: 'snap_a', added: 0, removed: 0, modified: 0, metric_names: [], artifact_id: 0 },
{ snapshot: 'snap_b', added: 0, removed: 0, modified: 0, metric_names: [], artifact_id: -1 },
];
const result = sanitizeSnapshots(input);
expect(result[0].artifact_id).toBeNull();
expect(result[1].artifact_id).toBeNull();
});

test('sets artifact_id to null when absent', () => {
const input = [{ snapshot: 'test_snapshot', added: 1, removed: 0, modified: 0, metric_names: [] }];
const result = sanitizeSnapshots(input);
expect(result[0].artifact_id).toBeNull();
});
});

// ── computeMetrics ────────────────────────────────────────────────────────────
Expand Down Expand Up @@ -285,14 +323,53 @@ describe('formatMetricsDetail', () => {
expect(detail).toContain('3 removed');
});

test('omits zero counts from summary line', () => {
test('includes link to CI run when ciRunUrl provided', () => {
const detail = formatMetricsDetail([{
snapshot: 'test', added: 0, removed: 5, modified: 0,
metric_names: [],
snapshot: 'cassandra_v2',
added: 1, removed: 0, modified: 0,
metric_names: ['http_server_duration'],
}], { ciRunUrl: 'https://github.com/org/repo/actions/runs/123' });
expect(detail).toContain('[CI run](https://github.com/org/repo/actions/runs/123)');
expect(detail).toContain('Compare metrics and generate summary');
});

test('omits CI run link when ciRunUrl not provided', () => {
const detail = formatMetricsDetail([{
snapshot: 'cassandra_v2',
added: 1, removed: 0, modified: 0,
metric_names: ['http_server_duration'],
}]);
expect(detail).not.toContain('added');
expect(detail).toContain('5 removed');
expect(detail).not.toContain('modified');
expect(detail).not.toContain('[CI run]');
});

test('renders per-snapshot artifact download link when artifactUrlPrefix and artifact_id provided', () => {
const detail = formatMetricsDetail([{
snapshot: 'cassandra_v2',
added: 1, removed: 0, modified: 0,
metric_names: ['http_server_duration'],
artifact_id: 6359406281,
}], { artifactUrlPrefix: 'https://github.com/org/repo/actions/runs/123/artifacts' });
expect(detail).toContain('[⬇️ download diff](https://github.com/org/repo/actions/runs/123/artifacts/6359406281)');
});

test('omits artifact download link when artifact_id is null', () => {
const detail = formatMetricsDetail([{
snapshot: 'cassandra_v2',
added: 1, removed: 0, modified: 0,
metric_names: ['http_server_duration'],
artifact_id: null,
}], { artifactUrlPrefix: 'https://github.com/org/repo/actions/runs/123/artifacts' });
expect(detail).not.toContain('download diff');
});

test('omits artifact download link when artifactUrlPrefix not provided', () => {
const detail = formatMetricsDetail([{
snapshot: 'cassandra_v2',
added: 1, removed: 0, modified: 0,
metric_names: ['http_server_duration'],
artifact_id: 6359406281,
}]);
expect(detail).not.toContain('download diff');
});
});

Expand Down Expand Up @@ -420,6 +497,31 @@ describe('buildCommentBody', () => {
expect(metricsPos).toBeLessThan(detailsPos);
expect(detailsPos).toBeLessThan(coveragePos);
});

test('includes CI run link in metrics detail when ciRunUrl provided', () => {
const snapshots = [{
snapshot: 'cassandra_v2',
added: 1, removed: 0, modified: 0,
metric_names: ['http_server_duration'],
artifact_id: null,
}];
const ciRunUrl = 'https://github.com/org/repo/actions/runs/999';
const body = buildCommentBody('❌ 1 metric change(s) detected', coverageText, footer, { metricsSnapshots: snapshots, ciRunUrl });
expect(body).toContain(`[CI run](${ciRunUrl})`);
expect(body).toContain('Compare metrics and generate summary');
});

test('includes per-snapshot artifact download link when artifactUrlPrefix and artifact_id provided', () => {
const snapshots = [{
snapshot: 'cassandra_v2',
added: 1, removed: 0, modified: 0,
metric_names: ['http_server_duration'],
artifact_id: 6359406281,
}];
const artifactUrlPrefix = 'https://github.com/org/repo/actions/runs/999/artifacts';
const body = buildCommentBody('❌ 1 metric change(s) detected', coverageText, footer, { metricsSnapshots: snapshots, artifactUrlPrefix });
expect(body).toContain(`[⬇️ download diff](${artifactUrlPrefix}/6359406281)`);
});
});

// ── postCheckRun ──────────────────────────────────────────────────────────────
Expand Down
13 changes: 7 additions & 6 deletions .github/workflows/ci-summary-report-publish.yml
Original file line number Diff line number Diff line change
Expand Up @@ -116,12 +116,13 @@ jobs:
await handler({
github, core, fs: require('fs'),
inputs: {
owner: context.repo.owner,
repo: context.repo.repo,
headSha: '${{ steps.source-run.outputs.head_sha }}',
prNumber: '${{ steps.source-run.outputs.pr_number || steps.pr.outputs.pr_number }}',
ciRunUrl: '${{ steps.source-run.outputs.source_run_url }}',
publishUrl: '${{ steps.source-run.outputs.summary_run_url }}',
owner: context.repo.owner,
repo: context.repo.repo,
headSha: '${{ steps.source-run.outputs.head_sha }}',
prNumber: '${{ steps.source-run.outputs.pr_number || steps.pr.outputs.pr_number }}',
ciRunUrl: '${{ steps.source-run.outputs.source_run_url }}',
publishUrl: '${{ steps.source-run.outputs.summary_run_url }}',
sourceRunId: '${{ steps.source-run.outputs.run_id }}',
},
});
env:
Expand Down
2 changes: 2 additions & 0 deletions .github/workflows/ci-summary-report.yml
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,8 @@ jobs:

- name: Compare metrics and generate summary
id: compare-metrics
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
shell: bash
run: bash ./scripts/e2e/metrics_summary.sh

Expand Down
51 changes: 28 additions & 23 deletions scripts/e2e/compare_metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,7 @@ def parse_metrics(content):
return metrics,metrics_exclusion_count


def generate_diff(file1_content, file2_content):
def generate_diff(baseline_content, current_content):
"""Compare two Prometheus metrics snapshots and return a unified diff of metric names.

The input files are raw Prometheus text exposition format, scraped directly from
Expand All @@ -132,21 +132,26 @@ def generate_diff(file1_content, file2_content):
The diff is performed on these sorted, value-free metric strings. If the two
snapshots produce the same set of strings the diff is empty and this function
returns ''. When there are differences, the return value is a unified diff
using the standard convention:
- lines = present in baseline but absent from current → regression/removed
+ lines = present in current but absent from baseline → newly added
followed by optional comment lines reporting how many metrics were excluded, e.g.:
# Metrics excluded from A: 3
# Metrics excluded from B: 5
# Metrics excluded from baseline: 3
# Metrics excluded from current: 5
These comment lines (prefixed with `# `) are appended only when the diff is
non-empty; they are informational context, not metric differences themselves.
"""
if isinstance(file1_content, list):
file1_content = ''.join(file1_content)
if isinstance(file2_content, list):
file2_content = ''.join(file2_content)
if isinstance(baseline_content, list):
baseline_content = ''.join(baseline_content)
if isinstance(current_content, list):
current_content = ''.join(current_content)

metrics1,excluded_metrics_count1 = parse_metrics(file1_content)
metrics2,excluded_metrics_count2 = parse_metrics(file2_content)
baseline_metrics, excluded_count_baseline = parse_metrics(baseline_content)
current_metrics, excluded_count_current = parse_metrics(current_content)

diff = list(unified_diff(metrics1, metrics2, lineterm='', n=0))
# unified_diff(baseline, current): - = in baseline but not current (removed/regression),
# + = in current but not baseline (newly added).
diff = list(unified_diff(baseline_metrics, current_metrics, lineterm='', n=0))

# Exclusion counts are informational context appended to the diff output.
# They must not be written when the diff itself is empty: two snapshots with
Expand All @@ -155,12 +160,12 @@ def generate_diff(file1_content, file2_content):
if len(diff) == 0:
return ''

total_excluded = excluded_metrics_count1 + excluded_metrics_count2
total_excluded = excluded_count_baseline + excluded_count_current

exclusion_lines = ''
if total_excluded > 0:
exclusion_lines = f'\n# Metrics excluded from A: {excluded_metrics_count1}\n# Metrics excluded from B: {excluded_metrics_count2}'
exclusion_lines = f'\n# Metrics excluded from baseline: {excluded_count_baseline}\n# Metrics excluded from current: {excluded_count_current}'

return '\n'.join(diff) + exclusion_lines

def write_diff_file(diff_lines, output_path):
Expand All @@ -172,20 +177,20 @@ def write_diff_file(diff_lines, output_path):

def main():
parser = argparse.ArgumentParser(description='Generate diff between two Jaeger metric files')
parser.add_argument('--file1', help='Path to first metric file')
parser.add_argument('--file2', help='Path to second metric file')
parser.add_argument('--current', help='Path to the current metric file (e.g. from the PR)')
parser.add_argument('--baseline', help='Path to the baseline metric file (e.g. from main branch)')
parser.add_argument('--output', '-o', default='metrics_diff.txt',
help='Output diff file path (default: metrics_diff.txt)')

args = parser.parse_args()

# Read input files
file1_lines = read_metric_file(args.file1)
file2_lines = read_metric_file(args.file2)
baseline_lines = read_metric_file(args.baseline)
current_lines = read_metric_file(args.current)

Comment on lines 179 to +190
Copy link

Copilot AI Apr 10, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The new CLI flags --baseline/--current are not marked required and there’s no explicit validation before calling read_metric_file(args.baseline/current). If a caller forgets one flag, this will raise a less-clear exception (attempting to open None). Consider setting required=True on both arguments (or adding a friendly check + parser.error) to produce a clear usage error.

Copilot uses AI. Check for mistakes.
# Generate diff
diff_lines = generate_diff(file1_lines, file2_lines)
diff_lines = generate_diff(baseline_lines, current_lines)

# Check if there are any differences
if diff_lines:
print("differences found between the metric files.")
Expand Down
Loading
Loading