chore(perf): allow sharding efps tests #7592

Merged · 2 commits · Oct 15, 2024
71 changes: 62 additions & 9 deletions .github/workflows/efps.yml
@@ -19,12 +19,18 @@ on:
        default: false

jobs:
-  efps:
+  efps-test:
    timeout-minutes: 30
    runs-on: ubuntu-latest
    env:
      TURBO_TOKEN: ${{ secrets.TURBO_TOKEN }}
      TURBO_TEAM: ${{ vars.TURBO_TEAM }}
+    strategy:
+      fail-fast: false
+      matrix:
+        # Add more shards here if needed
+        shardIndex: [1, 2, 3]
+        shardTotal: [3]
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
@@ -84,18 +90,65 @@ jobs:
          REFERENCE_TAG: ${{ github.event.inputs.reference_tag || 'latest' }}
          ENABLE_PROFILER: ${{ github.event.inputs.enable_profiler || false }}
          RECORD_VIDEO: ${{ github.event.inputs.record_video || false }}
-        run: pnpm efps:test
+        run: pnpm efps:test -- -- --shard ${{ matrix.shardIndex }}/${{ matrix.shardTotal }}

      - uses: actions/upload-artifact@v3
        if: always()
        with:
          name: efps-report
          path: ${{ github.workspace }}/perf/efps/results/
          retention-days: 30

  merge-reports:
    if: always()
    needs: [efps-test]
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: 18

      - uses: pnpm/action-setup@v4
        name: Install pnpm
        id: pnpm-install
        with:
          run_install: false

      - name: Get pnpm store directory
        id: pnpm-cache
        shell: bash
        run: |
          echo "STORE_PATH=$(pnpm store path)" >> $GITHUB_OUTPUT

      - name: Cache node modules
        id: cache-node-modules
        uses: actions/cache@v4
        env:
          cache-name: cache-node-modules
        with:
          path: ${{ steps.pnpm-cache.outputs.STORE_PATH }}
          key: ${{ runner.os }}-pnpm-store-${{ env.cache-name }}-${{ hashFiles('**/pnpm-lock.yaml') }}
          restore-keys: |
            v1-${{ runner.os }}-pnpm-store-${{ env.cache-name }}-
            v1-${{ runner.os }}-pnpm-store-
            v1-${{ runner.os }}-

      - name: Install project dependencies
        run: pnpm install

      - name: Download blob reports from GitHub Actions Artifacts
        uses: actions/download-artifact@v3
        with:
          name: efps-report
          path: perf/efps/results

      - name: Write report
        run: pnpm efps:write:report

      - name: PR comment with report
        uses: thollander/actions-comment-pull-request@fabd468d3a1a0b97feee5f6b9e499eab0dd903f6 # v2
        if: ${{ github.event_name == 'pull_request' }}
        with:
          comment_tag: "efps-report"
          filePath: ${{ github.workspace }}/perf/efps/results/benchmark-results.md

      - uses: actions/upload-artifact@v3
        if: always()
        with:
          name: efps-report
          path: perf/efps/results
          retention-days: 30
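
The new `--shard ${{ matrix.shardIndex }}/${{ matrix.shardTotal }}` argument follows the common `index/total` sharding convention (the same shape as Playwright's `--shard` flag), and the doubled `-- --` forwards the flag through two layers of pnpm scripts, since the root `efps:test` script itself calls `pnpm test` inside `perf/efps`. The efps runner's own argument handling isn't part of this diff; the sketch below is a hypothetical illustration of how a runner can honor such a flag (`parseShard` and `selectShard` are invented names, not from this PR):

```ts
// Hypothetical sketch of `--shard index/total` handling; not from this PR.
interface Shard {
  index: number // 1-based shard index
  total: number // total number of shards
}

function parseShard(argv: string[]): Shard | undefined {
  const at = argv.indexOf('--shard')
  if (at === -1 || !argv[at + 1]) return undefined
  const match = /^(\d+)\/(\d+)$/.exec(argv[at + 1])
  if (!match) throw new Error(`Invalid --shard value: ${argv[at + 1]}`)
  const index = Number(match[1])
  const total = Number(match[2])
  if (index < 1 || index > total) throw new Error(`Shard index must be within 1..${total}`)
  return {index, total}
}

// Assign every nth test to this shard so that the union of all shards
// covers the full test list exactly once, with no overlap.
function selectShard<T>(tests: T[], {index, total}: Shard): T[] {
  return tests.filter((_, i) => i % total === index - 1)
}

// Example: `--shard 2/3` over five tests selects the 2nd and 5th.
const shard = parseShard(process.argv.slice(2))
const allTests = ['article', 'recipe', 'singleString', 'largeDocument', 'synthetic'] // illustrative names
console.log(shard ? selectShard(allTests, shard) : allTests)
```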
1 change: 1 addition & 0 deletions package.json
@@ -55,6 +55,7 @@
"e2e:start": "pnpm --filter studio-e2e-testing preview",
"etl": "node -r dotenv-flow/config -r esbuild-register scripts/etl",
"efps:test": "cd perf/efps && pnpm test",
"efps:write:report": "cd perf/efps && pnpm write:report",
"example:blog-studio": "cd examples/blog-studio && pnpm start",
"example:clean-studio": "cd examples/blog-studio && pnpm start",
"example:ecommerce-studio": "cd examples/blog-studio && pnpm start",
9 changes: 9 additions & 0 deletions perf/efps/.eslintrc.cjs
@@ -0,0 +1,9 @@
const path = require('node:path')

const ROOT_PATH = path.resolve(__dirname, '../../..')

module.exports = {
  rules: {
    'import/no-extraneous-dependencies': ['error', {packageDir: [ROOT_PATH, __dirname]}],
  },
}
159 changes: 159 additions & 0 deletions perf/efps/formatefpsResult.ts
@@ -0,0 +1,159 @@
import fs from 'node:fs'
import path from 'node:path'
import {fileURLToPath} from 'node:url'

import {type EfpsAbResult} from './types'
import {formatPercentageChange, isSignificantlyDifferent} from './utils'

// eslint-disable-next-line turbo/no-undeclared-env-vars
const REFERENCE_TAG = process.env.REFERENCE_TAG || 'latest'

let comparisonTable = `
| Benchmark | reference<br/><sup>latency of \`sanity@${REFERENCE_TAG}\`</sup> | experiment<br/><sup>latency of this branch</sup> | Δ (%)<br/><sup>latency difference</sup> | |
| :-- | :-- | :-- | :-- | --- |
`

const detailedInformationHeader = `
| Benchmark | latency | p75 | p90 | p99 | blocking time | test duration |
| --------- | ------: | --: | --: | --: | ------------: | ------------: |
`

let referenceTable = detailedInformationHeader
let experimentTable = detailedInformationHeader

// For markdown formatting without colors
const formatEfpsPlain = (latencyMs: number) => {
  const efps = 1000 / latencyMs
  const rounded = efps.toFixed(1)

  if (efps >= 100) return '99.9+'
  return rounded
}

const workspaceDir = path.dirname(fileURLToPath(import.meta.url))

async function writeEPSResults() {
  // read all the test results from the report directory
  const reportDir = path.join(workspaceDir, 'results', 'report')

  const jsonFiles = await fs.promises.readdir(reportDir)
  const testResults: Array<{
    name: string
    results: EfpsAbResult[]
  }> = []
  for (const jsonFile of jsonFiles) {
    const json = await fs.promises.readFile(path.join(reportDir, jsonFile), 'utf-8')
    const parsedJson = JSON.parse(json) as {name: string; results: EfpsAbResult[]}
    testResults.push(parsedJson)
  }

  for (const {name, results} of testResults.flat()) {
    for (const {experiment, reference} of results) {
      const significantlyDifferent = isSignificantlyDifferent(
        experiment.latency.p50,
        reference.latency.p50,
      )

      const sign = experiment.latency.p50 >= reference.latency.p50 ? '+' : ''
      const msDifference = `${sign}${(experiment.latency.p50 - reference.latency.p50).toFixed(0)}ms`
      const percentageChange = formatPercentageChange(experiment.latency.p50, reference.latency.p50)

      const benchmarkName = `${name} (${experiment.label})`

      comparisonTable +=
        // benchmark name
        `| ${benchmarkName} ` +
        // reference latency
        `| ${formatEfpsPlain(reference.latency.p50)} efps (${reference.latency.p50.toFixed(0)}ms) ` +
        // experiment latency
        `| ${formatEfpsPlain(experiment.latency.p50)} efps (${experiment.latency.p50.toFixed(0)}ms) ` +
        // difference
        `| ${msDifference} (${percentageChange}) ` +
        // status
        `| ${significantlyDifferent ? '🔴' : '✅'} ` +
        `|\n`

      referenceTable +=
        // benchmark name
        `| ${benchmarkName} ` +
        // latency
        `| ${reference.latency.p50.toFixed(0)}ms ` +
        // p75
        `| ${reference.latency.p75.toFixed(0)}ms ` +
        // p90
        `| ${reference.latency.p90.toFixed(0)}ms ` +
        // p99
        `| ${reference.latency.p99.toFixed(0)}ms ` +
        // blocking time
        `| ${reference.blockingTime.toFixed(0)}ms ` +
        // test duration
        `| ${(reference.runDuration / 1000).toFixed(1)}s ` +
        `|\n`

      experimentTable +=
        // benchmark name
        `| ${benchmarkName} ` +
        // latency
        `| ${experiment.latency.p50.toFixed(0)}ms ` +
        // p75
        `| ${experiment.latency.p75.toFixed(0)}ms ` +
        // p90
        `| ${experiment.latency.p90.toFixed(0)}ms ` +
        // p99
        `| ${experiment.latency.p99.toFixed(0)}ms ` +
        // blocking time
        `| ${experiment.blockingTime.toFixed(0)}ms ` +
        // test duration
        `| ${(experiment.runDuration / 1000).toFixed(1)}s ` +
        `|\n`
    }
  }

  const markdown = `### ⚡️ Editor Performance Report

Updated ${new Date().toUTCString()}

${comparisonTable}

> **efps** — editor "frames per second". The number of updates assumed to be possible within a second.
>
> Derived from input latency. \`efps = 1000 / input_latency\`

<details>

<summary><strong>Detailed information</strong></summary>

### 🏠 Reference result

The performance result of \`sanity@${REFERENCE_TAG}\`


${referenceTable}

### 🧪 Experiment result

The performance result of this branch

${experimentTable}

### 📚 Glossary

> #### column definitions
>
> - **benchmark** — the name of the test, e.g. "article", followed by the label of the field being measured, e.g. "(title)".
> - **latency** — the time between when a key was pressed and when it was rendered, derived from a set of samples. the median (p50) is shown because it best represents the typical latency.
> - **p75** — the 75th percentile of the input latency in the test run. 75% of the sampled inputs in this benchmark were processed faster than this value. this provides insight into the upper range of typical performance.
> - **p90** — the 90th percentile of the input latency in the test run. 90% of the sampled inputs were faster than this. this metric helps identify slower interactions that occurred less frequently during the benchmark.
> - **p99** — the 99th percentile of the input latency in the test run. only 1% of sampled inputs were slower than this. this represents the worst-case scenarios encountered during the benchmark, useful for identifying potential performance outliers.
> - **blocking time** — the total time during which the main thread was blocked, preventing user input and UI updates. this metric helps identify performance bottlenecks that may cause the interface to feel unresponsive.
> - **test duration** — how long the test run took to complete.

</details>
`

  // Write markdown file to root of results
  const markdownOutputPath = path.join(workspaceDir, 'results', 'benchmark-results.md')
  await fs.promises.writeFile(markdownOutputPath, markdown)
}

writeEPSResults()
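
`formatPercentageChange` and `isSignificantlyDifferent` are imported from `./utils`, a module this diff doesn't show. A plausible sketch of those helpers, assuming a plain relative-difference test (both bodies and the 10% threshold are assumptions for illustration, not the PR's actual implementation):

```ts
// Hypothetical stand-ins for the ./utils helpers; the real module is not
// part of this diff, and the 10% threshold is an assumed example value.
export function formatPercentageChange(experiment: number, reference: number): string {
  const change = ((experiment - reference) / reference) * 100
  return `${change >= 0 ? '+' : ''}${change.toFixed(1)}%`
}

export function isSignificantlyDifferent(experiment: number, reference: number): boolean {
  // treat a latency change of more than 10% of the reference as significant
  return Math.abs(experiment - reference) / reference > 0.1
}
```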