diff --git a/.github/workflows/efps.yml b/.github/workflows/efps.yml
index 7ed7e2c130c..bae4f135cd4 100644
--- a/.github/workflows/efps.yml
+++ b/.github/workflows/efps.yml
@@ -19,12 +19,18 @@ on:
default: false
jobs:
- efps:
+ efps-test:
timeout-minutes: 30
runs-on: ubuntu-latest
env:
TURBO_TOKEN: ${{ secrets.TURBO_TOKEN }}
TURBO_TEAM: ${{ vars.TURBO_TEAM }}
+ strategy:
+ fail-fast: false
+ matrix:
+ # Add more shards here if needed
+ shardIndex: [1, 2, 3]
+ shardTotal: [3]
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
@@ -84,7 +90,61 @@ jobs:
REFERENCE_TAG: ${{ github.event.inputs.reference_tag || 'latest' }}
ENABLE_PROFILER: ${{ github.event.inputs.enable_profiler || false }}
RECORD_VIDEO: ${{ github.event.inputs.record_video || false }}
- run: pnpm efps:test
+ run: pnpm efps:test -- -- --shard ${{ matrix.shardIndex }}/${{ matrix.shardTotal }}
+
+ - uses: actions/upload-artifact@v3
+ if: always()
+ with:
+ name: efps-report
+ path: ${{ github.workspace }}/perf/efps/results/
+ retention-days: 30
+
+ merge-reports:
+ if: always()
+ needs: [efps-test]
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-node@v4
+ with:
+ node-version: 18
+
+ - uses: pnpm/action-setup@v4
+ name: Install pnpm
+ id: pnpm-install
+ with:
+ run_install: false
+
+ - name: Get pnpm store directory
+ id: pnpm-cache
+ shell: bash
+ run: |
+ echo "STORE_PATH=$(pnpm store path)" >> $GITHUB_OUTPUT
+
+ - name: Cache node modules
+ id: cache-node-modules
+ uses: actions/cache@v4
+ env:
+ cache-name: cache-node-modules
+ with:
+ path: ${{ steps.pnpm-cache.outputs.STORE_PATH }}
+ key: ${{ runner.os }}-pnpm-store-${{ env.cache-name }}-${{ hashFiles('**/pnpm-lock.yaml') }}
+ restore-keys: |
+ v1-${{ runner.os }}-pnpm-store-${{ env.cache-name }}-
+ v1-${{ runner.os }}-pnpm-store-
+ v1-${{ runner.os }}-
+
+ - name: Install project dependencies
+ run: pnpm install
+
+ - name: Download blob reports from Github Actions Artifacts
+ uses: actions/download-artifact@v3
+ with:
+ name: efps-report
+ path: perf/efps/results
+
+ - name: Write report
+ run: pnpm efps:write:report
- name: PR comment with report
uses: thollander/actions-comment-pull-request@fabd468d3a1a0b97feee5f6b9e499eab0dd903f6 # v2
@@ -92,10 +152,3 @@ jobs:
with:
comment_tag: "efps-report"
filePath: ${{ github.workspace }}/perf/efps/results/benchmark-results.md
-
- - uses: actions/upload-artifact@v3
- if: always()
- with:
- name: efps-report
- path: perf/efps/results
- retention-days: 30
diff --git a/package.json b/package.json
index 2093a9a5f07..2559209638f 100644
--- a/package.json
+++ b/package.json
@@ -55,6 +55,7 @@
"e2e:start": "pnpm --filter studio-e2e-testing preview",
"etl": "node -r dotenv-flow/config -r esbuild-register scripts/etl",
"efps:test": "cd perf/efps && pnpm test",
+ "efps:write:report": "cd perf/efps && pnpm write:report",
"example:blog-studio": "cd examples/blog-studio && pnpm start",
"example:clean-studio": "cd examples/blog-studio && pnpm start",
"example:ecommerce-studio": "cd examples/blog-studio && pnpm start",
diff --git a/perf/efps/.eslintrc.cjs b/perf/efps/.eslintrc.cjs
new file mode 100644
index 00000000000..c9a446c5adf
--- /dev/null
+++ b/perf/efps/.eslintrc.cjs
@@ -0,0 +1,9 @@
+const path = require('node:path')
+
+const ROOT_PATH = path.resolve(__dirname, '../../..')
+
+module.exports = {
+ rules: {
+ 'import/no-extraneous-dependencies': ['error', {packageDir: [ROOT_PATH, __dirname]}],
+ },
+}
diff --git a/perf/efps/formatefpsResult.ts b/perf/efps/formatefpsResult.ts
new file mode 100644
index 00000000000..b2e144d7d1a
--- /dev/null
+++ b/perf/efps/formatefpsResult.ts
@@ -0,0 +1,159 @@
+import fs from 'node:fs'
+import path from 'node:path'
+import {fileURLToPath} from 'node:url'
+
+import {type EfpsAbResult} from './types'
+import {formatPercentageChange, isSignificantlyDifferent} from './utils'
+
+// eslint-disable-next-line turbo/no-undeclared-env-vars
+const REFERENCE_TAG = process.env.REFERENCE_TAG || 'latest'
+
+let comparisonTable = `
+| Benchmark | reference<br/><sup>latency of \`sanity@${REFERENCE_TAG}\`</sup> | experiment<br/><sup>latency of this branch</sup> | Δ (%)<br/><sup>latency difference</sup> | |
+| :-- | :-- | :-- | :-- | --- |
+`
+
+const detailedInformationHeader = `
+| Benchmark | latency | p75 | p90 | p99 | blocking time | test duration |
+| --------- | ------: | --: | --: | --: | ------------: | ------------: |
+`
+
+let referenceTable = detailedInformationHeader
+let experimentTable = detailedInformationHeader
+
+// For markdown formatting without colors
+const formatEfpsPlain = (latencyMs: number) => {
+ const efps = 1000 / latencyMs
+ const rounded = efps.toFixed(1)
+
+ if (efps >= 100) return '99.9+'
+ return rounded
+}
+
+const workspaceDir = path.dirname(fileURLToPath(import.meta.url))
+
+async function writeEPSResults() {
+ // read all the test results from the report directory
+ const reportDir = path.join(workspaceDir, 'results', 'report')
+
+ const jsonFiles = await fs.promises.readdir(reportDir)
+ const testResults: Array<{
+ name: string
+ results: EfpsAbResult[]
+ }> = []
+ for (const jsonFile of jsonFiles) {
+ const json = await fs.promises.readFile(path.join(reportDir, jsonFile), 'utf-8')
+ const parsedJson = JSON.parse(json) as {name: string; results: EfpsAbResult[]}
+ testResults.push(parsedJson)
+ }
+
+ for (const {name, results} of testResults.flat()) {
+ for (const {experiment, reference} of results) {
+ const significantlyDifferent = isSignificantlyDifferent(
+ experiment.latency.p50,
+ reference.latency.p50,
+ )
+
+ const sign = experiment.latency.p50 >= reference.latency.p50 ? '+' : ''
+ const msDifference = `${sign}${(experiment.latency.p50 - reference.latency.p50).toFixed(0)}ms`
+ const percentageChange = formatPercentageChange(experiment.latency.p50, reference.latency.p50)
+
+ const benchmarkName = `${name} (${experiment.label})`
+
+ comparisonTable +=
+ // benchmark name
+ `| ${benchmarkName} ` +
+ // reference latency
+ `| ${formatEfpsPlain(reference.latency.p50)} efps (${reference.latency.p50.toFixed(0)}ms) ` +
+ // experiment latency
+ `| ${formatEfpsPlain(experiment.latency.p50)} efps (${experiment.latency.p50.toFixed(0)}ms) ` +
+ // difference
+ `| ${msDifference} (${percentageChange}) ` +
+ // status
+      `| ${significantlyDifferent ? '🔴' : '✅'} ` +
+ `|\n`
+
+ referenceTable +=
+ // benchmark name
+ `| ${benchmarkName} ` +
+ // latency
+ `| ${reference.latency.p50.toFixed(0)}ms ` +
+ // p75
+ `| ${reference.latency.p75.toFixed(0)}ms ` +
+ // p90
+ `| ${reference.latency.p90.toFixed(0)}ms ` +
+ // p99
+ `| ${reference.latency.p99.toFixed(0)}ms ` +
+ // blocking time
+ `| ${reference.blockingTime.toFixed(0)}ms ` +
+ // test duration
+ `| ${(reference.runDuration / 1000).toFixed(1)}s ` +
+ `|\n`
+
+ experimentTable +=
+ // benchmark name
+ `| ${benchmarkName} ` +
+ // latency
+ `| ${experiment.latency.p50.toFixed(0)}ms ` +
+ // p75
+ `| ${experiment.latency.p75.toFixed(0)}ms ` +
+ // p90
+ `| ${experiment.latency.p90.toFixed(0)}ms ` +
+ // p99
+ `| ${experiment.latency.p99.toFixed(0)}ms ` +
+ // blocking time
+ `| ${experiment.blockingTime.toFixed(0)}ms ` +
+ // test duration
+ `| ${(experiment.runDuration / 1000).toFixed(1)}s ` +
+ `|\n`
+ }
+ }
+
+const markdown = `### ⚡️ Editor Performance Report
+
+Updated ${new Date().toUTCString()}
+
+${comparisonTable}
+
+> **efps** — editor "frames per second". The number of updates assumed to be possible within a second.
+>
+> Derived from input latency. \`efps = 1000 / input_latency\`
+
+<details>
+
+<summary>Detailed information</summary>
+
+### 🏠 Reference result
+
+The performance result of \`sanity@${REFERENCE_TAG}\`
+
+
+${referenceTable}
+
+### 🧪 Experiment result
+
+The performance result of this branch
+
+${experimentTable}
+
+### 📚 Glossary
+
+> #### column definitions
+>
+> - **benchmark** — the name of the test, e.g. "article", followed by the label of the field being measured, e.g. "(title)".
+> - **latency** — the time between when a key was pressed and when it was rendered. derived from a set of samples. the median (p50) is shown to show the most common latency.
+> - **p75** — the 75th percentile of the input latency in the test run. 75% of the sampled inputs in this benchmark were processed faster than this value. this provides insight into the upper range of typical performance.
+> - **p90** — the 90th percentile of the input latency in the test run. 90% of the sampled inputs were faster than this. this metric helps identify slower interactions that occurred less frequently during the benchmark.
+> - **p99** — the 99th percentile of the input latency in the test run. only 1% of sampled inputs were slower than this. this represents the worst-case scenarios encountered during the benchmark, useful for identifying potential performance outliers.
+> - **blocking time** — the total time during which the main thread was blocked, preventing user input and UI updates. this metric helps identify performance bottlenecks that may cause the interface to feel unresponsive.
+> - **test duration** — how long the test run took to complete.
+
+</details>
+`
+
+ // Write markdown file to root of results
+ const markdownOutputPath = path.join(workspaceDir, 'results', 'benchmark-results.md')
+ await fs.promises.writeFile(markdownOutputPath, markdown)
+}
+
+writeEPSResults()
diff --git a/perf/efps/index.ts b/perf/efps/index.ts
index 433c41040f5..d56c21fccfc 100644
--- a/perf/efps/index.ts
+++ b/perf/efps/index.ts
@@ -12,6 +12,8 @@ import {createClient} from '@sanity/client'
import chalk from 'chalk'
import Table from 'cli-table3'
import Ora from 'ora'
+import yargs from 'yargs'
+import {hideBin} from 'yargs/helpers'
import {exec} from './helpers/exec'
import {runTest} from './runTest'
@@ -19,8 +21,8 @@ import article from './tests/article/article'
import recipe from './tests/recipe/recipe'
import synthetic from './tests/synthetic/synthetic'
import {type EfpsAbResult, type EfpsResult, type EfpsTest} from './types'
+import {formatPercentageChange, isSignificantlyDifferent} from './utils'
-const WARNING_THRESHOLD = 0.2
const TEST_ATTEMPTS = process.env.CI ? 3 : 1
const HEADLESS = true
@@ -62,6 +64,32 @@ const resultsDir = path.join(
.toLowerCase()}`,
)
+const argv = await yargs(hideBin(process.argv)).option('shard', {
+ describe:
+ 'Shard number in the format "1/3" where 1 is the current shard and 3 is the total shards',
+ type: 'string',
+}).argv
+
+// Function to parse shard argument
+function parseShard(shard: string) {
+ const [current, total] = shard.split('/').map(Number)
+ if (!current || !total || current > total || current < 1) {
+ throw new Error(`Invalid shard format: ${shard}. It should be in the format "1/3"`)
+ }
+ return {current, total}
+}
+
+// Function to select tests based on shard
+function getTestsForShard(tests: EfpsTest[], shard: {current: number; total: number}) {
+ const testsPerShard = Math.ceil(tests.length / shard.total)
+ const start = (shard.current - 1) * testsPerShard
+ const end = start + testsPerShard
+ return tests.slice(start, end)
+}
+
+const shard = argv.shard ? parseShard(argv.shard) : null
+const selectedTests = shard ? getTestsForShard(TESTS, shard) : TESTS
+
const getSanityPkgPathForTag = async (tag: string) => {
const tmpDir = path.join(os.tmpdir(), `sanity-${tag}`)
@@ -92,28 +120,11 @@ const formatEfps = (latencyMs: number) => {
return chalk.red(rounded)
}
-const formatPercentageChange = (experiment: number, reference: number): string => {
- if (experiment < 16 && reference < 16) return '-/-%'
- const delta = (experiment - reference) / reference
- if (!delta) return '-/-%'
- const percentage = delta * 100
- const rounded = percentage.toFixed(1)
- const sign = delta >= 0 ? '+' : ''
- return `${sign}${rounded}%`
-}
-
-// For markdown formatting without colors
-const formatEfpsPlain = (latencyMs: number) => {
- const efps = 1000 / latencyMs
- const rounded = efps.toFixed(1)
-
- if (efps >= 100) return '99.9+'
- return rounded
-}
-
const spinner = Ora()
-spinner.info(`Running ${TESTS.length} tests: ${TESTS.map((t) => `'${t.name}'`).join(', ')}`)
+spinner.info(
+ `Running ${selectedTests.length} tests: ${selectedTests.map((t) => `'${t.name}'`).join(', ')}`,
+)
await exec({
text: ['Building the monorepoβ¦', 'Built monorepo'],
@@ -212,14 +223,29 @@ async function runAbTest(test: EfpsTest) {
)
}
-for (let i = 0; i < TESTS.length; i++) {
- const test = TESTS[i]
+for (let i = 0; i < selectedTests.length; i++) {
+ const test = selectedTests[i]
testResults.push({
name: test.name,
results: await runAbTest(test),
})
}
+// Write the test results as a json file to the results/report directory
+// the name should be in format `test-results__${shard}-${total-shards}.json`
+
+// Create the report directory if it doesn't exist
+await fs.promises.mkdir(path.join(workspaceDir, 'results', 'report'), {recursive: true})
+await fs.promises.writeFile(
+ path.join(
+ workspaceDir,
+ 'results',
+ 'report',
+ `test-results__${shard?.current}-${shard?.total}.json`,
+ ),
+ JSON.stringify(testResults, null, 2),
+)
+
const comparisonTableCli = new Table({
head: ['Benchmark', 'reference', 'experiment', 'Ξ (%)', ''].map((cell) => chalk.cyan(cell)),
})
@@ -237,13 +263,6 @@ const detailedInformationCliHead = [
const referenceTableCli = new Table({head: detailedInformationCliHead})
const experimentTableCli = new Table({head: detailedInformationCliHead})
-function isSignificantlyDifferent(experiment: number, reference: number) {
- // values are too small to and are already performing well
- if (experiment < 16 && reference < 16) return false
- const delta = (experiment - reference) / reference
- return delta >= WARNING_THRESHOLD
-}
-
for (const {name, results} of testResults) {
for (const {experiment, reference} of results) {
const significantlyDifferent = isSignificantlyDifferent(
@@ -296,124 +315,3 @@ console.log(referenceTableCli.toString())
console.log()
console.log('Experiment result')
console.log(experimentTableCli.toString())
-
-let comparisonTable = `
-| Benchmark | reference<br/><sup>latency of \`sanity@${REFERENCE_TAG}\`</sup> | experiment<br/><sup>latency of this branch</sup> | Δ (%)<br/><sup>latency difference</sup> | |
-| :-- | :-- | :-- | :-- | --- |
-`
-
-const detailedInformationHeader = `
-| Benchmark | latency | p75 | p90 | p99 | blocking time | test duration |
-| --------- | ------: | --: | --: | --: | ------------: | ------------: |
-`
-
-let referenceTable = detailedInformationHeader
-let experimentTable = detailedInformationHeader
-
-for (const {name, results} of testResults) {
- for (const {experiment, reference} of results) {
- const significantlyDifferent = isSignificantlyDifferent(
- experiment.latency.p50,
- reference.latency.p50,
- )
-
- const sign = experiment.latency.p50 >= reference.latency.p50 ? '+' : ''
- const msDifference = `${sign}${(experiment.latency.p50 - reference.latency.p50).toFixed(0)}ms`
- const percentageChange = formatPercentageChange(experiment.latency.p50, reference.latency.p50)
-
- const benchmarkName = `${name} (${experiment.label})`
-
- comparisonTable +=
- // benchmark name
- `| ${benchmarkName} ` +
- // reference latency
- `| ${formatEfpsPlain(reference.latency.p50)} efps (${reference.latency.p50.toFixed(0)}ms) ` +
- // experiment latency
- `| ${formatEfpsPlain(experiment.latency.p50)} efps (${experiment.latency.p50.toFixed(0)}ms) ` +
- // difference
- `| ${msDifference} (${percentageChange}) ` +
- // status
-      `| ${significantlyDifferent ? '🔴' : '✅'} ` +
- `|\n`
-
- referenceTable +=
- // benchmark name
- `| ${benchmarkName} ` +
- // latency
- `| ${reference.latency.p50.toFixed(0)}ms ` +
- // p75
- `| ${reference.latency.p75.toFixed(0)}ms ` +
- // p90
- `| ${reference.latency.p90.toFixed(0)}ms ` +
- // p99
- `| ${reference.latency.p99.toFixed(0)}ms ` +
- // blocking time
- `| ${reference.blockingTime.toFixed(0)}ms ` +
- // test duration
- `| ${(reference.runDuration / 1000).toFixed(1)}s ` +
- `|\n`
-
- experimentTable +=
- // benchmark name
- `| ${benchmarkName} ` +
- // latency
- `| ${experiment.latency.p50.toFixed(0)}ms ` +
- // p75
- `| ${experiment.latency.p75.toFixed(0)}ms ` +
- // p90
- `| ${experiment.latency.p90.toFixed(0)}ms ` +
- // p99
- `| ${experiment.latency.p99.toFixed(0)}ms ` +
- // blocking time
- `| ${experiment.blockingTime.toFixed(0)}ms ` +
- // test duration
- `| ${(experiment.runDuration / 1000).toFixed(1)}s ` +
- `|\n`
- }
-}
-
-const markdown = `### ⚡️ Editor Performance Report
-
-Updated ${new Date().toUTCString()}
-
-${comparisonTable}
-
-> **efps** — editor "frames per second". The number of updates assumed to be possible within a second.
->
-> Derived from input latency. \`efps = 1000 / input_latency\`
-
-<details>
-
-<summary>Detailed information</summary>
-
-### 🏠 Reference result
-
-The performance result of \`sanity@${REFERENCE_TAG}\`
-
-
-${referenceTable}
-
-### 🧪 Experiment result
-
-The performance result of this branch
-
-${experimentTable}
-
-### 📚 Glossary
-
-> #### column definitions
->
-> - **benchmark** — the name of the test, e.g. "article", followed by the label of the field being measured, e.g. "(title)".
-> - **latency** — the time between when a key was pressed and when it was rendered. derived from a set of samples. the median (p50) is shown to show the most common latency.
-> - **p75** — the 75th percentile of the input latency in the test run. 75% of the sampled inputs in this benchmark were processed faster than this value. this provides insight into the upper range of typical performance.
-> - **p90** — the 90th percentile of the input latency in the test run. 90% of the sampled inputs were faster than this. this metric helps identify slower interactions that occurred less frequently during the benchmark.
-> - **p99** — the 99th percentile of the input latency in the test run. only 1% of sampled inputs were slower than this. this represents the worst-case scenarios encountered during the benchmark, useful for identifying potential performance outliers.
-> - **blocking time** — the total time during which the main thread was blocked, preventing user input and UI updates. this metric helps identify performance bottlenecks that may cause the interface to feel unresponsive.
-> - **test duration** — how long the test run took to complete.
-
-</details>
-`
-
-// Write markdown file to root of results
-const markdownOutputPath = path.join(workspaceDir, 'results', 'benchmark-results.md')
-await fs.promises.writeFile(markdownOutputPath, markdown)
diff --git a/perf/efps/package.json b/perf/efps/package.json
index 28ab326549b..09318541926 100644
--- a/perf/efps/package.json
+++ b/perf/efps/package.json
@@ -12,7 +12,8 @@
},
"scripts": {
"start": "node --import @swc-node/register/esm-register ./index.ts",
- "test": "npm start"
+ "test": "npm start",
+ "write:report": "node --import @swc-node/register/esm-register ./formatefpsResult.ts"
},
"devDependencies": {
"@sanity/client": "^6.22.1",
@@ -20,6 +21,7 @@
"@types/react": "^18.3.3",
"@types/react-dom": "^18.3.0",
"@types/serve-handler": "^6.1.4",
+ "@types/yargs": "^17.0.7",
"@vitejs/plugin-react": "^4.3.1",
"chalk": "^4.1.2",
"cli-table3": "^0.6.5",
@@ -33,6 +35,7 @@
"sanity": "workspace:*",
"serve-handler": "^6.1.5",
"source-map": "^0.7.4",
- "vite": "^5.4.2"
+ "vite": "^5.4.2",
+ "yargs": "17.3.0"
}
}
diff --git a/perf/efps/utils.ts b/perf/efps/utils.ts
new file mode 100644
index 00000000000..d7b05bbc7c1
--- /dev/null
+++ b/perf/efps/utils.ts
@@ -0,0 +1,18 @@
+const WARNING_THRESHOLD = 0.2
+
+export function isSignificantlyDifferent(experiment: number, reference: number): boolean {
+ // values are too small to and are already performing well
+ if (experiment < 16 && reference < 16) return false
+ const delta = (experiment - reference) / reference
+ return delta >= WARNING_THRESHOLD
+}
+
+export const formatPercentageChange = (experiment: number, reference: number): string => {
+ if (experiment < 16 && reference < 16) return '-/-%'
+ const delta = (experiment - reference) / reference
+ if (!delta) return '-/-%'
+ const percentage = delta * 100
+ const rounded = percentage.toFixed(1)
+ const sign = delta >= 0 ? '+' : ''
+ return `${sign}${rounded}%`
+}
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 8b43258789d..81a926b00a4 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -1932,6 +1932,9 @@ importers:
'@types/serve-handler':
specifier: ^6.1.4
version: 6.1.4
+ '@types/yargs':
+ specifier: ^17.0.7
+ version: 17.0.33
'@vitejs/plugin-react':
specifier: ^4.3.1
version: 4.3.1(vite@5.4.5(@types/node@22.5.4)(terser@5.32.0))
@@ -1974,6 +1977,9 @@ importers:
vite:
specifier: ^5.4.2
version: 5.4.5(@types/node@22.5.4)(terser@5.32.0)
+ yargs:
+ specifier: 17.3.0
+ version: 17.3.0
perf/studio:
dependencies:
@@ -12013,6 +12019,10 @@ packages:
resolution: {integrity: sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==}
engines: {node: '>=10'}
+ yargs@17.3.0:
+ resolution: {integrity: sha512-GQl1pWyDoGptFPJx9b9L6kmR33TGusZvXIZUT+BOz9f7X2L94oeAskFYLEg/FkhV06zZPBYLvLZRWeYId29lew==}
+ engines: {node: '>=12'}
+
yargs@17.7.2:
resolution: {integrity: sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==}
engines: {node: '>=12'}
@@ -24457,6 +24467,16 @@ snapshots:
y18n: 5.0.8
yargs-parser: 20.2.9
+ yargs@17.3.0:
+ dependencies:
+ cliui: 7.0.4
+ escalade: 3.2.0
+ get-caller-file: 2.0.5
+ require-directory: 2.1.1
+ string-width: 4.2.3
+ y18n: 5.0.8
+ yargs-parser: 21.1.1
+
yargs@17.7.2:
dependencies:
cliui: 8.0.1