diff --git a/docs/config/index.md b/docs/config/index.md
index 0d4411014141..ae5e11f3e97d 100644
--- a/docs/config/index.md
+++ b/docs/config/index.md
@@ -312,6 +312,32 @@ By providing an object instead of a string you can define individual outputs whe
 To provide object via CLI command, use the following syntax: `--outputFile.json=./path --outputFile.junit=./other-path`.
 
+#### benchmark.outputJson 1.6.0 {#benchmark-outputJson}
+
+- **Type:** `string | undefined`
+- **Default:** `undefined`
+
+A file path to store the benchmark results, which can later be passed to the `--compare` option.
+
+For example:
+
+```sh
+# save the main branch's results
+git checkout main
+vitest bench --outputJson main.json
+
+# switch to the feature branch and compare against main
+git checkout feature
+vitest bench --compare main.json
+```
+
+#### benchmark.compare 1.6.0 {#benchmark-compare}
+
+- **Type:** `string | undefined`
+- **Default:** `undefined`
+
+A file path to a previous benchmark result to compare against the current run.
+
 ### alias
 
 - **Type:** `Record<string, string> | Array<{ find: string | RegExp, replacement: string, customResolver?: ResolverFunction | ResolverObject }>`
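Since `compare` and `outputJson` are also declared on `BenchmarkUserOptions` (see `types/benchmark.ts` further down), they should be settable from a config file as well, although this PR only exercises the CLI path. A minimal sketch, assuming config-file values flow into `resolved.benchmark` the same way the CLI flags do:

```ts
import { defineConfig } from 'vitest/config'

export default defineConfig({
  benchmark: {
    // equivalent to `vitest bench --outputJson main.json`
    outputJson: 'main.json',
    // equivalent to `vitest bench --compare main.json`
    compare: 'main.json',
  },
})
```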
diff --git a/docs/guide/features.md b/docs/guide/features.md
index 39c1bee5abdd..db0be1c40cbe 100644
--- a/docs/guide/features.md
+++ b/docs/guide/features.md
@@ -210,6 +210,9 @@ describe('sort', () => {
 })
 ```
 
+[screenshot: Benchmark report]
+
 ## Type Testing Experimental {#type-testing}
 
 Since Vitest 0.25.0 you can [write tests](/guide/testing-types) to catch type regressions. Vitest comes with [`expect-type`](https://github.com/mmkal/expect-type) package to provide you with a similar and easy to understand API.
diff --git a/packages/vitest/src/defaults.ts b/packages/vitest/src/defaults.ts
index a07b50cd3d6f..4a065414a40f 100644
--- a/packages/vitest/src/defaults.ts
+++ b/packages/vitest/src/defaults.ts
@@ -4,7 +4,7 @@ import { isCI } from './utils/env'
 export const defaultInclude = ['**/*.{test,spec}.?(c|m)[jt]s?(x)']
 export const defaultExclude = ['**/node_modules/**', '**/dist/**', '**/cypress/**', '**/.{idea,git,cache,output,temp}/**', '**/{karma,rollup,webpack,vite,vitest,jest,ava,babel,nyc,cypress,tsup,build,eslint,prettier}.config.*']
 
-export const benchmarkConfigDefaults: Required<Omit<BenchmarkUserOptions, 'outputFile'>> = {
+export const benchmarkConfigDefaults: Required<Omit<BenchmarkUserOptions, 'outputFile' | 'compare' | 'outputJson'>> = {
   include: ['**/*.{bench,benchmark}.?(c|m)[jt]s?(x)'],
   exclude: defaultExclude,
   includeSource: [],
diff --git a/packages/vitest/src/node/cli/cac.ts b/packages/vitest/src/node/cli/cac.ts
index 3eb2227157b5..dcdd78e3d501 100644
--- a/packages/vitest/src/node/cli/cac.ts
+++ b/packages/vitest/src/node/cli/cac.ts
@@ -1,14 +1,14 @@
 import { normalize } from 'pathe'
-import cac, { type CAC } from 'cac'
+import cac, { type CAC, type Command } from 'cac'
 import c from 'picocolors'
 import { version } from '../../../package.json'
 import { toArray } from '../../utils/base'
 import type { Vitest, VitestRunMode } from '../../types'
 import type { CliOptions } from './cli-api'
-import type { CLIOption } from './cli-config'
-import { cliOptionsConfig } from './cli-config'
+import type { CLIOption, CLIOptions as CLIOptionsConfig } from './cli-config'
+import { benchCliOptionsConfig, cliOptionsConfig } from './cli-config'
 
-function addCommand(cli: CAC, name: string, option: CLIOption<any>) {
+function addCommand(cli: CAC | Command, name: string, option: CLIOption<any>) {
   const commandName = option.alias || name
   let command = option.shorthand ? `-${option.shorthand}, --${commandName}` : `--${commandName}`
   if ('argument' in option)
@@ -56,17 +56,20 @@ interface CLIOptions {
   allowUnknownOptions?: boolean
 }
 
+function addCliOptions(cli: CAC | Command, options: CLIOptionsConfig) {
+  for (const [optionName, option] of Object.entries(options)) {
+    if (option)
+      addCommand(cli, optionName, option)
+  }
+}
+
 export function createCLI(options: CLIOptions = {}) {
   const cli = cac('vitest')
 
   cli
    .version(version)
 
-  for (const optionName in cliOptionsConfig) {
-    const option = (cliOptionsConfig as any)[optionName] as CLIOption<any> | null
-    if (option)
-      addCommand(cli, optionName, option)
-  }
+  addCliOptions(cli, cliOptionsConfig)
 
   cli.help((info) => {
     const helpSection = info.find(current => current.title?.startsWith('For more info, run any command'))
@@ -158,9 +161,12 @@ export function createCLI(options: CLIOptions = {}) {
     .command('dev [...filters]', undefined, options)
     .action(watch)
 
-  cli
-    .command('bench [...filters]', undefined, options)
-    .action(benchmark)
+  addCliOptions(
+    cli
+      .command('bench [...filters]', undefined, options)
+      .action(benchmark),
+    benchCliOptionsConfig,
+  )
 
   // TODO: remove in Vitest 2.0
   cli
diff --git a/packages/vitest/src/node/cli/cli-config.ts b/packages/vitest/src/node/cli/cli-config.ts
index 868c0ad1583f..107ba5ae2929 100644
--- a/packages/vitest/src/node/cli/cli-config.ts
+++ b/packages/vitest/src/node/cli/cli-config.ts
@@ -640,4 +640,17 @@ export const cliOptionsConfig: VitestCLIOptions = {
   name: null,
   includeTaskLocation: null,
   snapshotEnvironment: null,
+  compare: null,
+  outputJson: null,
+}
+
+export const benchCliOptionsConfig: Pick<VitestCLIOptions, 'compare' | 'outputJson'> = {
+  compare: {
+    description: 'benchmark output file to compare against',
+    argument: '<filename>',
+  },
+  outputJson: {
+    description: 'benchmark output file',
+    argument: '<filename>',
+  },
 }
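A note on the `addCliOptions` refactor above: cac accepts an option only if it is registered globally or on the matched sub-command, which is why `benchCliOptionsConfig` is attached to the `bench` command alone. A standalone sketch of that cac pattern (illustrative only, not code from this PR):

```ts
import cac from 'cac'

const cli = cac('vitest')

// registered on the `bench` sub-command only,
// mirroring what benchCliOptionsConfig does above
cli
  .command('bench [...filters]')
  .option('--compare <filename>', 'benchmark output file to compare against')
  .action((filters, options) => {
    console.log(options.compare) // 'main.json'
  })

// parses because `bench` is the matched command; for a bare
// `vitest --compare main.json`, checkUnknownOptions() would
// reject --compare as unknown (see the CLI test at the end)
cli.parse(['node', 'vitest', 'bench', '--compare', 'main.json'])
```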
diff --git a/packages/vitest/src/node/config.ts b/packages/vitest/src/node/config.ts
index 66bbdcb6e899..499735ca876f 100644
--- a/packages/vitest/src/node/config.ts
+++ b/packages/vitest/src/node/config.ts
@@ -381,6 +381,12 @@ export function resolveConfig(
 
     if (options.outputFile)
       resolved.benchmark.outputFile = options.outputFile
+
+    // --compare and --outputJson from the CLI
+    if (options.compare)
+      resolved.benchmark.compare = options.compare
+    if (options.outputJson)
+      resolved.benchmark.outputJson = options.outputJson
   }
 
   resolved.setupFiles = toArray(resolved.setupFiles || []).map(file =>
diff --git a/packages/vitest/src/node/reporters/benchmark/index.ts b/packages/vitest/src/node/reporters/benchmark/index.ts
index 00aa184557b4..554c5d1998cf 100644
--- a/packages/vitest/src/node/reporters/benchmark/index.ts
+++ b/packages/vitest/src/node/reporters/benchmark/index.ts
@@ -1,10 +1,8 @@
 import { VerboseReporter } from '../verbose'
-import { JsonReporter } from './json'
 import { TableReporter } from './table'
 
 export const BenchmarkReportsMap = {
   default: TableReporter,
   verbose: VerboseReporter,
-  json: JsonReporter,
 }
 export type BenchmarkBuiltinReporters = keyof typeof BenchmarkReportsMap
diff --git a/packages/vitest/src/node/reporters/benchmark/json.ts b/packages/vitest/src/node/reporters/benchmark/json.ts
deleted file mode 100644
index 23bcb87d010f..000000000000
--- a/packages/vitest/src/node/reporters/benchmark/json.ts
+++ /dev/null
@@ -1,82 +0,0 @@
-import { existsSync, promises as fs } from 'node:fs'
-import { dirname, resolve } from 'pathe'
-import type { Vitest } from '../../../node'
-import type { BenchTaskResult, File, Reporter } from '../../../types'
-import { getSuites, getTests } from '../../../utils'
-import { getOutputFile } from '../../../utils/config-helpers'
-
-interface FormattedTestResults {
-  numTotalTests: number
-  numTotalTestSuites: number
-  testResults: Record<string, BenchTaskResult[]>
-}
-
-export class JsonReporter implements Reporter {
-  start = 0
-  ctx!: Vitest
-
-  onInit(ctx: Vitest): void {
-    this.ctx = ctx
-  }
-
-  protected async logTasks(files: File[]) {
-    const suites = getSuites(files)
-    const numTotalTestSuites = suites.length
-    const tests = getTests(files)
-    const numTotalTests = tests.length
-    const testResults: Record<string, BenchTaskResult[]> = {}
-    const outputFile = getOutputFile(this.ctx.config.benchmark, 'json')
-    for (const file of files) {
-      const tests = getTests([file])
-      for (const test of tests) {
-        const res = test.result?.benchmark
-        if (!res || test.mode === 'skip') // TODO mark as skipped
-          continue
-        if (!outputFile)
-          res.samples = 'ignore on terminal' as any
-        testResults[test.suite!.name] = (testResults[test.suite!.name] || []).concat(res)
-      }
-
-      if (tests.some(t => t.result?.state === 'run')) {
-        this.ctx.logger.warn('WARNING: Some tests are still running when generating the json report.'
-        + 'This is likely an internal bug in Vitest.'
-        + 'Please report it to https://github.com/vitest-dev/vitest/issues')
-      }
-    }
-
-    const result: FormattedTestResults = {
-      numTotalTestSuites,
-      numTotalTests,
-      testResults,
-    }
-
-    await this.writeReport(JSON.stringify(result, null, 2))
-  }
-
-  async onFinished(files = this.ctx.state.getFiles()) {
-    await this.logTasks(files)
-  }
-
-  /**
-   * Writes the report to an output file if specified in the config,
-   * or logs it to the console otherwise.
-   * @param report
-   */
-  async writeReport(report: string) {
-    const outputFile = getOutputFile(this.ctx.config.benchmark, 'json')
-
-    if (outputFile) {
-      const reportFile = resolve(this.ctx.config.root, outputFile)
-
-      const outputDirectory = dirname(reportFile)
-      if (!existsSync(outputDirectory))
-        await fs.mkdir(outputDirectory, { recursive: true })
-
-      await fs.writeFile(reportFile, report, 'utf-8')
-      this.ctx.logger.log(`json report written to ${reportFile}`)
-    }
-    else {
-      this.ctx.logger.log(report)
-    }
-  }
-}
diff --git a/packages/vitest/src/node/reporters/benchmark/table/index.ts b/packages/vitest/src/node/reporters/benchmark/table/index.ts
index a7fa9fae5f05..12a06fca5ca8 100644
--- a/packages/vitest/src/node/reporters/benchmark/table/index.ts
+++ b/packages/vitest/src/node/reporters/benchmark/table/index.ts
@@ -1,8 +1,11 @@
+import fs from 'node:fs'
 import c from 'picocolors'
+import * as pathe from 'pathe'
 import type { TaskResultPack } from '@vitest/runner'
 import type { UserConsoleLog } from '../../../../types/general'
 import { BaseReporter } from '../../base'
-import { getFullName } from '../../../../utils'
+import type { BenchmarkResult, File } from '../../../../types'
+import { getFullName, getTasks } from '../../../../utils'
 import { getStateSymbol } from '../../renderers/utils'
 import { type TableRendererOptions, createTableRenderer, renderTree } from './tableRender'
 
@@ -20,11 +23,24 @@ export class TableReporter extends BaseReporter {
     super.onWatcherStart()
   }
 
-  onCollected() {
+  async onCollected() {
+    this.rendererOptions.logger = this.ctx.logger
+    this.rendererOptions.showHeap = this.ctx.config.logHeapUsage
+    this.rendererOptions.slowTestThreshold = this.ctx.config.slowTestThreshold
+    if (this.ctx.config.benchmark?.compare) {
+      const compareFile = pathe.resolve(this.ctx.config.root, this.ctx.config.benchmark?.compare)
+      try {
+        this.rendererOptions.compare = flattenFormattedBenchmarkReport(
+          JSON.parse(
+            await fs.promises.readFile(compareFile, 'utf-8'),
+          ),
+        )
+      }
+      catch (e) {
+        this.ctx.logger.error(`Failed to read '${compareFile}'`, e)
+      }
+    }
     if (this.isTTY) {
-      this.rendererOptions.logger = this.ctx.logger
-      this.rendererOptions.showHeap = this.ctx.config.logHeapUsage
-      this.rendererOptions.slowTestThreshold = this.ctx.config.slowTestThreshold
       const files = this.ctx.state.getFiles(this.watchFilters)
       if (!this.renderer)
         this.renderer = createTableRenderer(files, this.rendererOptions).start()
@@ -56,6 +72,18 @@ export class TableReporter extends BaseReporter {
     await this.stopListRender()
     this.ctx.logger.log()
     await super.onFinished(files, errors)
+
+    // write output for future comparison
+    let outputFile = this.ctx.config.benchmark?.outputJson
+    if (outputFile) {
+      outputFile = pathe.resolve(this.ctx.config.root, outputFile)
+      const outputDirectory = pathe.dirname(outputFile)
+      if (!fs.existsSync(outputDirectory))
+        await fs.promises.mkdir(outputDirectory, { recursive: true })
+      const output = createFormattedBenchmarkReport(files)
+      await fs.promises.writeFile(outputFile, JSON.stringify(output, null, 2))
+      this.ctx.logger.log(`Benchmark report written to ${outputFile}`)
+    }
   }
 
   async onWatcherStart() {
@@ -80,3 +108,70 @@ export class TableReporter extends BaseReporter {
     super.onUserConsoleLog(log)
   }
 }
+
+export interface FormattedBenchmarkReport {
+  files: {
+    filepath: string
+    groups: FormattedBenchmarkGroup[]
+  }[]
+}
+
+// flat results keyed by task id
+export interface FlatBenchmarkReport {
+  [id: string]: FormattedBenchmarkResult
+}
+
+interface FormattedBenchmarkGroup {
+  fullName: string
+  benchmarks: FormattedBenchmarkResult[]
+}
+
+export type FormattedBenchmarkResult = Omit<BenchmarkResult, 'samples'> & {
+  id: string
+  sampleCount: number
+}
+
+function createFormattedBenchmarkReport(files: File[]) {
+  const report: FormattedBenchmarkReport = { files: [] }
+  for (const file of files) {
+    const groups: FormattedBenchmarkGroup[] = []
+    for (const task of getTasks(file)) {
+      if (task && task.type === 'suite') {
+        const benchmarks: FormattedBenchmarkResult[] = []
+        for (const t of task.tasks) {
+          const benchmark = t.meta.benchmark && t.result?.benchmark
+          if (benchmark) {
+            const { samples, ...rest } = benchmark
+            benchmarks.push({
+              id: t.id,
+              sampleCount: samples.length,
+              ...rest,
+            })
+          }
+        }
+        if (benchmarks.length) {
+          groups.push({
+            fullName: getFullName(task, ' > '),
+            benchmarks,
+          })
+        }
+      }
+    }
+    report.files.push({
+      filepath: file.filepath,
+      groups,
+    })
+  }
+  return report
+}
+
+function flattenFormattedBenchmarkReport(report: FormattedBenchmarkReport): FlatBenchmarkReport {
+  const flat: FlatBenchmarkReport = {}
+  for (const file of report.files) {
+    for (const group of file.groups) {
+      for (const t of group.benchmarks)
+        flat[t.id] = t
+    }
+  }
+  return flat
+}
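For reference, the file that `createFormattedBenchmarkReport` writes (and `flattenFormattedBenchmarkReport` later consumes) has the shape below. A hand-written sketch derived from the interfaces above; all values are illustrative, and `BenchmarkResult` contributes more numeric fields (`min`, `max`, `mean`, `p75`, ...) than shown:

```ts
const exampleReport = {
  files: [
    {
      filepath: 'test/benchmark/fixtures/compare/basic.bench.ts',
      groups: [
        {
          fullName: 'basic.bench.ts > suite',
          benchmarks: [
            // FormattedBenchmarkResult: a BenchmarkResult with the raw
            // `samples` array replaced by `sampleCount`, plus the task `id`
            // that flattenFormattedBenchmarkReport uses as the lookup key
            { id: 'example-task-id_0_0', name: 'sleep10', rank: 1, hz: 88.2, sampleCount: 2 },
          ],
        },
      ],
    },
  ],
}
```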
diff --git a/packages/vitest/src/node/reporters/benchmark/table/tableRender.ts b/packages/vitest/src/node/reporters/benchmark/table/tableRender.ts
index ceeac1327e6f..2a49071b456d 100644
--- a/packages/vitest/src/node/reporters/benchmark/table/tableRender.ts
+++ b/packages/vitest/src/node/reporters/benchmark/table/tableRender.ts
@@ -1,17 +1,19 @@
 import c from 'picocolors'
 import cliTruncate from 'cli-truncate'
 import stripAnsi from 'strip-ansi'
-import type { Benchmark, BenchmarkResult, Task } from '../../../../types'
+import type { BenchmarkResult, Task } from '../../../../types'
 import { getTests, notNullish } from '../../../../utils'
 import { F_RIGHT } from '../../../../utils/figures'
 import type { Logger } from '../../../logger'
 import { getCols, getStateSymbol } from '../../renderers/utils'
+import type { FlatBenchmarkReport } from '.'
 
 export interface TableRendererOptions {
   renderSucceed?: boolean
   logger: Logger
   showHeap: boolean
   slowTestThreshold: number
+  compare?: FlatBenchmarkReport
 }
 
 const outputMap = new WeakMap<Task, string>()
@@ -35,19 +37,6 @@ function formatNumber(number: number) {
 
 const tableHead = ['name', 'hz', 'min', 'max', 'mean', 'p75', 'p99', 'p995', 'p999', 'rme', 'samples']
 
-function renderTableHead(tasks: Task[]) {
-  const benches = tasks
-    .map(i => i.meta?.benchmark ? i.result?.benchmark : undefined)
-    .filter(notNullish)
-  const allItems = benches.map(renderBenchmarkItems).concat([tableHead])
-  return `${' '.repeat(3)}${tableHead.map((i, idx) => {
-    const width = Math.max(...allItems.map(i => i[idx].length))
-    return idx
-      ? i.padStart(width, ' ')
-      : i.padEnd(width, ' ') // name
-  }).map(c.bold).join(' ')}`
-}
-
 function renderBenchmarkItems(result: BenchmarkResult) {
   return [
     result.name,
@@ -63,23 +52,32 @@ function renderBenchmarkItems(result: BenchmarkResult) {
     result.samples.length.toString(),
   ]
 }
-function renderBenchmark(task: Benchmark, tasks: Task[]): string {
-  const result = task.result?.benchmark
-  if (!result)
-    return task.name
-
-  const benches = tasks
-    .map(i => i.meta?.benchmark ? i.result?.benchmark : undefined)
-    .filter(notNullish)
-  const allItems = benches.map(renderBenchmarkItems).concat([tableHead])
-  const items = renderBenchmarkItems(result)
-  const padded = items.map((i, idx) => {
-    const width = Math.max(...allItems.map(i => i[idx].length))
-    return idx
-      ? i.padStart(width, ' ')
-      : i.padEnd(width, ' ') // name
-  })
+
+function computeColumnWidths(results: BenchmarkResult[]): number[] {
+  const rows = [
+    tableHead,
+    ...results.map(v => renderBenchmarkItems(v)),
+  ]
+  return Array.from(
+    tableHead,
+    (_, i) => Math.max(...rows.map(row => stripAnsi(row[i]).length)),
+  )
+}
+
+function padRow(row: string[], widths: number[]) {
+  return row.map((v, i) =>
+    i
+      ? v.padStart(widths[i], ' ')
+      : v.padEnd(widths[i], ' '), // name
+  )
+}
+
+function renderTableHead(widths: number[]) {
+  return ' '.repeat(3) + padRow(tableHead, widths).map(c.bold).join(' ')
+}
+
+function renderBenchmark(result: BenchmarkResult, widths: number[]) {
+  const padded = padRow(renderBenchmarkItems(result), widths)
   return [
     padded[0], // name
     c.blue(padded[1]), // hz
@@ -92,23 +90,42 @@
     c.cyan(padded[8]), // p999
     c.dim(padded[9]), // rem
     c.dim(padded[10]), // sample
-    result.rank === 1
-      ? c.bold(c.green(' fastest'))
-      : (result.rank === benches.length && benches.length > 2)
-          ? c.bold(c.gray(' slowest'))
-          : '',
   ].join(' ')
 }
 
 export function renderTree(tasks: Task[], options: TableRendererOptions, level = 0, shallow = false): string {
   const output: string[] = []
 
+  const benchMap: Record<string, { current: BenchmarkResult, baseline?: BenchmarkResult }> = {}
+  for (const t of tasks) {
+    if (t.meta.benchmark && t.result?.benchmark) {
+      benchMap[t.id] = {
+        current: t.result.benchmark,
+      }
+      const baseline = options.compare?.[t.id]
+      if (baseline) {
+        benchMap[t.id].baseline = {
+          ...baseline,
+          samples: Array(baseline.sampleCount),
+        }
+      }
+    }
+  }
+  const benchCount = Object.entries(benchMap).length
+
+  // compute column widths
+  const columnWidths = computeColumnWidths(
+    Object.values(benchMap)
+      .flatMap(v => [v.current, v.baseline])
+      .filter(notNullish),
+  )
+
   let idx = 0
   for (const task of tasks) {
     const padding = ' '.repeat(level ? 1 : 0)
     let prefix = ''
 
     if (idx === 0 && task.meta?.benchmark)
-      prefix += `${renderTableHead(tasks)}\n${padding}`
+      prefix += `${renderTableHead(columnWidths)}\n${padding}`
 
     prefix += ` ${getStateSymbol(task)} `
@@ -131,11 +148,37 @@ export function renderTree(tasks: Task[], options: TableRendererOptions, level =
     if (level === 0)
       name = formatFilepath(name)
 
-    const body = task.meta?.benchmark
-      ? renderBenchmark(task as Benchmark, tasks)
-      : name
+    const bench = benchMap[task.id]
+    if (bench) {
+      let body = renderBenchmark(bench.current, columnWidths)
+      if (options.compare && bench.baseline) {
+        if (bench.current.hz) {
+          const diff = bench.current.hz / bench.baseline.hz
+          const diffFixed = diff.toFixed(2)
+          if (diffFixed === '1.00')
+            body += ` ${c.gray(`[${diffFixed}x]`)}`
+          else if (diff > 1)
+            body += ` ${c.blue(`[${diffFixed}x] ⇑`)}`
+          else
+            body += ` ${c.red(`[${diffFixed}x] ⇓`)}`
+        }
+        output.push(padding + prefix + body + suffix)
+        const bodyBaseline = renderBenchmark(bench.baseline, columnWidths)
+        output.push(`${padding} ${bodyBaseline} ${c.dim('(baseline)')}`)
+      }
+      else {
+        if (bench.current.rank === 1 && benchCount > 1)
+          body += ` ${c.bold(c.green(' fastest'))}`
+
+        if (bench.current.rank === benchCount && benchCount > 2)
+          body += ` ${c.bold(c.gray(' slowest'))}`
 
-    output.push(padding + prefix + body + suffix)
+        output.push(padding + prefix + body + suffix)
+      }
+    }
+    else {
+      output.push(padding + prefix + name + suffix)
+    }
 
     if ((task.result?.state !== 'pass') && outputMap.get(task) != null) {
       let data: string | undefined = outputMap.get(task)
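The comparison annotation added to `renderTree` above boils down to a ratio of the two `hz` values. A worked example, factored into a standalone helper (`formatCompareLabel` is hypothetical, for illustration only; the real code also wraps the label in picocolors):

```ts
// Label a result against its baseline by operations-per-second ratio.
function formatCompareLabel(currentHz: number, baselineHz: number): string {
  const diff = currentHz / baselineHz
  const diffFixed = diff.toFixed(2)
  if (diffFixed === '1.00')
    return `[${diffFixed}x]` // within rounding of the baseline
  return diff > 1
    ? `[${diffFixed}x] ⇑` // faster than baseline
    : `[${diffFixed}x] ⇓` // slower than baseline
}

// formatCompareLabel(120, 100) === '[1.20x] ⇑'
// formatCompareLabel(95, 100)  === '[0.95x] ⇓'
```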
diff --git a/packages/vitest/src/types/benchmark.ts b/packages/vitest/src/types/benchmark.ts
index addb0bc71b60..a985e15146d6 100644
--- a/packages/vitest/src/types/benchmark.ts
+++ b/packages/vitest/src/types/benchmark.ts
@@ -39,6 +39,16 @@ export interface BenchmarkUserOptions {
    * Also definable individually per reporter by using an object instead.
    */
   outputFile?: string | (Partial<Record<BenchmarkBuiltinReporters, string>> & Record<string, string>)
+
+  /**
+   * A file path to an earlier benchmark report to compare against
+   */
+  compare?: string
+
+  /**
+   * A file path to store the benchmark report for later comparison
+   */
+  outputJson?: string
 }
 
 export interface Benchmark extends Custom {
diff --git a/packages/vitest/src/types/config.ts b/packages/vitest/src/types/config.ts
index 0e507668b2a3..83365a3d6044 100644
--- a/packages/vitest/src/types/config.ts
+++ b/packages/vitest/src/types/config.ts
@@ -861,6 +861,16 @@ export interface UserConfig extends InlineConfig {
   /**
    * Override vite config's clearScreen from cli
    */
   clearScreen?: boolean
+
+  /**
+   * The `benchmark.compare` option exposed at the top level for the CLI
+   */
+  compare?: string
+
+  /**
+   * The `benchmark.outputJson` option exposed at the top level for the CLI
+   */
+  outputJson?: string
 }
 
 export interface ResolvedConfig extends Omit<Required<UserConfig>, 'config' | 'filters' | 'browser' | 'coverage' | 'testNamePattern' | 'related' | 'api' | 'reporters' | 'resolveSnapshotPath' | 'benchmark' | 'shard' | 'cache' | 'sequence' | 'typecheck' | 'runner' | 'poolOptions' | 'pool' | 'cliExclude'> {
@@ -887,7 +897,7 @@ export interface ResolvedConfig extends Omit<Required<UserConfig>, 'config' | 'f
   api?: ApiConfig
   cliExclude?: string[]
 
-  benchmark?: Required<Omit<BenchmarkUserOptions, 'outputFile'>> & Pick<BenchmarkUserOptions, 'outputFile'>
+  benchmark?: Required<Omit<BenchmarkUserOptions, 'outputFile' | 'compare' | 'outputJson'>> & Pick<BenchmarkUserOptions, 'outputFile' | 'compare' | 'outputJson'>
   shard?: {
     index: number
     count: number
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 9734f54d6463..435b20876ddc 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -16870,6 +16870,7 @@ packages:
 
   /workbox-google-analytics@7.0.0:
     resolution: {integrity: sha512-MEYM1JTn/qiC3DbpvP2BVhyIH+dV/5BjHk756u9VbwuAhu0QHyKscTnisQuz21lfRpOwiS9z4XdqeVAKol0bzg==}
+    deprecated: It is not compatible with newer versions of GA starting with v4, as long as you are using GAv3 it should be ok, but the package is not longer being maintained
     dependencies:
       workbox-background-sync: 7.0.0
       workbox-core: 7.0.0
diff --git a/test/benchmark/fixtures/compare/basic.bench.ts b/test/benchmark/fixtures/compare/basic.bench.ts
new file mode 100644
index 000000000000..0fa89fef4ce1
--- /dev/null
+++ b/test/benchmark/fixtures/compare/basic.bench.ts
@@ -0,0 +1,13 @@
+import { bench, describe } from 'vitest'
+
+const sleep = (ms: number) => new Promise(resolve => setTimeout(resolve, ms))
+
+describe('suite', () => {
+  bench('sleep10', async () => {
+    await sleep(10)
+  }, { time: 20, iterations: 0 })
+
+  bench('sleep100', async () => {
+    await sleep(100)
+  }, { time: 200, iterations: 0 })
+})
diff --git a/test/benchmark/fixtures/compare/vitest.config.ts b/test/benchmark/fixtures/compare/vitest.config.ts
new file mode 100644
index 000000000000..abed6b2116e1
--- /dev/null
+++ b/test/benchmark/fixtures/compare/vitest.config.ts
@@ -0,0 +1,3 @@
+import { defineConfig } from 'vitest/config'
+
+export default defineConfig({})
diff --git a/test/benchmark/package.json b/test/benchmark/package.json
index 5566080b1da6..b1fee30693c5 100644
--- a/test/benchmark/package.json
+++ b/test/benchmark/package.json
@@ -1,5 +1,5 @@
 {
-  "name": "@vitest/benchmark-sequential",
+  "name": "@vitest/test-benchmark",
   "type": "module",
   "private": true,
   "scripts": {
diff --git a/test/benchmark/test/basic.test.ts b/test/benchmark/test/basic.test.ts
index 46021e7483f4..b4cac136a14e 100644
--- a/test/benchmark/test/basic.test.ts
+++ b/test/benchmark/test/basic.test.ts
@@ -1,6 +1,7 @@
 import fs from 'node:fs'
 import { expect, it } from 'vitest'
 import * as pathe from 'pathe'
+import type { FormattedBenchmarkReport } from 'vitest/src/node/reporters/benchmark/table/index.js'
 import { runVitest } from '../../test-utils'
 
 it('basic', { timeout: 60_000 }, async () => {
@@ -8,31 +9,76 @@ it('basic', { timeout: 60_000 }, async () => {
   const benchFile = pathe.join(root, 'bench.json')
   fs.rmSync(benchFile, { force: true })
 
-  await runVitest({
+  const result = await runVitest({
     root,
     allowOnly: true,
-    benchmark: {
-      reporters: 'json',
-      outputFile: 'bench.json',
-    },
+    outputJson: 'bench.json',
   }, [], 'benchmark')
+  expect(result.exitCode).toBe(0)
 
   const benchResult = await fs.promises.readFile(benchFile, 'utf-8')
-  const resultJson = JSON.parse(benchResult)
-
-  expect(Object.keys(resultJson.testResults)).toEqual(
-    expect.arrayContaining([
-      'sort',
-      'timeout',
-      'a0',
-      'c1',
-      'a2',
-      'b3',
-      'b4',
-    ]),
-  )
-
-  const skipped = ['skip', 's0', 's1', 's2', 's3', 'sb4', 's4', 'unimplemented suite', 'unimplemented test']
-  for (const b of skipped)
-    expect(benchResult).not.toContain(b)
+  const resultJson: FormattedBenchmarkReport = JSON.parse(benchResult)
+  const names = resultJson.files.map(f => f.groups.map(g => [g.fullName, g.benchmarks.map(b => b.name)]))
+  expect(names).toMatchInlineSnapshot(`
+    [
+      [
+        [
+          "base.bench.ts > sort",
+          [
+            "normal",
+            "reverse",
+          ],
+        ],
+        [
+          "base.bench.ts > timeout",
+          [
+            "timeout100",
+            "timeout75",
+            "timeout50",
+            "timeout25",
+          ],
+        ],
+      ],
+      [],
+      [
+        [
+          "only.bench.ts",
+          [
+            "visited",
+            "visited2",
+          ],
+        ],
+        [
+          "only.bench.ts > a0",
+          [
+            "0",
+          ],
+        ],
+        [
+          "only.bench.ts > a1 > b1 > c1",
+          [
+            "1",
+          ],
+        ],
+        [
+          "only.bench.ts > a2",
+          [
+            "2",
+          ],
+        ],
+        [
+          "only.bench.ts > a3 > b3",
+          [
+            "3",
+          ],
+        ],
+        [
+          "only.bench.ts > a4 > b4",
+          [
+            "4",
+          ],
+        ],
+      ],
+    ]
+  `)
 })
diff --git a/test/benchmark/test/compare.test.ts b/test/benchmark/test/compare.test.ts
new file mode 100644
index 000000000000..5e48a4065592
--- /dev/null
+++ b/test/benchmark/test/compare.test.ts
@@ -0,0 +1,38 @@
+import fs from 'node:fs'
+import { expect, test } from 'vitest'
+import { runVitest } from '../../test-utils'
+
+test('compare', { timeout: 60_000 }, async () => {
+  await fs.promises.rm('./fixtures/compare/bench.json', { force: true })
+
+  // --outputJson
+  {
+    const result = await runVitest({
+      root: './fixtures/compare',
+      outputJson: './bench.json',
+      reporters: ['default'],
+    }, [], 'benchmark')
+    expect(result.exitCode).toBe(0)
+    expect(fs.existsSync('./fixtures/compare/bench.json')).toBe(true)
+  }
+
+  // --compare
+  {
+    const result = await runVitest({
+      root: './fixtures/compare',
+      compare: './bench.json',
+      reporters: ['default'],
+    }, [], 'benchmark')
+    expect(result.exitCode).toBe(0)
+    const lines = result.stdout.split('\n').slice(3).slice(0, 6)
+    const expected = `
+✓ basic.bench.ts > suite
+    name
+  · sleep10
+    (baseline)
+  · sleep100
+    (baseline)
+`
+    expect(lines).toMatchObject(expected.trim().split('\n').map(s => expect.stringContaining(s.trim())))
+  }
+})
diff --git a/test/core/test/cli-test.test.ts b/test/core/test/cli-test.test.ts
index cc69700bb918..384b96166ee4 100644
--- a/test/core/test/cli-test.test.ts
+++ b/test/core/test/cli-test.test.ts
@@ -16,7 +16,7 @@ function parseArguments(commands: string, full = false) {
     delete options.color
   }
 
-  return { options, args }
+  return { options, args, matchedCommand: vitestCli.matchedCommand }
 }
 
 function getCLIOptions(commands: string) {
@@ -131,6 +131,24 @@ test('fails when an array is passed down for a single value', async () => {
    .toThrowErrorMatchingInlineSnapshot(`[Error: Expected a single value for option "--coverage.provider <name>", received ["v8", "istanbul"]]`)
 })
 
+test('bench-only options', async () => {
+  expect(() =>
+    parseArguments('--compare file.json').matchedCommand?.checkUnknownOptions(),
+  ).toThrowErrorMatchingInlineSnapshot(
+    `[CACError: Unknown option \`--compare\`]`,
+  )
+
+  expect(() =>
+    parseArguments(
+      'bench --compare file.json',
+    ).matchedCommand?.checkUnknownOptions(),
+  ).not.toThrow()
+
+  expect(parseArguments('bench --compare file.json').options).toEqual({
+    compare: 'file.json',
+  })
+})
+
 test('even if coverage is boolean, don\'t fail', () => {
   expect(getCLIOptions('--coverage --coverage.provider v8').coverage).toEqual({
     enabled: true,