Mirror of https://github.com/vitest-dev/vitest.git (synced 2026-01-25 16:48:18 +00:00)
docs: add information about benchmarking (#2001)
This commit is contained in:
parent 46f6bfbc9a
commit f00a1ac228

.gitignore (vendored)
@@ -15,6 +15,7 @@ dist
ltex*
.DS_Store
bench/test/*/*/
+**/benchmark/bench.json
cypress/videos
cypress/downloads
cypress/screenshots

@@ -212,11 +212,98 @@ In Jest, `TestFunction` can also be of type `(done: DoneCallback) => void`. If t

If you want to have access to `TestContext`, use `describe.each` with a single test.

-## describe
+## bench

-When you use `test` in the top level of file, they are collected as part of the implicit suite for it. Using `describe` you can define a new suite in the current context, as a set of related tests and other nested suites. A suite lets you organize your tests so reports are more clear.

- **Type:** `(name: string, fn: BenchFunction, options?: BenchOptions) => void`

`bench` defines a benchmark. In Vitest terms, a benchmark is a function that defines a series of operations. Vitest runs this function multiple times to display different performance results.

Vitest uses the [`tinybench`](https://github.com/tinylibs/tinybench) library under the hood, inheriting all of its options, which can be passed as the third argument.

```ts
import { bench } from 'vitest'

bench('normal sorting', () => {
  const x = [1, 5, 4, 2, 3]
  x.sort((a, b) => {
    return a - b
  })
}, { time: 1000 })
```

```ts
export interface Options {
  /**
   * time needed for running a benchmark task (milliseconds)
   * @default 500
   */
  time?: number

  /**
   * number of times that a task should run if even the time option is finished
   * @default 10
   */
  iterations?: number

  /**
   * function to get the current timestamp in milliseconds
   */
  now?: () => number

  /**
   * An AbortSignal for aborting the benchmark
   */
  signal?: AbortSignal

  /**
   * warmup time (milliseconds)
   * @default 100ms
   */
  warmupTime?: number

  /**
   * warmup iterations
   * @default 5
   */
  warmupIterations?: number

  /**
   * setup function to run before each benchmark task (cycle)
   */
  setup?: Hook

  /**
   * teardown function to run after each benchmark task (cycle)
   */
  teardown?: Hook
}
```

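For illustration, here is a rough sketch of passing some of these options as the third argument to `bench`; the option names are taken from the interface above, and the specific values are arbitrary placeholders:

```ts
import { bench } from 'vitest'

bench('sorting with custom options', () => {
  const x = [1, 5, 4, 2, 3]
  x.sort((a, b) => a - b)
}, {
  time: 200, // time budget for running this task (ms)
  iterations: 20, // minimum number of iterations to run
  warmupTime: 50, // warmup time before measuring (ms)
  warmupIterations: 2, // minimum number of warmup runs
})
```
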
### bench.skip

- **Type:** `(name: string, fn: BenchFunction, options?: BenchOptions) => void`

You can use `bench.skip` syntax to skip running certain benchmarks.

```ts
import { bench } from 'vitest'

bench.skip('normal sorting', () => {
  const x = [1, 5, 4, 2, 3]
  x.sort((a, b) => {
    return a - b
  })
})
```

## describe

When you use `test` or `bench` in the top level of file, they are collected as part of the implicit suite for it. Using `describe` you can define a new suite in the current context, as a set of related tests or benchmarks and other nested suites. A suite lets you organize your tests and benchmarks so reports are more clear.

```ts
// basic.spec.ts
// organizing tests

import { describe, expect, test } from 'vitest'

const person = {
@@ -239,7 +326,30 @@ When you use `test` in the top level of file, they are collected as part of the
})
```

-You can also nest describe blocks if you have a hierarchy of tests:

```ts
// basic.bench.ts
// organizing benchmarks

import { bench, describe } from 'vitest'

describe('sort', () => {
  bench('normal', () => {
    const x = [1, 5, 4, 2, 3]
    x.sort((a, b) => {
      return a - b
    })
  })

  bench('reverse', () => {
    const x = [1, 5, 4, 2, 3]
    x.reverse().sort((a, b) => {
      return a - b
    })
  })
})
```

+You can also nest describe blocks if you have a hierarchy of tests or benchmarks:

```ts
import { describe, expect, test } from 'vitest'
@@ -360,6 +470,7 @@ When you use `test` in the top level of file, they are collected as part of the
// An entry will be shown in the report for this suite
describe.todo('unimplemented suite')
```

### describe.each

- **Type:** `(cases: ReadonlyArray<T>): (name: string, fn: (...args: T[]) => void) => void`

@@ -132,6 +132,42 @@ If disabled, your `alias` and `<plugin>.resolveId` won't affect imports inside `

Interpret CJS module's default as named exports.

### benchmark

- **Type:** `{ include?, exclude?, ... }`

Options used when running `vitest bench`.

### benchmark.include

- **Type:** `string[]`
- **Default:** `['**/*.{bench,benchmark}.{js,mjs,cjs,ts,mts,cts,jsx,tsx}']`

Include globs for benchmark test files

### benchmark.exclude

- **Type:** `string[]`
- **Default:** `['node_modules', 'dist', '.idea', '.git', '.cache']`

Exclude globs for benchmark test files

### benchmark.includeSource

- **Type:** `string[]`
- **Default:** `[]`

Include globs for in-source benchmark test files. This option is similar to [`includeSource`](#includesource).

When defined, Vitest will run all matched files with `import.meta.vitest` inside.

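As a rough sketch of what an in-source benchmark could look like (assuming `bench` is exposed on `import.meta.vitest` the same way the other in-source testing APIs are, which this diff does not confirm), a file matched by `benchmark.includeSource` might contain:

```ts
// src/sort.ts — hypothetical file matched by `benchmark.includeSource`
export function sortNumbers(input: number[]) {
  return [...input].sort((a, b) => a - b)
}

if (import.meta.vitest) {
  // assumption: `bench` is available on `import.meta.vitest`
  const { bench } = import.meta.vitest
  bench('sortNumbers', () => {
    sortNumbers([1, 5, 4, 2, 3])
  })
}
```
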
### benchmark.reporters

- **Type:** `Arrayable<BenchmarkBuiltinReporters | Reporter>`
- **Default:** `'default'`

Custom reporter for output. Can contain one or more built-in report names, reporter instances, and/or paths to custom reporters.

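For illustration, a minimal config sketch that sets a few of the benchmark options documented above (the nesting under `test.benchmark` follows this section; treat the concrete values as placeholders):

```ts
// vitest.config.ts — illustrative sketch
import { defineConfig } from 'vitest/config'

export default defineConfig({
  test: {
    benchmark: {
      // only pick up *.bench.ts files in this sketch
      include: ['**/*.bench.ts'],
      exclude: ['node_modules', 'dist'],
      // use the built-in json reporter for machine-readable output
      reporters: ['json'],
    },
  },
})
```
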
### alias

- **Type:** `Record<string, string> | Array<{ find: string | RegExp, replacement: string, customResolver?: ResolverFunction | ResolverObject }>`

@@ -175,3 +175,27 @@ if (import.meta.vitest) {
```

Learn more at [In-source testing](/guide/in-source).

## Benchmarking <sup><code>experimental</code></sup>

Since Vitest 0.23.0, you can run benchmark tests with the [`bench`](/api/#bench) function to compare performance results.

```ts
import { bench, describe } from 'vitest'

describe('sort', () => {
  bench('normal', () => {
    const x = [1, 5, 4, 2, 3]
    x.sort((a, b) => {
      return a - b
    })
  })

  bench('reverse', () => {
    const x = [1, 5, 4, 2, 3]
    x.reverse().sort((a, b) => {
      return a - b
    })
  })
})
```

@@ -29,16 +29,16 @@ export class JsonReporter implements Reporter {
    for (const file of files) {
      const tests = getTests([file])
      for (const test of tests) {
-       const res = test.result!.benchmark!
+       const res = test.result?.benchmark
        if (!res || test.mode === 'skip') // TODO mark as skipped
          continue
        if (!outputFile)
          res.samples = 'ignore on terminal' as any

        testResults[test.suite.name] = (testResults[test.suite.name] || []).concat(res)
      }

      // test.suite.name
      if (tests.some(t => t.result?.state === 'run')) {
-       this.ctx.logger.warn('WARNING: Some tests are still running when generating the markdown report.'
+       this.ctx.logger.warn('WARNING: Some tests are still running when generating the json report.'
          + 'This is likely an internal bug in Vitest.'
          + 'Please report it to https://github.com/vitest-dev/vitest/issues')
      }

@@ -73,7 +73,7 @@ export class JsonReporter implements Reporter {
      await fs.mkdir(outputDirectory, { recursive: true })

      await fs.writeFile(reportFile, report, 'utf-8')
-     this.ctx.logger.log(`markdown report written to ${reportFile}`)
+     this.ctx.logger.log(`json report written to ${reportFile}`)
    }
    else {
      this.ctx.logger.log(report)

@@ -18,7 +18,7 @@ export interface BenchmarkUserOptions {
  exclude?: string[]

  /**
-  * Include globs for in-source test files
+  * Include globs for in-source benchmark test files
   *
   * @default []
   */

@@ -2,7 +2,7 @@
  "name": "@vitest/benchmark",
  "private": true,
  "scripts": {
-   "test": "vitest bench",
+   "test": "node test.mjs",
    "testu": "vitest -u",
    "coverage": "vitest run --coverage"
  }

test/benchmark/test.mjs (new file, 18 lines)

@@ -0,0 +1,18 @@
import { readFile } from 'fs/promises'
import { startVitest } from 'vitest/node'

const success = await startVitest('benchmark', ['base.bench', 'mode.bench'], {
  run: true,
  update: false,
  outputFile: './bench.json', // TODO move outputFile to benchmark
  benchmark: {
    reporters: ['json'],
  },
})

const benchResult = await readFile('./bench.json', 'utf-8')

if (benchResult.includes('skip'))
  process.exit(1)

process.exit(success ? 0 : 1)
test/benchmark/test/mode.bench.ts (new file, 11 lines)

@@ -0,0 +1,11 @@
import { bench, describe } from 'vitest'

describe.skip('skipped', () => {
  bench('skipped', () => {
    throw new Error('should be skipped')
  })
})

bench.skip('skipped', () => {
  throw new Error('should be skipped')
})