feat: introduce the new reporter API (#7069)

Co-authored-by: Ari Perkkiö <ari.perkkio@gmail.com>
Vladimir 2025-01-14 16:46:44 +01:00 committed by GitHub
parent faca4de870
commit 766624abd6
54 changed files with 2485 additions and 962 deletions

View File

@ -339,6 +339,10 @@ export default ({ mode }: { mode: string }) => {
text: 'Runner API',
link: '/advanced/runner',
},
{
text: 'Reporters API',
link: '/advanced/api/reporters',
},
{
text: 'Task Metadata',
link: '/advanced/metadata',

View File

@ -0,0 +1,311 @@
# Reporters
::: warning
This is an advanced API. If you just want to configure built-in reporters, read the ["Reporters"](/guide/reporters) guide.
:::
Vitest has its own test run lifecycle, represented by the reporter's methods:
- [`onInit`](#oninit)
- [`onTestRunStart`](#ontestrunstart)
- [`onTestModuleQueued`](#ontestmodulequeued)
- [`onTestModuleCollected`](#ontestmodulecollected)
- [`onTestModuleStart`](#ontestmodulestart)
- [`onTestSuiteReady`](#ontestsuiteready)
- [`onHookStart(beforeAll)`](#onhookstart)
- [`onHookEnd(beforeAll)`](#onhookend)
- [`onTestCaseReady`](#ontestcaseready)
- [`onHookStart(beforeEach)`](#onhookstart)
- [`onHookEnd(beforeEach)`](#onhookend)
- [`onHookStart(afterEach)`](#onhookstart)
- [`onHookEnd(afterEach)`](#onhookend)
- [`onTestCaseResult`](#ontestcaseresult)
- [`onHookStart(afterAll)`](#onhookstart)
- [`onHookEnd(afterAll)`](#onhookend)
- [`onTestSuiteResult`](#ontestsuiteresult)
- [`onTestModuleEnd`](#ontestmoduleend)
- [`onTestRunEnd`](#ontestrunend)
Tests and suites within a single module are reported in order unless they were skipped. All skipped tests are reported at the end of the suite/module.
Note that since test modules can run in parallel, Vitest will report them in parallel.
This guide lists all supported reporter methods. However, remember that instead of creating your own reporter from scratch, you can [extend an existing one](/advanced/reporters):
```ts [custom-reporter.js]
import { BaseReporter } from 'vitest/reporters'

export default class CustomReporter extends BaseReporter {
  onTestRunEnd(testModules, errors) {
    console.log(testModules.length, 'test modules finished running')
    super.onTestRunEnd(testModules, errors)
  }
}
```
## onInit
```ts
function onInit(vitest: Vitest): Awaitable<void>
```
This method is called when [Vitest](/advanced/api/vitest) is initiated or started, but before the tests are filtered.
::: info
Internally this method is called inside [`vitest.start`](/advanced/api/vitest#start), [`vitest.init`](/advanced/api/vitest#init) or [`vitest.mergeReports`](/advanced/api/vitest#mergereports). If you are using the programmatic API, make sure to call one of them, depending on your needs, before calling [`vitest.runTestSpecifications`](/advanced/api/vitest#runtestspecifications), for example. The built-in CLI will always run the methods in the correct order.
:::
Note that you can also access the `vitest` instance from test cases, suites and test modules via the [`project`](/advanced/api/test-project) property, but it might also be useful to store a reference to `vitest` in this method.
::: details Example
```ts
import type { Reporter, TestSpecification, Vitest } from 'vitest/node'

class MyReporter implements Reporter {
  private vitest!: Vitest

  onInit(vitest: Vitest) {
    this.vitest = vitest
  }

  onTestRunStart(specifications: TestSpecification[]) {
    console.log(
      specifications.length,
      'test files will run in',
      this.vitest.config.root,
    )
  }
}

export default new MyReporter()
```
:::
## onTestRunStart
```ts
function onTestRunStart(
specifications: TestSpecification[]
): Awaitable<void>
```
This method is called when a new test run has started. It receives an array of [test specifications](/advanced/api/test-specification) scheduled to run. This array is readonly and available only for information purposes.
If Vitest didn't find any test files to run, this event will be invoked with an empty array, and then [`onTestRunEnd`](#ontestrunend) will be called immediately after.
::: details Example
```ts
import type { Reporter, TestSpecification } from 'vitest/node'

class MyReporter implements Reporter {
  onTestRunStart(specifications: TestSpecification[]) {
    console.log(specifications.length, 'test files will run')
  }
}

export default new MyReporter()
```
:::
::: tip DEPRECATION NOTICE
This method was added in Vitest 3, replacing `onPathsCollected` and `onSpecsCollected`, both of which are now deprecated.
:::
## onTestRunEnd
```ts
function onTestRunEnd(
testModules: ReadonlyArray<TestModule>,
unhandledErrors: ReadonlyArray<SerializedError>,
reason: TestRunEndReason
): Awaitable<void>
```
This method is called after all tests have finished running and, if coverage is enabled, after all coverage reports have been merged. Note that you can get the coverage information in the [`onCoverage`](#oncoverage) hook.
It receives a readonly list of test modules. You can iterate over each module's [`testModule.children`](/advanced/api/test-collection) collection to report the state and errors, if any.
The second argument is a readonly list of unhandled errors that Vitest wasn't able to attribute to any test. These can happen outside of the test run because of an error in a plugin, or inside the test run as a side effect of a non-awaited function (for example, a timeout that threw an error after the test had finished running).
The third argument indicates why the test run finished:
- `passed`: the test run finished normally with no errors
- `failed`: the test run has at least one error (due to a syntax error during collection or an actual error during test execution)
- `interrupted`: the test run was interrupted by a [`vitest.cancelCurrentRun`](/advanced/api/vitest#cancelcurrentrun) call or `Ctrl+C` was pressed in the terminal (note that it's still possible to have failed tests in this case)
If Vitest didn't find any test files to run, this event will be invoked with empty arrays of modules and errors, and the state will depend on the value of [`config.passWithNoTests`](/config/#passwithnotests).
::: details Example
```ts
import type {
  Reporter,
  SerializedError,
  TestModule,
  TestRunEndReason,
} from 'vitest/node'

class MyReporter implements Reporter {
  onTestRunEnd(
    testModules: ReadonlyArray<TestModule>,
    unhandledErrors: ReadonlyArray<SerializedError>,
    reason: TestRunEndReason,
  ) {
    if (reason === 'passed') {
      testModules.forEach(module => console.log(module.moduleId, 'succeeded'))
    }
    else if (reason === 'failed') {
      // note that this will skip possible errors in suites
      // you can get them from testSuite.errors()
      for (const testModule of testModules) {
        for (const testCase of testModule.children.allTests()) {
          if (testCase.result().state === 'failed') {
            console.log(testCase.fullName, 'in', testCase.module.moduleId, 'failed')
            console.log(testCase.result().errors)
          }
        }
      }
    }
    else {
      console.log('test run was interrupted, skipping report')
    }
  }
}

export default new MyReporter()
```
:::
::: tip DEPRECATION NOTICE
This method was added in Vitest 3, replacing `onFinished`, which is now deprecated.
:::
## onCoverage
```ts
function onCoverage(coverage: unknown): Awaitable<void>
```
This hook is called after coverage results have been processed. The coverage provider's reporters are called after this hook. The typing of `coverage` depends on the `coverage.provider` option. For Vitest's default built-in providers, you can import the types from the `istanbul-lib-coverage` package:
```ts
import type { CoverageMap } from 'istanbul-lib-coverage'
declare function onCoverage(coverage: CoverageMap): Awaitable<void>
```
If Vitest didn't perform any coverage, this hook is not called.
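::: details Example
A minimal sketch, assuming one of the default istanbul-compatible providers so `coverage` can be cast to `CoverageMap`:
```ts
import type { CoverageMap } from 'istanbul-lib-coverage'
import type { Reporter } from 'vitest/node'

class MyReporter implements Reporter {
  onCoverage(coverage: unknown) {
    // the cast assumes coverage.provider is 'v8' or 'istanbul'
    const coverageMap = coverage as CoverageMap
    const summary = coverageMap.getCoverageSummary()
    console.log('statements covered:', `${summary.statements.pct}%`)
  }
}

export default new MyReporter()
```
:::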
## onTestModuleQueued
```ts
function onTestModuleQueued(testModule: TestModule): Awaitable<void>
```
This method is called right before Vitest imports the setup file and the test module itself. This means that `testModule` will have no [`children`](/advanced/api/test-suite#children) yet, but you can start reporting it as the next test to run.
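::: details Example
A minimal sketch: report the module as soon as it is queued. Its `children` collection is still empty at this point.
```ts
import type { Reporter, TestModule } from 'vitest/node'

class MyReporter implements Reporter {
  onTestModuleQueued(testModule: TestModule) {
    // the module is about to be imported; no tests are collected yet
    console.log('queued', testModule.moduleId)
  }
}

export default new MyReporter()
```
:::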
## onTestModuleCollected
```ts
function onTestModuleCollected(testModule: TestModule): Awaitable<void>
```
This method is called when all tests inside the file have been collected, meaning the [`testModule.children`](/advanced/api/test-suite#children) collection is populated, but the tests don't have any results yet.
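::: details Example
A minimal sketch: count the collected tests before any of them have results.
```ts
import type { Reporter, TestModule } from 'vitest/node'

class MyReporter implements Reporter {
  onTestModuleCollected(testModule: TestModule) {
    // children are populated now, but every result() is still pending
    const tests = [...testModule.children.allTests()]
    console.log('collected', tests.length, 'tests in', testModule.moduleId)
  }
}

export default new MyReporter()
```
:::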
## onTestModuleStart
```ts
function onTestModuleStart(testModule: TestModule): Awaitable<void>
```
This method is called right after [`onTestModuleCollected`](#ontestmodulecollected) unless Vitest runs in collection-only mode ([`vitest.collect()`](/advanced/api/vitest#collect) or `vitest collect` in the CLI); in that case, it will not be called at all because there are no tests to run.
## onTestModuleEnd
```ts
function onTestModuleEnd(testModule: TestModule): Awaitable<void>
```
This method is called when every test in the module has finished running. This means every test inside [`testModule.children`](/advanced/api/test-suite#children) will have a `test.result()` whose state is no longer `pending`.
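::: details Example
A minimal sketch: once the module is done, filter its tests by their final state.
```ts
import type { Reporter, TestModule } from 'vitest/node'

class MyReporter implements Reporter {
  onTestModuleEnd(testModule: TestModule) {
    // every result() has a non-pending state at this point
    const failed = [...testModule.children.allTests('failed')]
    console.log(testModule.moduleId, 'finished with', failed.length, 'failed tests')
  }
}

export default new MyReporter()
```
:::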
## onHookStart
```ts
function onHookStart(context: ReportedHookContext): Awaitable<void>
```
This method is called when any of these hooks have started running:
- `beforeAll`
- `afterAll`
- `beforeEach`
- `afterEach`
If `beforeAll` or `afterAll` are started, the `entity` will be either [`TestSuite`](/advanced/api/test-suite) or [`TestModule`](/advanced/api/test-module).
If `beforeEach` or `afterEach` are started, the `entity` will always be [`TestCase`](/advanced/api/test-case).
::: warning
The `onHookStart` method will not be called if the hook did not run during the test run.
:::
## onHookEnd
```ts
function onHookEnd(context: ReportedHookContext): Awaitable<void>
```
This method is called when any of these hooks have finished running:
- `beforeAll`
- `afterAll`
- `beforeEach`
- `afterEach`
If `beforeAll` or `afterAll` have finished, the `entity` will be either [`TestSuite`](/advanced/api/test-suite) or [`TestModule`](/advanced/api/test-module).
If `beforeEach` or `afterEach` have finished, the `entity` will always be [`TestCase`](/advanced/api/test-case).
::: warning
The `onHookEnd` method will not be called if the hook did not run during the test run.
:::
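::: details Example
A minimal sketch measuring hook durations with both callbacks; it assumes the context exposes the hook `name` and the reported `entity`, and keys the map by the entity's `id`.
```ts
import type { ReportedHookContext, Reporter } from 'vitest/node'

class MyReporter implements Reporter {
  private hookStarts = new Map<string, number>()

  onHookStart(hook: ReportedHookContext) {
    this.hookStarts.set(`${hook.entity.id}:${hook.name}`, performance.now())
  }

  onHookEnd(hook: ReportedHookContext) {
    const key = `${hook.entity.id}:${hook.name}`
    const start = this.hookStarts.get(key)
    if (start != null) {
      this.hookStarts.delete(key)
      console.log(hook.name, 'took', Math.round(performance.now() - start), 'ms')
    }
  }
}

export default new MyReporter()
```
:::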
## onTestSuiteReady
```ts
function onTestSuiteReady(testSuite: TestSuite): Awaitable<void>
```
This method is called before the suite starts running its tests. It is also called if the suite was skipped.
If the file doesn't have any suites, this method will not be called. Consider using `onTestModuleStart` to cover this use case.
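::: details Example
A minimal sketch: announce each suite before its tests run.
```ts
import type { Reporter, TestSuite } from 'vitest/node'

class MyReporter implements Reporter {
  onTestSuiteReady(testSuite: TestSuite) {
    console.log('running suite', testSuite.fullName)
  }
}

export default new MyReporter()
```
:::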
## onTestSuiteResult
```ts
function onTestSuiteResult(testSuite: TestSuite): Awaitable<void>
```
This method is called after the suite has finished running its tests. It is also called if the suite was skipped.
If the file doesn't have any suites, this method will not be called. Consider using `onTestModuleEnd` to cover this use case.
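::: details Example
A minimal sketch: report the suite's final state (see [`state()`](/advanced/api/test-suite#state)).
```ts
import type { Reporter, TestSuite } from 'vitest/node'

class MyReporter implements Reporter {
  onTestSuiteResult(testSuite: TestSuite) {
    console.log('suite', testSuite.fullName, 'finished as', testSuite.state())
  }
}

export default new MyReporter()
```
:::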
## onTestCaseReady
```ts
function onTestCaseReady(testCase: TestCase): Awaitable<void>
```
This method is called before the test starts running, or when it was skipped. Note that `beforeEach` and `afterEach` hooks are considered part of the test because they can influence the result.
::: warning
Notice that it's possible for [`testCase.result()`](/advanced/api/test-case#result) to already have a `passed` or `failed` state when `onTestCaseReady` is called. This can happen if the test ran so fast that both `onTestCaseReady` and `onTestCaseResult` were scheduled to run in the same microtask.
:::
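::: details Example
A minimal sketch: log every test as it starts; `result()` may already be non-pending for very fast tests.
```ts
import type { Reporter, TestCase } from 'vitest/node'

class MyReporter implements Reporter {
  onTestCaseReady(testCase: TestCase) {
    // usually 'pending', but can already be 'passed' or 'failed'
    console.log('starting', testCase.fullName, `(${testCase.result().state})`)
  }
}

export default new MyReporter()
```
:::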
## onTestCaseResult
```ts
function onTestCaseResult(testCase: TestCase): Awaitable<void>
```
This method is called when the test has finished running or was just skipped. Note that it is called after the `afterEach` hooks have finished, if there are any.
At this point, [`testCase.result()`](/advanced/api/test-case#result) will have a non-pending state.
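::: details Example
A minimal sketch: log errors of failed tests as soon as their results arrive.
```ts
import type { Reporter, TestCase } from 'vitest/node'

class MyReporter implements Reporter {
  onTestCaseResult(testCase: TestCase) {
    const result = testCase.result()
    if (result.state === 'failed') {
      console.error(testCase.fullName, 'failed with', result.errors)
    }
  }
}

export default new MyReporter()
```
:::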

View File

@ -10,31 +10,6 @@ if (task.type === 'test') {
}
```
::: warning
We are planning to introduce a new Reporter API that will be using this API by default. For now, the Reporter API uses [runner tasks](/advanced/runner#tasks), but you can still access `TestCase` via `vitest.state.getReportedEntity` method:
```ts
import type { RunnerTestFile, TestModule, Vitest } from 'vitest/node'
class Reporter {
private vitest!: Vitest
onInit(vitest: Vitest) {
this.vitest = vitest
}
onFinished(files: RunnerTestFile[]) {
for (const file of files) {
const testModule = this.vitest.getReportedEntity(file) as TestModule
for (const test of testModule.children.allTests()) {
console.log(test) // TestCase
}
}
}
}
```
:::
## project
This references the [`TestProject`](/advanced/api/test-project) that the test belongs to.
@ -124,12 +99,13 @@ Parent [suite](/advanced/api/test-suite). If the test was called directly inside
```ts
interface TaskOptions {
each: boolean | undefined
concurrent: boolean | undefined
shuffle: boolean | undefined
retry: number | undefined
repeats: number | undefined
mode: 'run' | 'only' | 'skip' | 'todo'
readonly each: boolean | undefined
readonly fails: boolean | undefined
readonly concurrent: boolean | undefined
readonly shuffle: boolean | undefined
readonly retry: number | undefined
readonly repeats: number | undefined
readonly mode: 'run' | 'only' | 'skip' | 'todo'
}
```
@ -143,14 +119,6 @@ function ok(): boolean
Checks if the test did not fail the suite. If the test is not finished yet or was skipped, it will return `true`.
## skipped
```ts
function skipped(): boolean
```
Checks if the test was skipped during collection or dynamically with `ctx.skip()`.
## meta
```ts
@ -174,10 +142,23 @@ If the test did not finish running yet, the meta will be an empty object.
## result
```ts
function result(): TestResult | undefined
function result(): TestResult
```
Test results. It will be `undefined` if test is skipped during collection, not finished yet or was just collected.
Test results. If the test is not finished yet or was just collected, it will be equal to `TestResultPending`:
```ts
export interface TestResultPending {
/**
* The test was collected, but didn't finish running yet.
*/
readonly state: 'pending'
/**
* Pending tests have no errors.
*/
readonly errors: undefined
}
```
If the test was skipped, the return value will be `TestResultSkipped`:
@ -187,15 +168,15 @@ interface TestResultSkipped {
* The test was skipped with `skip` or `todo` flag.
* You can see which one was used in the `options.mode` option.
*/
state: 'skipped'
readonly state: 'skipped'
/**
* Skipped tests have no errors.
*/
errors: undefined
readonly errors: undefined
/**
* A custom note passed down to `ctx.skip(note)`.
*/
note: string | undefined
readonly note: string | undefined
}
```
@ -210,26 +191,26 @@ interface TestResultFailed {
/**
* The test failed to execute.
*/
state: 'failed'
readonly state: 'failed'
/**
* Errors that were thrown during the test execution.
*/
errors: TestError[]
readonly errors: ReadonlyArray<TestError>
}
```
If the test passed, the retunr value will be `TestResultPassed`:
If the test passed, the return value will be `TestResultPassed`:
```ts
interface TestResultPassed {
/**
* The test passed successfully.
*/
state: 'passed'
readonly state: 'passed'
/**
* Errors that were thrown during the test execution.
*/
errors: TestError[] | undefined
readonly errors: ReadonlyArray<TestError> | undefined
}
```
@ -250,32 +231,36 @@ interface TestDiagnostic {
/**
* If the duration of the test is above `slowTestThreshold`.
*/
slow: boolean
readonly slow: boolean
/**
* The amount of memory used by the test in bytes.
* This value is only available if the test was executed with `logHeapUsage` flag.
*/
heap: number | undefined
readonly heap: number | undefined
/**
* The time it takes to execute the test in ms.
*/
duration: number
readonly duration: number
/**
* The time in ms when the test started.
*/
startTime: number
readonly startTime: number
/**
* The amount of times the test was retried.
*/
retryCount: number
readonly retryCount: number
/**
* The amount of times the test was repeated as configured by `repeats` option.
* This value can be lower if the test failed during the repeat and no `retry` is configured.
*/
repeatCount: number
readonly repeatCount: number
/**
* If test passed on a second retry.
*/
flaky: boolean
readonly flaky: boolean
}
```
::: info
`diagnostic()` will return `undefined` if the test was not scheduled to run yet.
:::

View File

@ -57,16 +57,14 @@ for (const suite of module.children.allSuites()) {
## allTests
```ts
function allTests(
state?: TestResult['state'] | 'running'
): Generator<TestCase, undefined, void>
function allTests(state?: TestState): Generator<TestCase, undefined, void>
```
Filters all tests that are part of this collection and its children.
```ts
for (const test of module.children.allTests()) {
if (!test.result()) {
if (test.result().state === 'pending') {
console.log('test', test.fullName, 'did not finish')
}
}
@ -77,9 +75,7 @@ You can pass down a `state` value to filter tests by the state.
## tests
```ts
function tests(
state?: TestResult['state'] | 'running'
): Generator<TestCase, undefined, void>
function tests(state?: TestState): Generator<TestCase, undefined, void>
```
Filters only the tests that are part of this collection. You can pass down a `state` value to filter tests by the state.

View File

@ -10,35 +10,28 @@ if (task.type === 'module') {
}
```
The `TestModule` inherits all methods and properties from the [`TestSuite`](/advanced/api/test-module). This guide will only list methods and properties unique to the `TestModule`
::: warning
We are planning to introduce a new Reporter API that will be using this API by default. For now, the Reporter API uses [runner tasks](/advanced/runner#tasks), but you can still access `TestModule` via `vitest.state.getReportedEntity` method:
```ts
import type { RunnerTestFile, TestModule, Vitest } from 'vitest/node'
class Reporter {
private vitest!: Vitest
onInit(vitest: Vitest) {
this.vitest = vitest
}
onFinished(files: RunnerTestFile[]) {
for (const file of files) {
const testModule = this.vitest.state.getReportedEntity(file) as TestModule
console.log(testModule) // TestModule
}
}
}
```
::: warning Extending Suite Methods
The `TestModule` class inherits all methods and properties from the [`TestSuite`](/advanced/api/test-suite). This guide will only list methods and properties unique to the `TestModule`.
:::
## moduleId
This is usually an absolute unix file path (even on Windows). It can be a virtual id if the file is not on the disk. This value corresponds to Vite's `ModuleGraph` id.
```ts
'C:/Users/Documents/project/example.test.ts' // ✅
'/Users/mac/project/example.test.ts' // ✅
'C:\\Users\\Documents\\project\\example.test.ts' // ❌
```
## state
```ts
function state(): TestModuleState
```
Works the same way as [`testSuite.state()`](/advanced/api/test-suite#state), but can also return `queued` if the module wasn't executed yet.
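For example, a minimal reporter sketch could single out modules that never started running (possible when the run is interrupted):
```ts
import type { Reporter, TestModule } from 'vitest/node'

class MyReporter implements Reporter {
  onTestRunEnd(testModules: ReadonlyArray<TestModule>) {
    for (const testModule of testModules) {
      // 'queued' is only possible on modules, not on suites
      if (testModule.state() === 'queued') {
        console.log(testModule.moduleId, 'was never executed')
      }
    }
  }
}

export default new MyReporter()
```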
## diagnostic
```ts
@ -52,23 +45,23 @@ interface ModuleDiagnostic {
/**
* The time it takes to import and initiate an environment.
*/
environmentSetupDuration: number
readonly environmentSetupDuration: number
/**
* The time it takes Vitest to setup test harness (runner, mocks, etc.).
*/
prepareDuration: number
readonly prepareDuration: number
/**
* The time it takes to import the test module.
* This includes importing everything in the module and executing suite callbacks.
*/
collectDuration: number
readonly collectDuration: number
/**
* The time it takes to import the setup module.
*/
setupDuration: number
readonly setupDuration: number
/**
* Accumulated duration of all tests and hooks in the module.
*/
duration: number
readonly duration: number
}
```

View File

@ -13,6 +13,10 @@ const specification = project.createSpecification(
`createSpecification` expects a resolved module ID. It doesn't auto-resolve the file or check that it exists on the file system.
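For example, a short sketch resolving the path manually before creating the specification; the file name here is hypothetical:
```ts
import { resolve } from 'node:path'

// hypothetical test file; createSpecification won't verify that it exists
const specification = project.createSpecification(
  resolve(project.config.root, 'example.test.ts'),
)
```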
## taskId
[Test module's](/advanced/api/test-suite#id) identifier.
## project
This references the [`TestProject`](/advanced/api/test-project) that the test module belongs to.
@ -27,6 +31,10 @@ The ID of the module in Vite's module graph. Usually, it's an absolute file path
'C:\\Users\\Documents\\project\\example.test.ts' // ❌
```
## testModule
Instance of [`TestModule`](/advanced/api/test-module) associated with the specification. If the test wasn't queued yet, this will be `undefined`.
## pool <Badge type="warning">experimental</Badge> {#pool}
The [`pool`](/config/#pool) in which the test module will run.

View File

@ -10,31 +10,6 @@ if (task.type === 'suite') {
}
```
::: warning
We are planning to introduce a new Reporter API that will be using this API by default. For now, the Reporter API uses [runner tasks](/advanced/runner#tasks), but you can still access `TestSuite` via `vitest.state.getReportedEntity` method:
```ts
import type { RunnerTestFile, TestModule, Vitest } from 'vitest/node'
class Reporter {
private vitest!: Vitest
onInit(vitest: Vitest) {
this.vitest = vitest
}
onFinished(files: RunnerTestFile[]) {
for (const file of files) {
const testModule = this.vitest.state.getReportedEntity(file) as TestModule
for (const suite of testModule.children.allSuites()) {
console.log(suite) // TestSuite
}
}
}
}
```
:::
## project
This references the [`TestProject`](/advanced/api/test-project) that the test belongs to.
@ -125,12 +100,13 @@ Parent suite. If the suite was called directly inside the [module](/advanced/api
```ts
interface TaskOptions {
each: boolean | undefined
concurrent: boolean | undefined
shuffle: boolean | undefined
retry: number | undefined
repeats: number | undefined
mode: 'run' | 'only' | 'skip' | 'todo'
readonly each: boolean | undefined
readonly fails: boolean | undefined
readonly concurrent: boolean | undefined
readonly shuffle: boolean | undefined
readonly retry: number | undefined
readonly repeats: number | undefined
readonly mode: 'run' | 'only' | 'skip' | 'todo'
}
```
@ -153,7 +129,21 @@ for (const task of suite.children) {
```
::: warning
Note that `suite.children` will only iterate the first level of nesting, it won't go deeper.
Note that `suite.children` will only iterate the first level of nesting; it won't go deeper. If you need to iterate over all tests or suites, use [`children.allTests()`](/advanced/api/test-collection#alltests) or [`children.allSuites()`](/advanced/api/test-collection#allsuites). If you need to iterate over everything, use a recursive function:
```ts
function visit(collection: TestCollection) {
  for (const task of collection) {
    if (task.type === 'suite') {
      // report a suite
      visit(task.children)
    }
    else {
      // report a test
    }
  }
}
```
:::
## ok
@ -164,13 +154,22 @@ function ok(): boolean
Checks if the suite has any failed tests. This will also return `false` if suite failed during collection. In that case, check the [`errors()`](#errors) for thrown errors.
## skipped
## state
```ts
function skipped(): boolean
function state(): TestSuiteState
```
Checks if the suite was skipped during collection.
Checks the running state of the suite. Possible return values:
- **pending**: the tests in this suite did not finish running yet.
- **failed**: this suite has failed tests or they couldn't be collected. If [`errors()`](#errors) is not empty, it means the suite failed to collect tests.
- **passed**: every test inside this suite has passed.
- **skipped**: this suite was skipped during collection.
::: warning
Note that [test module](/advanced/api/test-module) also has a `state` method that returns the same values, but it can also return an additional `queued` state if the module wasn't executed yet.
:::
## errors
@ -189,5 +188,5 @@ describe('collection failed', () => {
```
::: warning
Note that errors are serialized into simple object: `instanceof Error` will always return `false`.
Note that errors are serialized into simple objects: `instanceof Error` will always return `false`.
:::

View File

@ -20,25 +20,22 @@ test('custom', ({ task }) => {
})
```
Once a test is completed, Vitest will send a task including the result and `meta` to the Node.js process using RPC. To intercept and process this task, you can utilize the `onTaskUpdate` method available in your reporter implementation:
Once a test is completed, Vitest will send a task including the result and `meta` to the Node.js process using RPC, and then report it in `onTestCaseResult` and other hooks that have access to tasks. To process this test case, you can utilize the `onTestCaseResult` method available in your reporter implementation:
```ts [custom-reporter.js]
export default {
// you can intercept packs if needed
onTaskUpdate(packs) {
const [id, result, meta] = packs[0]
},
// meta is located on every task inside "onFinished"
onFinished(files) {
files[0].meta.done === true
files[0].tasks[0].meta.custom === 'some-custom-handler'
}
}
```
import type { Reporter, TestCase, TestModule } from 'vitest/node'
::: warning
Vitest can send several tasks at the same time if several tests are completed in a short period of time.
:::
export default {
onTestCaseResult(testCase: TestCase) {
// custom === 'some-custom-handler' ✅
const { custom } = testCase.meta()
},
onTestRunEnd(testModule: TestModule) {
testModule.meta().done === true
testModule.children.at(0).meta().custom === 'some-custom-handler'
}
} satisfies Reporter
```
::: danger BEWARE
Vitest uses different methods to communicate with the Node.js process.
@ -56,9 +53,11 @@ You can also get this information from Vitest state when tests finished running:
```ts
const vitest = await createVitest('test')
await vitest.start()
vitest.state.getFiles()[0].meta.done === true
vitest.state.getFiles()[0].tasks[0].meta.custom === 'some-custom-handler'
const { testModules } = await vitest.start()
const testModule = testModules[0]
testModule.meta().done === true
testModule.children.at(0).meta().custom === 'some-custom-handler'
```
It's also possible to extend type definitions when using TypeScript:

View File

@ -1179,6 +1179,16 @@ test('performs an organization query', async () => {
::: tip
This hook is always called in reverse order and is not affected by [`sequence.hooks`](/config/#sequence-hooks) option.
<!-- TODO: should it be called? https://github.com/vitest-dev/vitest/pull/7069 -->
Note that this hook is not called if the test was skipped with a dynamic `ctx.skip()` call:
```ts{2}
test('skipped dynamically', (t) => {
  onTestFinished(() => {}) // not called
  t.skip()
})
```
:::
### onTestFailed

View File

@ -1,4 +1,4 @@
import type { CancelReason, File, Suite, Task, TaskResultPack, VitestRunner } from '@vitest/runner'
import type { CancelReason, File, Suite, Task, TaskEventPack, TaskResultPack, VitestRunner } from '@vitest/runner'
import type { SerializedConfig, WorkerGlobalState } from 'vitest'
import type { VitestExecutor } from 'vitest/execute'
import type { VitestBrowserClientMocker } from './mocker'
@ -131,8 +131,8 @@ export function createBrowserRunner(
return rpc().onCollected(files)
}
onTaskUpdate = (task: TaskResultPack[]): Promise<void> => {
return rpc().onTaskUpdate(task)
onTaskUpdate = (task: TaskResultPack[], events: TaskEventPack[]): Promise<void> => {
return rpc().onTaskUpdate(task, events)
}
importFile = async (filepath: string) => {

View File

@ -1,6 +1,6 @@
import type { Duplex } from 'node:stream'
import type { ErrorWithDiff } from 'vitest'
import type { BrowserCommandContext, ResolveSnapshotPathHandlerContext, TestModule, TestProject } from 'vitest/node'
import type { BrowserCommandContext, ResolveSnapshotPathHandlerContext, TestProject } from 'vitest/node'
import type { WebSocket } from 'ws'
import type { ParentBrowserProject } from './projectParent'
import type { BrowserServerState } from './state'
@ -111,23 +111,19 @@ export function setupBrowserRpc(globalServer: ParentBrowserProject) {
vitest.state.catchError(error, type)
},
async onQueued(file) {
vitest.state.collectFiles(project, [file])
const testModule = vitest.state.getReportedEntity(file) as TestModule
await vitest.report('onTestModuleQueued', testModule)
await vitest._testRun.enqueued(project, file)
},
async onCollected(files) {
vitest.state.collectFiles(project, files)
await vitest.report('onCollected', files)
await vitest._testRun.collected(project, files)
},
async onTaskUpdate(packs) {
vitest.state.updateTasks(packs)
await vitest.report('onTaskUpdate', packs)
async onTaskUpdate(packs, events) {
await vitest._testRun.updated(packs, events)
},
onAfterSuiteRun(meta) {
vitest.coverageProvider?.onAfterSuiteRun(meta)
},
sendLog(log) {
return vitest.report('onUserConsoleLog', log)
return vitest._testRun.log(log)
},
resolveSnapshotPath(testPath) {
return vitest.snapshot.resolvePath<ResolveSnapshotPathHandlerContext>(testPath, {

View File

@ -1,14 +1,15 @@
import type { ServerIdResolution, ServerMockResolution } from '@vitest/mocker/node'
import type { TaskEventPack, TaskResultPack } from '@vitest/runner'
import type { BirpcReturn } from 'birpc'
import type { AfterSuiteRunMeta, CancelReason, Reporter, RunnerTestFile, SnapshotResult, TaskResultPack, UserConsoleLog } from 'vitest'
import type { AfterSuiteRunMeta, CancelReason, Reporter, RunnerTestFile, SnapshotResult, UserConsoleLog } from 'vitest'
export interface WebSocketBrowserHandlers {
resolveSnapshotPath: (testPath: string) => string
resolveSnapshotRawPath: (testPath: string, rawPath: string) => string
onUnhandledError: (error: unknown, type: string) => Promise<void>
onQueued: (file: RunnerTestFile) => void
onCollected: (files?: RunnerTestFile[]) => Promise<void>
onTaskUpdate: (packs: TaskResultPack[]) => void
onCollected: (files: RunnerTestFile[]) => Promise<void>
onTaskUpdate: (packs: TaskResultPack[], events: TaskEventPack[]) => void
onAfterSuiteRun: (meta: AfterSuiteRunMeta) => void
onCancel: (reason: CancelReason) => void
getCountOfFailedTests: () => number

View File

@ -68,7 +68,8 @@ export function createTestContext(
context.task = test
context.skip = (note?: string) => {
test.pending = true
test.result ??= { state: 'skip' }
test.result.pending = true
throw new PendingError('test is skipped; abort execution', test, note)
}

View File

@ -160,6 +160,8 @@ export const onTestFailed: TaskHook<OnTestFailedHandler> = createTestHook(
*
* **Note:** The `onTestFinished` hooks are running in reverse order of their registration. You can configure this by changing the `sequence.hooks` option in the config file.
*
* **Note:** The `onTestFinished` hook is not called if the test is canceled with a dynamic `ctx.skip()` call.
*
* @param {Function} fn - The callback function to be executed after a test finishes. The function can receive parameters providing details about the completed test, including its success or failure status.
* @param {number} [timeout] - Optional timeout in milliseconds for the hook. If not provided, the default hook timeout from the runner's configuration is used.
* @throws {Error} Throws an error if the function is not called within a test.

View File

@ -3,7 +3,6 @@ import type { DiffOptions } from '@vitest/utils/diff'
import type { FileSpecification, VitestRunner } from './types/runner'
import type {
File,
HookCleanupCallback,
HookListener,
SequenceHooks,
Suite,
@ -13,6 +12,7 @@ import type {
TaskResult,
TaskResultPack,
TaskState,
TaskUpdateEvent,
Test,
TestContext,
} from './types/tasks'
@ -31,21 +31,32 @@ const now = globalThis.performance ? globalThis.performance.now.bind(globalThis.
const unixNow = Date.now
function updateSuiteHookState(
suite: Task,
task: Task,
name: keyof SuiteHooks,
state: TaskState,
runner: VitestRunner,
) {
if (!suite.result) {
suite.result = { state: 'run' }
if (!task.result) {
task.result = { state: 'run' }
}
if (!suite.result?.hooks) {
suite.result.hooks = {}
if (!task.result.hooks) {
task.result.hooks = {}
}
const suiteHooks = suite.result.hooks
const suiteHooks = task.result.hooks
if (suiteHooks) {
suiteHooks[name] = state
updateTask(suite, runner)
let event: TaskUpdateEvent = state === 'run' ? 'before-hook-start' : 'before-hook-end'
if (name === 'afterAll' || name === 'afterEach') {
event = state === 'run' ? 'after-hook-start' : 'after-hook-end'
}
updateTask(
event,
task,
runner,
)
}
}
@ -113,10 +124,10 @@ export async function callSuiteHook<T extends keyof SuiteHooks>(
name: T,
runner: VitestRunner,
args: SuiteHooks[T][0] extends HookListener<infer A, any> ? A : never,
): Promise<HookCleanupCallback[]> {
): Promise<unknown[]> {
const sequence = runner.config.sequence.hooks
const callbacks: HookCleanupCallback[] = []
const callbacks: unknown[] = []
// stop at file level
const parentSuite: Suite | null = 'filepath' in suite ? null : suite.suite || suite.file
@ -126,10 +137,12 @@ export async function callSuiteHook<T extends keyof SuiteHooks>(
)
}
updateSuiteHookState(currentTask, name, 'run', runner)
const hooks = getSuiteHooks(suite, name, sequence)
if (hooks.length > 0) {
updateSuiteHookState(currentTask, name, 'run', runner)
}
if (sequence === 'parallel') {
callbacks.push(
...(await Promise.all(hooks.map(hook => (hook as any)(...args)))),
@ -141,7 +154,9 @@ export async function callSuiteHook<T extends keyof SuiteHooks>(
}
}
updateSuiteHookState(currentTask, name, 'pass', runner)
if (hooks.length > 0) {
updateSuiteHookState(currentTask, name, 'pass', runner)
}
if (name === 'afterEach' && parentSuite) {
callbacks.push(
@ -153,10 +168,12 @@ export async function callSuiteHook<T extends keyof SuiteHooks>(
}
const packs = new Map<string, [TaskResult | undefined, TaskMeta]>()
const eventsPacks: [string, TaskUpdateEvent][] = []
let updateTimer: any
let previousUpdate: Promise<void> | undefined
export function updateTask(task: Task, runner: VitestRunner): void {
export function updateTask(event: TaskUpdateEvent, task: Task, runner: VitestRunner): void {
eventsPacks.push([task.id, event])
packs.set(task.id, [task.result, task.meta])
const { clearTimeout, setTimeout } = getSafeTimers()
@ -176,13 +193,14 @@ async function sendTasksUpdate(runner: VitestRunner) {
const taskPacks = Array.from(packs).map<TaskResultPack>(([id, task]) => {
return [id, task[0], task[1]]
})
const p = runner.onTaskUpdate?.(taskPacks)
const p = runner.onTaskUpdate?.(taskPacks, eventsPacks)
eventsPacks.length = 0
packs.clear()
return p
}
}
async function callCleanupHooks(cleanups: HookCleanupCallback[]) {
async function callCleanupHooks(cleanups: unknown[]) {
await Promise.all(
cleanups.map(async (fn) => {
if (typeof fn !== 'function') {
@ -201,7 +219,10 @@ export async function runTest(test: Test, runner: VitestRunner): Promise<void> {
}
if (test.result?.state === 'fail') {
updateTask(test, runner)
// should not be possible to get here, I think this is just copy pasted from suite
// TODO: maybe someone fails tests in `beforeAll` hooks?
// https://github.com/vitest-dev/vitest/pull/7069
updateTask('test-failed-early', test, runner)
return
}
@ -212,7 +233,7 @@ export async function runTest(test: Test, runner: VitestRunner): Promise<void> {
startTime: unixNow(),
retryCount: 0,
}
updateTask(test, runner)
updateTask('test-prepare', test, runner)
setCurrentTest(test)
@ -222,7 +243,7 @@ export async function runTest(test: Test, runner: VitestRunner): Promise<void> {
for (let repeatCount = 0; repeatCount <= repeats; repeatCount++) {
const retry = test.retry ?? 0
for (let retryCount = 0; retryCount <= retry; retryCount++) {
let beforeEachCleanups: HookCleanupCallback[] = []
let beforeEachCleanups: unknown[] = []
try {
await runner.onBeforeTryTask?.(test, {
retry: retryCount,
@ -271,10 +292,10 @@ export async function runTest(test: Test, runner: VitestRunner): Promise<void> {
}
// skipped with new PendingError
if (test.pending || test.result?.state === 'skip') {
if (test.result?.pending || test.result?.state === 'skip') {
test.mode = 'skip'
test.result = { state: 'skip', note: test.result?.note }
updateTask(test, runner)
test.result = { state: 'skip', note: test.result?.note, pending: true }
updateTask('test-finished', test, runner)
setCurrentTest(undefined)
return
}
@ -309,8 +330,8 @@ export async function runTest(test: Test, runner: VitestRunner): Promise<void> {
)
}
delete test.onFailed
delete test.onFinished
test.onFailed = undefined
test.onFinished = undefined
if (test.result.state === 'pass') {
break
@ -323,7 +344,7 @@ export async function runTest(test: Test, runner: VitestRunner): Promise<void> {
}
// update retry info
updateTask(test, runner)
updateTask('test-retried', test, runner)
}
}
@ -346,13 +367,14 @@ export async function runTest(test: Test, runner: VitestRunner): Promise<void> {
await runner.onAfterRunTask?.(test)
updateTask(test, runner)
updateTask('test-finished', test, runner)
}
function failTask(result: TaskResult, err: unknown, diffOptions: DiffOptions | undefined) {
if (err instanceof PendingError) {
result.state = 'skip'
result.note = err.note
result.pending = true
return
}
@ -369,7 +391,7 @@ function markTasksAsSkipped(suite: Suite, runner: VitestRunner) {
suite.tasks.forEach((t) => {
t.mode = 'skip'
t.result = { ...t.result, state: 'skip' }
updateTask(t, runner)
updateTask('test-finished', t, runner)
if (t.type === 'suite') {
markTasksAsSkipped(t, runner)
}
@ -381,26 +403,33 @@ export async function runSuite(suite: Suite, runner: VitestRunner): Promise<void
if (suite.result?.state === 'fail') {
markTasksAsSkipped(suite, runner)
updateTask(suite, runner)
// failed during collection
updateTask('suite-failed-early', suite, runner)
return
}
const start = now()
const mode = suite.mode
suite.result = {
state: 'run',
state: mode === 'skip' || mode === 'todo' ? mode : 'run',
startTime: unixNow(),
}
updateTask(suite, runner)
updateTask('suite-prepare', suite, runner)
let beforeAllCleanups: HookCleanupCallback[] = []
let beforeAllCleanups: unknown[] = []
if (suite.mode === 'skip') {
suite.result.state = 'skip'
updateTask('suite-finished', suite, runner)
}
else if (suite.mode === 'todo') {
suite.result.state = 'todo'
updateTask('suite-finished', suite, runner)
}
else {
try {
@ -476,10 +505,10 @@ export async function runSuite(suite: Suite, runner: VitestRunner): Promise<void
}
}
updateTask(suite, runner)
suite.result.duration = now() - start
updateTask('suite-finished', suite, runner)
await runner.onAfterRunSuite?.(suite)
}
}

View File

@ -39,12 +39,14 @@ export type {
TaskBase,
TaskContext,
TaskCustomOptions,
TaskEventPack,
TaskHook,
TaskMeta,
TaskPopulated,
TaskResult,
TaskResultPack,
TaskState,
TaskUpdateEvent,
Test,
TestAPI,
TestContext,

View File

@ -5,6 +5,7 @@ import type {
SequenceSetupFiles,
Suite,
Task,
TaskEventPack,
TaskResultPack,
Test,
TestContext,
@ -128,7 +129,7 @@ export interface VitestRunner {
/**
* Called, when a task is updated. The same as "onTaskUpdate" in a reporter, but this is running in the same thread as tests.
*/
onTaskUpdate?: (task: TaskResultPack[]) => Promise<void>
onTaskUpdate?: (task: TaskResultPack[], events: TaskEventPack[]) => Promise<void>
/**
* Called before running all tests in collected paths.

View File

@ -78,10 +78,6 @@ export interface TaskPopulated extends TaskBase {
* File task. It's the root task of the file.
*/
file: File
/**
* Whether the task was skipped by calling `t.skip()`.
*/
pending?: boolean
/**
* Whether the task should succeed if it fails. If the task fails, it will be marked as passed.
*/
@ -152,6 +148,11 @@ export interface TaskResult {
repeatCount?: number
/** @private */
note?: string
/**
* Whether the task was skipped by calling `t.skip()`.
* @internal
*/
pending?: boolean
}
/**
@ -173,6 +174,30 @@ export type TaskResultPack = [
meta: TaskMeta,
]
export type TaskEventPack = [
/**
* Unique task identifier from `task.id`.
*/
id: string,
/**
* The name of the event that triggered the update.
*/
event: TaskUpdateEvent,
]
export type TaskUpdateEvent =
| 'test-failed-early'
| 'suite-failed-early'
| 'test-prepare'
| 'test-finished'
| 'test-retried'
| 'suite-prepare'
| 'suite-finished'
| 'before-hook-start'
| 'before-hook-end'
| 'after-hook-start'
| 'after-hook-end'
export interface Suite extends TaskBase {
type: 'suite'
/**

View File

@ -48,9 +48,8 @@ export function setup(ctx: Vitest, _server?: ViteDevServer) {
function setupClient(ws: WebSocket) {
const rpc = createBirpc<WebSocketEvents, WebSocketHandlers>(
{
async onTaskUpdate(packs) {
ctx.state.updateTasks(packs)
await ctx.report('onTaskUpdate', packs)
async onTaskUpdate(packs, events) {
await ctx._testRun.updated(packs, events)
},
getFiles() {
return ctx.state.getFiles()

View File

@ -1,4 +1,4 @@
import type { File, TaskResultPack } from '@vitest/runner'
import type { File, TaskEventPack, TaskResultPack } from '@vitest/runner'
import type { BirpcReturn } from 'birpc'
import type { SerializedConfig } from '../runtime/config'
import type { SerializedTestSpecification } from '../runtime/types/utils'
@ -27,7 +27,7 @@ export interface TransformResultWithSource {
}
export interface WebSocketHandlers {
onTaskUpdate: (packs: TaskResultPack[]) => void
onTaskUpdate: (packs: TaskResultPack[], events: TaskEventPack[]) => void
getFiles: () => File[]
getTestFiles: () => Promise<SerializedTestSpecification[]>
getPaths: () => string[]

View File

@ -256,7 +256,7 @@ export function formatCollectedAsJSON(files: TestModule[]) {
files.forEach((file) => {
for (const test of file.children.allTests()) {
if (test.skipped()) {
if (test.result().state === 'skipped') {
continue
}
const result: TestCollectJSONResult = {
@ -280,7 +280,7 @@ export function formatCollectedAsString(testModules: TestModule[]) {
testModules.forEach((testModule) => {
for (const test of testModule.children.allTests()) {
if (test.skipped()) {
if (test.result().state === 'skipped') {
continue
}
const fullName = `${test.module.task.name} > ${test.fullName}`

View File

@ -1,4 +1,4 @@
import type { CancelReason, File, TaskResultPack } from '@vitest/runner'
import type { CancelReason, File } from '@vitest/runner'
import type { Awaitable } from '@vitest/utils'
import type { Writable } from 'node:stream'
import type { ViteDevServer } from 'vite'
@ -24,6 +24,7 @@ import { defaultBrowserPort, workspacesFiles as workspaceFiles } from '../consta
import { getCoverageProvider } from '../integrations/coverage'
import { distDir } from '../paths'
import { wildcardPatternToRegExp } from '../utils/base'
import { convertTasksToEvents } from '../utils/tasks'
import { BrowserSessions } from './browser/sessions'
import { VitestCache } from './cache'
import { resolveConfig } from './config/resolveConfig'
@ -36,6 +37,7 @@ import { BlobReporter, readBlobs } from './reporters/blob'
import { createBenchmarkReporters, createReporters } from './reporters/utils'
import { VitestSpecifications } from './specifications'
import { StateManager } from './state'
import { TestRun } from './test-run'
import { VitestWatcher } from './watcher'
import { resolveBrowserWorkspace, resolveWorkspace } from './workspace/resolveWorkspace'
@ -94,6 +96,7 @@ export class Vitest {
/** @internal */ reporters: Reporter[] = undefined!
/** @internal */ vitenode: ViteNodeServer = undefined!
/** @internal */ runner: ViteNodeRunner = undefined!
/** @internal */ _testRun: TestRun = undefined!
private isFirstRun = true
private restartsCount = 0
@ -214,6 +217,7 @@ export class Vitest {
this._state = new StateManager()
this._cache = new VitestCache(this.version)
this._snapshot = new SnapshotManager({ ...resolved.snapshotOptions })
this._testRun = new TestRun(this)
if (this.config.watch) {
this.watcher.registerWatcher()
@ -448,43 +452,36 @@ export class Vitest {
await this.report('onInit', this)
await this.report('onPathsCollected', files.flatMap(f => f.filepath))
const workspaceSpecs = new Map<TestProject, File[]>()
const specifications: TestSpecification[] = []
for (const file of files) {
const project = this.getProjectByName(file.projectName || '')
const specs = workspaceSpecs.get(project) || []
specs.push(file)
workspaceSpecs.set(project, specs)
const specification = project.createSpecification(file.filepath, undefined, file.pool)
specifications.push(specification)
}
for (const [project, files] of workspaceSpecs) {
const filepaths = files.map(f => f.filepath)
this.state.clearFiles(project, filepaths)
files.forEach((file) => {
file.logs?.forEach(log => this.state.updateUserLog(log))
})
this.state.collectFiles(project, files)
}
await this.report('onCollected', files).catch(noop)
await this.report('onSpecsCollected', specifications.map(spec => spec.toJSON()))
await this._testRun.start(specifications).catch(noop)
for (const file of files) {
const logs: UserConsoleLog[] = []
const taskPacks: TaskResultPack[] = []
const project = this.getProjectByName(file.projectName || '')
await this._testRun.enqueued(project, file).catch(noop)
await this._testRun.collected(project, [file]).catch(noop)
const tasks = getTasks(file)
for (const task of tasks) {
const logs: UserConsoleLog[] = []
const { packs, events } = convertTasksToEvents(file, (task) => {
if (task.logs) {
logs.push(...task.logs)
}
taskPacks.push([task.id, task.result, task.meta])
}
})
logs.sort((log1, log2) => log1.time - log2.time)
for (const log of logs) {
await this.report('onUserConsoleLog', log).catch(noop)
await this._testRun.log(log).catch(noop)
}
await this.report('onTaskUpdate', taskPacks).catch(noop)
await this._testRun.updated(packs, events).catch(noop)
}
if (hasFailed(files)) {
@ -492,7 +489,7 @@ export class Vitest {
}
this._checkUnhandledErrors(errors)
await this.report('onFinished', files, errors)
await this._testRun.end(specifications, errors).catch(noop)
await this.initCoverageProvider()
await this.coverageProvider?.mergeReports?.(coverages)
@ -552,15 +549,24 @@ export class Vitest {
// if run with --changed, don't exit if no tests are found
if (!files.length) {
// Report coverage for uncovered files
const throwAnError = !this.config.watch || !(this.config.changed || this.config.related?.length)
await this._testRun.start([])
const coverage = await this.coverageProvider?.generateCoverage?.({ allTestsRun: true })
// set exit code before calling `onTestRunEnd` so the lifecycle is consistent
if (throwAnError) {
const exitCode = this.config.passWithNoTests ? 0 : 1
process.exitCode = exitCode
}
await this._testRun.end([], [], coverage)
// Report coverage for uncovered files
await this.reportCoverage(coverage, true)
this.logger.printNoTestFound(filters)
if (!this.config.watch || !(this.config.changed || this.config.related?.length)) {
const exitCode = this.config.passWithNoTests ? 0 : 1
process.exitCode = exitCode
if (throwAnError) {
throw new FilesNotFoundError(this.mode)
}
}
@ -670,6 +676,7 @@ export class Vitest {
await this.report('onPathsCollected', filepaths)
await this.report('onSpecsCollected', specs.map(spec => spec.toJSON()))
await this._testRun.start(specs)
// previous run
await this.runningPromise
@ -716,13 +723,12 @@ export class Vitest {
}
}
finally {
// can be duplicate files if different projects are using the same file
const files = Array.from(new Set(specs.map(spec => spec.moduleId)))
const errors = this.state.getUnhandledErrors()
// TODO: wait for coverage only if `onFinished` is defined
const coverage = await this.coverageProvider?.generateCoverage({ allTestsRun })
const errors = this.state.getUnhandledErrors()
this._checkUnhandledErrors(errors)
await this.report('onFinished', this.state.getFiles(files), errors, coverage)
await this._testRun.end(specs, errors, coverage)
await this.reportCoverage(coverage, allTestsRun)
}
})()

View File

@ -1,7 +1,6 @@
import type { RawSourceMap } from 'vite-node'
import type { RuntimeRPC } from '../../types/rpc'
import type { TestProject } from '../project'
import type { TestModule } from '../reporters/reported-tasks'
import type { ResolveSnapshotPathHandlerContext } from '../types/config'
import { mkdir, writeFile } from 'node:fs/promises'
import { join } from 'pathe'
@ -15,7 +14,7 @@ interface MethodsOptions {
}
export function createMethodsRPC(project: TestProject, options: MethodsOptions = {}): RuntimeRPC {
const ctx = project.ctx
const ctx = project.vitest
const cacheFs = options.cacheFs ?? false
return {
snapshotSaved(snapshot) {
@ -79,35 +78,24 @@ export function createMethodsRPC(project: TestProject, options: MethodsOptions =
ctx.state.collectPaths(paths)
return ctx.report('onPathsCollected', paths)
},
onQueued(file) {
ctx.state.collectFiles(project, [file])
const testModule = ctx.state.getReportedEntity(file) as TestModule
return ctx.report('onTestModuleQueued', testModule)
async onQueued(file) {
await ctx._testRun.enqueued(project, file)
},
onCollected(files) {
ctx.state.collectFiles(project, files)
return ctx.report('onCollected', files)
async onCollected(files) {
await ctx._testRun.collected(project, files)
},
onAfterSuiteRun(meta) {
ctx.coverageProvider?.onAfterSuiteRun(meta)
},
onTaskUpdate(packs) {
ctx.state.updateTasks(packs)
return ctx.report('onTaskUpdate', packs)
async onTaskUpdate(packs, events) {
await ctx._testRun.updated(packs, events)
},
onUserConsoleLog(log) {
ctx.state.updateUserLog(log)
ctx.report('onUserConsoleLog', log)
async onUserConsoleLog(log) {
await ctx._testRun.log(log)
},
onUnhandledError(err, type) {
ctx.state.catchError(err, type)
},
onFinished(files) {
const errors = ctx.state.getUnhandledErrors()
ctx._checkUnhandledErrors(errors)
return ctx.report('onFinished', files, errors)
},
onCancel(reason) {
ctx.cancelCurrentRun(reason)
},

View File

@ -19,7 +19,8 @@ export function createTypecheckPool(ctx: Vitest): ProcessPool {
) {
const checker = project.typechecker!
await ctx.report('onTaskUpdate', checker.getTestPacks())
const { packs, events } = checker.getTestPacksAndEvents()
await ctx._testRun.updated(packs, events)
if (!project.config.typecheck.ignoreSourceErrors) {
sourceErrors.forEach(error =>
@ -62,8 +63,11 @@ export function createTypecheckPool(ctx: Vitest): ProcessPool {
checker.setFiles(files)
checker.onParseStart(async () => {
ctx.state.collectFiles(project, checker.getTestFiles())
await ctx.report('onCollected')
const files = checker.getTestFiles()
for (const file of files) {
await ctx._testRun.enqueued(project, file)
}
await ctx._testRun.collected(project, files)
})
checker.onParseEnd(result => onParseEnd(project, result))
@ -81,10 +85,15 @@ export function createTypecheckPool(ctx: Vitest): ProcessPool {
}
await checker.collectTests()
ctx.state.collectFiles(project, checker.getTestFiles())
await ctx.report('onTaskUpdate', checker.getTestPacks())
await ctx.report('onCollected')
const testFiles = checker.getTestFiles()
for (const file of testFiles) {
await ctx._testRun.enqueued(project, file)
}
await ctx._testRun.collected(project, testFiles)
const { packs, events } = checker.getTestPacksAndEvents()
await ctx._testRun.updated(packs, events)
})
await checker.prepare()
@ -108,8 +117,11 @@ export function createTypecheckPool(ctx: Vitest): ProcessPool {
const checker = await createWorkspaceTypechecker(project, files)
checker.setFiles(files)
await checker.collectTests()
ctx.state.collectFiles(project, checker.getTestFiles())
await ctx.report('onCollected')
const testFiles = checker.getTestFiles()
for (const file of testFiles) {
await ctx._testRun.enqueued(project, file)
}
await ctx._testRun.collected(project, testFiles)
}
}
@ -136,8 +148,11 @@ export function createTypecheckPool(ctx: Vitest): ProcessPool {
})
const triggered = await _p
if (project.typechecker && !triggered) {
ctx.state.collectFiles(project, project.typechecker.getTestFiles())
await ctx.report('onCollected')
const testFiles = project.typechecker.getTestFiles()
for (const file of testFiles) {
await ctx._testRun.enqueued(project, file)
}
await ctx._testRun.collected(project, testFiles)
await onParseEnd(project, project.typechecker.getResult())
continue
}

View File

@ -1,7 +1,7 @@
import type { File, TaskResultPack } from '@vitest/runner'
import type { File } from '@vitest/runner'
import type { Vitest } from '../core'
import type { BaseOptions } from './base'
import type { TestModule } from './reported-tasks'
import type { ReportedHookContext, TestCase, TestModule } from './reported-tasks'
import { BaseReporter } from './base'
import { SummaryReporter } from './summary'
@ -33,6 +33,30 @@ export class DefaultReporter extends BaseReporter {
this.summary?.onTestModuleQueued(file)
}
onTestModuleCollected(module: TestModule) {
this.summary?.onTestModuleCollected(module)
}
onTestModuleEnd(module: TestModule) {
this.summary?.onTestModuleEnd(module)
}
onTestCaseReady(test: TestCase) {
this.summary?.onTestCaseReady(test)
}
onTestCaseResult(test: TestCase) {
this.summary?.onTestCaseResult(test)
}
onHookStart(hook: ReportedHookContext) {
this.summary?.onHookStart(hook)
}
onHookEnd(hook: ReportedHookContext) {
this.summary?.onHookEnd(hook)
}
onInit(ctx: Vitest) {
super.onInit(ctx)
this.summary?.onInit(ctx, { verbose: this.verbose })
@ -52,11 +76,6 @@ export class DefaultReporter extends BaseReporter {
this.summary?.onPathsCollected(paths)
}
onTaskUpdate(packs: TaskResultPack[]) {
this.summary?.onTaskUpdate(packs)
super.onTaskUpdate(packs)
}
onWatcherRerun(files: string[], trigger?: string) {
this.summary?.onWatcherRerun()
super.onWatcherRerun(files, trigger)

View File

@ -1,115 +1,97 @@
import type { File, TaskResultPack, TaskState, Test } from '@vitest/runner'
import type { File, Task, Test } from '@vitest/runner'
import type { Vitest } from '../core'
import { getTests } from '@vitest/runner/utils'
import type { TestCase, TestModule } from './reported-tasks'
import c from 'tinyrainbow'
import { BaseReporter } from './base'
import { WindowRenderer } from './renderers/windowedRenderer'
import { TaskParser } from './task-parser'
interface Icon {
char: string
color: (char: string) => string
}
type TestCaseState = ReturnType<TestCase['result']>['state']
export class DotReporter extends BaseReporter {
private summary?: DotSummary
private renderer?: WindowRenderer
private tests = new Map<Test['id'], TestCaseState>()
private finishedTests = new Set<TestCase['id']>()
onInit(ctx: Vitest) {
super.onInit(ctx)
if (this.isTTY) {
this.summary = new DotSummary()
this.summary.onInit(ctx)
this.renderer = new WindowRenderer({
logger: ctx.logger,
getWindow: () => this.createSummary(),
})
this.ctx.onClose(() => this.renderer?.stop())
}
}
onTaskUpdate(packs: TaskResultPack[]) {
this.summary?.onTaskUpdate(packs)
printTask(task: Task) {
if (!this.isTTY) {
super.onTaskUpdate(packs)
super.printTask(task)
}
}
onWatcherRerun(files: string[], trigger?: string) {
this.summary?.onWatcherRerun()
this.tests.clear()
this.renderer?.start()
super.onWatcherRerun(files, trigger)
}
onFinished(files?: File[], errors?: unknown[]) {
this.summary?.onFinished()
if (this.isTTY) {
const finalLog = formatTests(Array.from(this.tests.values()))
this.ctx.logger.log(finalLog)
}
this.tests.clear()
this.renderer?.finish()
super.onFinished(files, errors)
}
}
class DotSummary extends TaskParser {
private renderer!: WindowRenderer
private tests = new Map<Test['id'], TaskState>()
private finishedTests = new Set<Test['id']>()
onInit(ctx: Vitest): void {
this.ctx = ctx
this.renderer = new WindowRenderer({
logger: ctx.logger,
getWindow: () => this.createSummary(),
})
this.ctx.onClose(() => this.renderer.stop())
}
onWatcherRerun() {
this.tests.clear()
this.renderer.start()
}
onFinished() {
const finalLog = formatTests(Array.from(this.tests.values()))
this.ctx.logger.log(finalLog)
this.tests.clear()
this.renderer.finish()
}
onTestFilePrepare(file: File): void {
for (const test of getTests(file)) {
onTestModuleCollected(module: TestModule): void {
for (const test of module.children.allTests()) {
// Dot reporter marks pending tests as running
this.onTestStart(test)
this.onTestCaseReady(test)
}
}
onTestStart(test: Test) {
onTestCaseReady(test: TestCase) {
if (this.finishedTests.has(test.id)) {
return
}
this.tests.set(test.id, test.mode || 'run')
this.tests.set(test.id, test.result().state || 'run')
}
onTestFinished(test: Test) {
if (this.finishedTests.has(test.id)) {
return
}
onTestCaseResult(test: TestCase) {
this.finishedTests.add(test.id)
this.tests.set(test.id, test.result?.state || 'skip')
this.tests.set(test.id, test.result().state || 'skipped')
}
onTestFileFinished() {
onTestModuleEnd() {
if (!this.isTTY) {
return
}
const columns = this.ctx.logger.getColumns()
if (this.tests.size < columns) {
return
}
const finishedTests = Array.from(this.tests).filter(entry => entry[1] !== 'run')
const finishedTests = Array.from(this.tests).filter(entry => entry[1] !== 'pending')
if (finishedTests.length < columns) {
return
}
// Remove finished tests from state and render them in static output
const states: TaskState[] = []
const states: TestCaseState[] = []
let count = 0
for (const [id, state] of finishedTests) {
@ -138,14 +120,13 @@ const fail: Icon = { char: 'x', color: c.red }
const pending: Icon = { char: '*', color: c.yellow }
const skip: Icon = { char: '-', color: (char: string) => c.dim(c.gray(char)) }
function getIcon(state: TaskState): Icon {
function getIcon(state: TestCaseState): Icon {
switch (state) {
case 'pass':
case 'passed':
return pass
case 'fail':
case 'failed':
return fail
case 'skip':
case 'todo':
case 'skipped':
return skip
default:
return pending
@ -156,7 +137,7 @@ function getIcon(state: TaskState): Icon {
* Format test states into string while keeping ANSI escapes at minimal.
* Sibling icons with same color are merged into a single c.color() call.
*/
function formatTests(states: TestCaseState[]): string {
let currentIcon = pending
let count = 0
let output = ''
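
The merging described in the comment above can be sketched roughly like this. This is an illustrative sketch only, not the code from this commit; it merges runs of equal states, which is a simplification of merging by color:

```ts
import c from 'tinyrainbow'

type TestCaseState = 'pending' | 'passed' | 'failed' | 'skipped'

const icons = {
  passed: { char: '·', color: c.green },
  failed: { char: 'x', color: c.red },
  skipped: { char: '-', color: (char: string) => c.dim(c.gray(char)) },
  pending: { char: '*', color: c.yellow },
} as const

// Buffer consecutive icons that resolve to the same color and wrap the
// whole run in a single color call instead of one call per character.
function formatTestsSketch(states: TestCaseState[]): string {
  let output = ''
  let buffer = ''
  let current: TestCaseState | undefined
  for (const state of states) {
    if (state !== current && buffer) {
      output += icons[current!].color(buffer)
      buffer = ''
    }
    current = state
    buffer += icons[state].char
  }
  if (current && buffer) {
    output += icons[current].color(buffer)
  }
  return output
}
```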

View File

@ -1,4 +1,4 @@
import type { Reporter, TestRunEndReason } from '../types/reporter'
import type { BaseOptions, BaseReporter } from './base'
import type { BlobOptions } from './blob'
import type { DefaultReporterOptions } from './default'
@ -27,7 +27,7 @@ export {
TapReporter,
VerboseReporter,
}
export type { BaseReporter, Reporter, TestRunEndReason }
export {
BenchmarkBuiltinReporters,
@ -70,3 +70,5 @@ export interface BuiltinReporterOptions {
'hanging-process': never
'html': HTMLOptions
}
export type { ReportedHookContext } from './reported-tasks'

View File

@ -5,7 +5,7 @@ import type {
Suite as RunnerTestSuite,
TaskMeta,
} from '@vitest/runner'
import type { SerializedError, TestError } from '@vitest/utils'
import type { TestProject } from '../project'
class ReportedTaskImplementation {
@ -122,12 +122,29 @@ export class TestCase extends ReportedTaskImplementation {
}
/**
* Test results.
* - **pending**: Test was collected, but didn't finish running yet.
* - **passed**: Test passed successfully
* - **failed**: Test failed to execute
* - **skipped**: Test was skipped during collection or dynamically with `ctx.skip()`.
*/
public result(): TestResult {
const result = this.task.result
const mode = result?.state || this.task.mode
if (!result && (mode === 'skip' || mode === 'todo')) {
return {
state: 'skipped',
note: undefined,
errors: undefined,
}
}
if (!result || result.state === 'run' || result.state === 'queued') {
return {
state: 'pending',
errors: undefined,
}
}
const state = result.state === 'fail'
? 'failed' as const
@ -153,14 +170,6 @@ export class TestCase extends ReportedTaskImplementation {
} satisfies TestResultFailed
}
/**
* Checks if the test was skipped during collection or dynamically with `ctx.skip()`.
*/
public skipped(): boolean {
const mode = this.task.result?.state || this.task.mode
return mode === 'skip' || mode === 'todo'
}
/**
* Custom metadata that was attached to the test during its execution.
*/
@ -175,7 +184,7 @@ export class TestCase extends ReportedTaskImplementation {
public diagnostic(): TestDiagnostic | undefined {
const result = this.task.result
// startTime should always be available if the test has properly finished
if (!result || !result.startTime) {
return undefined
}
const duration = result.duration || 0
@ -228,13 +237,13 @@ class TestCollection {
/**
* Filters all tests that are part of this collection and its children.
*/
*allTests(state?: TestState): Generator<TestCase, undefined, void> {
for (const child of this) {
if (child.type === 'suite') {
yield * child.children.allTests(state)
}
else if (state) {
const testState = child.result().state
if (state === testState) {
yield child
}
@ -248,14 +257,14 @@ class TestCollection {
/**
* Filters only the tests that are part of this collection.
*/
*tests(state?: TestState): Generator<TestCase, undefined, void> {
for (const child of this) {
if (child.type !== 'test') {
continue
}
if (state) {
const testState = child.result().state
if (state === testState) {
yield child
}
@ -298,6 +307,14 @@ class TestCollection {
export type { TestCollection }
export type ReportedHookContext = {
readonly name: 'beforeAll' | 'afterAll'
readonly entity: TestSuite | TestModule
} | {
readonly name: 'beforeEach' | 'afterEach'
readonly entity: TestCase
}
abstract class SuiteImplementation extends ReportedTaskImplementation {
/** @internal */
declare public readonly task: RunnerTestSuite | RunnerTestFile
@ -313,19 +330,11 @@ abstract class SuiteImplementation extends ReportedTaskImplementation {
this.children = new TestCollection(task, project)
}
/**
* Checks if the suite was skipped during collection.
*/
public skipped(): boolean {
const mode = this.task.mode
return mode === 'skip' || mode === 'todo'
}
/**
* Errors that happened outside of the test run during collection, like syntax errors.
*/
public errors(): SerializedError[] {
return (this.task.result?.errors as SerializedError[] | undefined) || []
}
}
@ -378,6 +387,13 @@ export class TestSuite extends SuiteImplementation {
*/
declare public ok: () => boolean
/**
* Checks the running state of the suite.
*/
public state(): TestSuiteState {
return getSuiteState(this.task)
}
/**
* Full name of the suite including all parent suites separated with `>`.
*/
@ -402,8 +418,8 @@ export class TestModule extends SuiteImplementation {
/**
* This is usually an absolute UNIX file path.
* It can be a virtual ID if the file is not on the disk.
* This value corresponds to the ID in Vite's module graph.
*/
public readonly moduleId: string
@ -413,17 +429,23 @@ export class TestModule extends SuiteImplementation {
this.moduleId = task.filepath
}
/**
* Checks the running state of the test file.
*/
public state(): TestModuleState {
const state = this.task.result?.state
if (state === 'queued') {
return 'queued'
}
return getSuiteState(this.task)
}
/**
* Checks if the module has any failed tests.
* This will also return `false` if module failed during collection.
*/
declare public ok: () => boolean
/**
* Checks if the module was skipped and didn't run.
*/
declare public skipped: () => boolean
/**
* Useful information about the module like duration, memory usage, etc.
* If the module was not executed yet, all diagnostic values will return `0`.
@ -445,51 +467,75 @@ export class TestModule extends SuiteImplementation {
}
export interface TaskOptions {
readonly each: boolean | undefined
readonly fails: boolean | undefined
readonly concurrent: boolean | undefined
readonly shuffle: boolean | undefined
readonly retry: number | undefined
readonly repeats: number | undefined
readonly mode: 'run' | 'only' | 'skip' | 'todo'
}
function buildOptions(
task: RunnerTestCase | RunnerTestSuite,
): TaskOptions {
return {
each: task.each,
fails: task.type === 'test' && task.fails,
concurrent: task.concurrent,
shuffle: task.shuffle,
retry: task.retry,
repeats: task.repeats,
// runner types are too broad, but the public API should be more strict
// the queued state exists only on Files and this method is called
// only for tests and suites
mode: task.mode as TaskOptions['mode'],
}
}
export type TestSuiteState = 'skipped' | 'pending' | 'failed' | 'passed'
export type TestModuleState = TestSuiteState | 'queued'
export type TestState = TestResult['state']
export type TestResult =
| TestResultPassed
| TestResultFailed
| TestResultSkipped
| TestResultPending
export interface TestResultPending {
/**
* The test was collected, but didn't finish running yet.
*/
readonly state: 'pending'
/**
* Pending tests have no errors.
*/
readonly errors: undefined
}
export interface TestResultPassed {
/**
* The test passed successfully.
*/
readonly state: 'passed'
/**
* Errors that were thrown during the test execution.
*
* **Note**: If the test was retried successfully, errors will still be reported.
*/
readonly errors: ReadonlyArray<TestError> | undefined
}
export interface TestResultFailed {
/**
* The test failed to execute.
*/
readonly state: 'failed'
/**
* Errors that were thrown during the test execution.
*/
readonly errors: ReadonlyArray<TestError>
}
export interface TestResultSkipped {
@ -497,80 +543,72 @@ export interface TestResultSkipped {
* The test was skipped with `only` (on another test), `skip` or `todo` flag.
* You can see which one was used in the `options.mode` option.
*/
readonly state: 'skipped'
/**
* Skipped tests have no errors.
*/
readonly errors: undefined
/**
* A custom note passed down to `ctx.skip(note)`.
*/
readonly note: string | undefined
}
export interface TestDiagnostic {
/**
* If the duration of the test is above `slowTestThreshold`.
*/
readonly slow: boolean
/**
* The amount of memory used by the test in bytes.
* This value is only available if the test was executed with `logHeapUsage` flag.
*/
readonly heap: number | undefined
/**
* The time it takes to execute the test in ms.
*/
readonly duration: number
/**
* The time in ms when the test started.
*/
readonly startTime: number
/**
* The amount of times the test was retried.
*/
readonly retryCount: number
/**
* The amount of times the test was repeated as configured by `repeats` option.
* This value can be lower if the test failed during the repeat and no `retry` is configured.
*/
readonly repeatCount: number
/**
* If test passed on a second retry.
*/
readonly flaky: boolean
}
export interface ModuleDiagnostic {
/**
* The time it takes to import and initiate an environment.
*/
readonly environmentSetupDuration: number
/**
* The time it takes Vitest to setup test harness (runner, mocks, etc.).
*/
readonly prepareDuration: number
/**
* The time it takes to import the test module.
* This includes importing everything in the module and executing suite callbacks.
*/
readonly collectDuration: number
/**
* The time it takes to import the setup module.
*/
readonly setupDuration: number
/**
* Accumulated duration of all tests and hooks in the module.
*/
readonly duration: number
}
function storeTask(
@ -593,3 +631,21 @@ function getReportedTask(
}
return reportedTask
}
function getSuiteState(task: RunnerTestSuite | RunnerTestFile): TestSuiteState {
const mode = task.mode
const state = task.result?.state
if (mode === 'skip' || mode === 'todo' || state === 'skip' || state === 'todo') {
return 'skipped'
}
if (state == null || state === 'run' || state === 'only') {
return 'pending'
}
if (state === 'fail') {
return 'failed'
}
if (state === 'pass') {
return 'passed'
}
throw new Error(`Unknown suite state: ${state}`)
}
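
Since `result()` now always returns an object, a consumer can switch on the state directly. A minimal sketch of reading it in a reporter (it assumes `fullName`, the test's name including its parent suites; by the time `onTestCaseResult` fires, the state can no longer be `pending`):

```ts
import type { Reporter, TestCase } from 'vitest/node'

export default {
  onTestCaseResult(testCase: TestCase) {
    const result = testCase.result()
    switch (result.state) {
      case 'passed':
        console.log(`✓ ${testCase.fullName}`)
        break
      case 'failed':
        // `errors` is always defined for failed tests
        console.error(`✗ ${testCase.fullName}`, result.errors)
        break
      case 'skipped':
        // `note` carries the message passed to ctx.skip(note), if any
        console.log(`- ${testCase.fullName}`, result.note ?? '')
        break
    }
  },
} satisfies Reporter
```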

View File

@ -1,14 +1,10 @@
import type { Vitest } from '../core'
import type { Reporter } from '../types/reporter'
import type { ReportedHookContext, TestCase, TestModule } from './reported-tasks'
import c from 'tinyrainbow'
import { F_POINTER, F_TREE_NODE_END, F_TREE_NODE_MIDDLE } from './renderers/figures'
import { formatProjectName, formatTime, formatTimeString, padSummaryTitle } from './renderers/utils'
import { WindowRenderer } from './renderers/windowedRenderer'
import { TaskParser } from './task-parser'
const DURATION_UPDATE_INTERVAL_MS = 100
const FINISHED_TEST_CLEANUP_TIME_MS = 1_000
@ -34,33 +30,32 @@ interface SlowTask {
hook?: Omit<SlowTask, 'hook'>
}
interface RunningModule extends Pick<Counter, 'total' | 'completed'> {
filename: TestModule['task']['name']
projectName: TestModule['project']['name']
hook?: Omit<SlowTask, 'hook'>
tests: Map<TestCase['id'], SlowTask>
typecheck: boolean
}
/**
* Reporter extension that renders summary and forwards all other logs above itself.
* Intended to be used by other reporters, not as a standalone reporter.
*/
export class SummaryReporter implements Reporter {
private ctx!: Vitest
private options!: Options
private renderer!: WindowRenderer
private modules = emptyCounters()
private tests = emptyCounters()
private maxParallelTests = 0
/** Currently running test modules, may include finished test modules too */
private runningModules = new Map<TestModule['id'], RunningModule>()
/** IDs of finished `this.runningModules` entries that are currently being shown */
private finishedModules = new Map<TestModule['id'], NodeJS.Timeout>()
private startTime = ''
private currentTime = 0
@ -88,19 +83,14 @@ export class SummaryReporter extends TaskParser implements Reporter {
})
}
onPathsCollected(paths?: string[]) {
this.modules.total = (paths || []).length
}
onWatcherRerun() {
this.runningModules.clear()
this.finishedModules.clear()
this.modules = emptyCounters()
this.tests = emptyCounters()
this.startTimers()
@ -108,50 +98,38 @@ export class SummaryReporter extends TaskParser implements Reporter {
}
onFinished() {
this.runningModules.clear()
this.finishedModules.clear()
this.renderer.finish()
clearInterval(this.durationInterval)
}
onTestModuleQueued(module: TestModule) {
// When a new test module starts, take the place of a previously finished test module, if any
if (this.finishedModules.size) {
const finished = this.finishedModules.keys().next().value
this.removeTestModule(finished)
}
this.runningModules.set(module.id, initializeStats(module))
}
onTestModuleCollected(module: TestModule) {
let stats = this.runningModules.get(module.id)
if (!stats) {
stats = initializeStats(module)
this.runningModules.set(module.id, stats)
}
const total = Array.from(module.children.allTests()).length
this.tests.total += total
stats.total = total
this.maxParallelTests = Math.max(this.maxParallelTests, this.runningModules.size)
}
onHookStart(options: ReportedHookContext) {
const stats = this.getHookStats(options)
if (!stats) {
@ -174,7 +152,7 @@ export class SummaryReporter extends TaskParser implements Reporter {
hook.onFinish = () => clearTimeout(timeout)
}
onHookEnd(options: ReportedHookContext) {
const stats = this.getHookStats(options)
if (stats?.hook?.name !== options.name) {
@ -185,13 +163,13 @@ export class SummaryReporter extends TaskParser implements Reporter {
stats.hook.visible = false
}
onTestCaseReady(test: TestCase) {
// Track slow running tests only on verbose mode
if (!this.options.verbose) {
return
}
const stats = this.runningModules.get(test.module.id)
if (!stats || stats.tests.has(test.id)) {
return
@ -216,8 +194,8 @@ export class SummaryReporter extends TaskParser implements Reporter {
stats.tests.set(test.id, slowTest)
}
onTestCaseResult(test: TestCase) {
const stats = this.runningModules.get(test.module.id)
if (!stats) {
return
@ -227,97 +205,78 @@ export class SummaryReporter extends TaskParser implements Reporter {
stats.tests.delete(test.id)
stats.completed++
const result = test.result()
if (result?.state === 'passed') {
this.tests.passed++
}
else if (result?.state === 'failed') {
this.tests.failed++
}
else if (!result?.state || result?.state === 'skipped') {
this.tests.skipped++
}
}
onTestModuleEnd(module: TestModule) {
const state = module.state()
this.modules.completed++
if (state === 'passed') {
this.modules.passed++
}
else if (state === 'failed') {
this.modules.failed++
}
else if (module.task.mode === 'todo' && state === 'skipped') {
this.modules.todo++
}
else if (state === 'skipped') {
this.modules.skipped++
}
const left = this.modules.total - this.modules.completed
// Keep finished tests visible in summary for a while if there are more tests left.
// When a new test starts in onTestModuleQueued it will take this one's place.
// This reduces flickering by making summary more stable.
if (left > this.maxParallelTests) {
this.finishedModules.set(module.id, setTimeout(() => {
this.removeTestModule(module.id)
}, FINISHED_TEST_CLEANUP_TIME_MS).unref())
}
else {
// Run is about to end as there are fewer tests left than the run had in parallel at its peak.
// Remove finished test immediately.
this.removeTestModule(module.id)
}
}
private getHookStats({ entity }: ReportedHookContext) {
// Track slow running hooks only on verbose mode
if (!this.options.verbose) {
return
}
const module = entity.type === 'module' ? entity : entity.module
const stats = this.runningModules.get(module.id)
if (!stats) {
return
}
return entity.type === 'test' ? stats.tests.get(entity.id) : stats
}
private createSummary() {
const summary = ['']
for (const testFile of Array.from(this.runningModules.values()).sort(sortRunningModules)) {
const typecheck = testFile.typecheck ? `${c.bgBlue(c.bold(' TS '))} ` : ''
summary.push(
c.bold(c.yellow(` ${F_POINTER} `))
+ formatProjectName(testFile.projectName)
+ typecheck
+ testFile.filename
+ c.dim(!testFile.completed && !testFile.total
? ' [queued]'
@ -345,11 +304,11 @@ export class SummaryReporter extends TaskParser implements Reporter {
}
}
if (this.runningModules.size > 0) {
summary.push('')
}
summary.push(padSummaryTitle('Test Files') + getStateString(this.modules))
summary.push(padSummaryTitle('Tests') + getStateString(this.tests))
summary.push(padSummaryTitle('Start at') + this.startTime)
summary.push(padSummaryTitle('Duration') + formatTime(this.duration))
@ -369,19 +328,19 @@ export class SummaryReporter extends TaskParser implements Reporter {
}, DURATION_UPDATE_INTERVAL_MS).unref()
}
private removeTestModule(id?: TestModule['id']) {
if (!id) {
return
}
const testFile = this.runningModules.get(id)
testFile?.hook?.onFinish()
testFile?.tests?.forEach(test => test.onFinish())
this.runningModules.delete(id)
clearTimeout(this.finishedModules.get(id))
this.finishedModules.delete(id)
}
}
@ -402,7 +361,7 @@ function getStateString(entry: Counter) {
)
}
function sortRunningModules(a: RunningModule, b: RunningModule) {
if ((a.projectName || '') > (b.projectName || '')) {
return 1
}
@ -413,3 +372,14 @@ function sortRunningTests(a: RunningTest, b: RunningTest) {
return a.filename.localeCompare(b.filename)
}
function initializeStats(module: TestModule): RunningModule {
return {
total: 0,
completed: 0,
filename: module.task.name,
projectName: module.project.name,
tests: new Map(),
typecheck: !!module.task.meta.typecheck,
}
}
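
The `getHookStats` pattern above generalizes to any reporter that consumes `ReportedHookContext`: the `name` field narrows the `entity` type. A sketch of that narrowing (it reuses `fullName` and the `module` property shown in this diff):

```ts
import type { ReportedHookContext, Reporter } from 'vitest/node'

const hookLogger: Reporter = {
  onHookStart(hook: ReportedHookContext) {
    if (hook.name === 'beforeEach' || hook.name === 'afterEach') {
      // entity is narrowed to TestCase here
      console.log(`${hook.name} for test ${hook.entity.fullName}`)
    }
    else {
      // entity is a TestSuite or TestModule; resolve the owning module
      const module = hook.entity.type === 'module' ? hook.entity : hook.entity.module
      console.log(`${hook.name} in ${module.moduleId}`)
    }
  },
}
```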

View File

@ -1,86 +0,0 @@
import type { File, Task, TaskResultPack, Test } from '@vitest/runner'
import type { Vitest } from '../core'
import { getTests } from '@vitest/runner/utils'
export interface HookOptions {
name: string
file: File
id: File['id'] | Test['id']
type: Task['type']
}
export class TaskParser {
ctx!: Vitest
onInit(ctx: Vitest) {
this.ctx = ctx
}
onHookStart(_options: HookOptions) {}
onHookEnd(_options: HookOptions) {}
onTestStart(_test: Test) {}
onTestFinished(_test: Test) {}
onTestFilePrepare(_file: File) {}
onTestFileFinished(_file: File) {}
onTaskUpdate(packs: TaskResultPack[]) {
const startingTestFiles: File[] = []
const finishedTestFiles: File[] = []
const startingTests: Test[] = []
const finishedTests: Test[] = []
const startingHooks: HookOptions[] = []
const endingHooks: HookOptions[] = []
for (const pack of packs) {
const task = this.ctx.state.idMap.get(pack[0])
if (task?.type === 'suite' && 'filepath' in task && task.result?.state) {
if (task?.result?.state === 'run' || task?.result?.state === 'queued') {
startingTestFiles.push(task)
}
else {
// Skipped tests are not reported, do it manually
for (const test of getTests(task)) {
if (!test.result || test.result?.state === 'skip') {
finishedTests.push(test)
}
}
finishedTestFiles.push(task.file)
}
}
if (task?.type === 'test') {
if (task.result?.state === 'run' || task.result?.state === 'queued') {
startingTests.push(task)
}
else if (task.result?.hooks?.afterEach !== 'run') {
finishedTests.push(task)
}
}
if (task?.result?.hooks) {
for (const [hook, state] of Object.entries(task.result.hooks)) {
if (state === 'run' || state === 'queued') {
startingHooks.push({ name: hook, file: task.file, id: task.id, type: task.type })
}
else {
endingHooks.push({ name: hook, file: task.file, id: task.id, type: task.type })
}
}
}
}
endingHooks.forEach(hook => this.onHookEnd(hook))
finishedTests.forEach(test => this.onTestFinished(test))
finishedTestFiles.forEach(file => this.onTestFileFinished(file))
startingTestFiles.forEach(file => this.onTestFilePrepare(file))
startingTests.forEach(test => this.onTestStart(test))
startingHooks.forEach(hook => this.onHookStart(hook))
}
}

View File

@ -1,6 +1,9 @@
import type { SerializedTestSpecification } from '../runtime/types/utils'
import type { TestProject } from './project'
import type { TestModule } from './reporters/reported-tasks'
import type { Pool } from './types/pool-options'
import { generateFileHash } from '@vitest/runner/utils'
import { relative } from 'pathe'
export class TestSpecification {
/**
@ -16,6 +19,10 @@ export class TestSpecification {
*/
public readonly 2: { pool: Pool }
/**
* The task ID associated with the test module.
*/
public readonly taskId: string
/**
* The test project that the module belongs to.
*/
@ -43,12 +50,34 @@ export class TestSpecification {
this[0] = project
this[1] = moduleId
this[2] = { pool }
const name = project.config.name
const hashName = pool !== 'typescript'
? name
: name
// https://github.com/vitest-dev/vitest/blob/main/packages/vitest/src/typecheck/collect.ts#L58
? `${name}:__typecheck__`
: '__typecheck__'
this.taskId = generateFileHash(
relative(project.config.root, moduleId),
hashName,
)
this.project = project
this.moduleId = moduleId
this.pool = pool
this.testLines = testLines
}
/**
* Test module associated with the specification.
*/
get testModule(): TestModule | undefined {
const task = this.project.vitest.state.idMap.get(this.taskId)
if (!task) {
return undefined
}
return this.project.vitest.state.getReportedEntity(task) as TestModule | undefined
}
toJSON(): SerializedTestSpecification {
return [
{
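
A usage sketch of the new getter: before the file is collected, `testModule` returns `undefined`; afterwards it resolves the reported `TestModule` through the same hash-based task ID:

```ts
import type { TestSpecification } from 'vitest/node'

function reportSpecification(spec: TestSpecification) {
  const testModule = spec.testModule
  if (!testModule) {
    console.log(`${spec.moduleId} is not collected yet`)
    return
  }
  console.log(`${spec.moduleId} finished as ${testModule.state()}`)
}
```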

View File

@ -0,0 +1,170 @@
import type { File as RunnerTestFile, TaskEventPack, TaskResultPack, TaskUpdateEvent } from '@vitest/runner'
import type { SerializedError } from '../public/utils'
import type { UserConsoleLog } from '../types/general'
import type { Vitest } from './core'
import type { TestProject } from './project'
import type { ReportedHookContext, TestCollection, TestModule } from './reporters/reported-tasks'
import type { TestSpecification } from './spec'
import assert from 'node:assert'
import { serializeError } from '@vitest/utils/error'
export class TestRun {
constructor(private vitest: Vitest) {}
async start(specifications: TestSpecification[]) {
await this.vitest.report('onTestRunStart', [...specifications])
}
async enqueued(project: TestProject, file: RunnerTestFile) {
this.vitest.state.collectFiles(project, [file])
const testModule = this.vitest.state.getReportedEntity(file) as TestModule
await this.vitest.report('onTestModuleQueued', testModule)
}
async collected(project: TestProject, files: RunnerTestFile[]) {
this.vitest.state.collectFiles(project, files)
await Promise.all([
this.vitest.report('onCollected', files),
...files.map((file) => {
const testModule = this.vitest.state.getReportedEntity(file) as TestModule
return this.vitest.report('onTestModuleCollected', testModule)
}),
])
}
async log(log: UserConsoleLog) {
this.vitest.state.updateUserLog(log)
await this.vitest.report('onUserConsoleLog', log)
}
async updated(update: TaskResultPack[], events: TaskEventPack[]) {
this.vitest.state.updateTasks(update)
// TODO: what is the order of reports here?
// "onTaskUpdate" in parallel with others or before all or after all?
// TODO: error handling - what happens if custom reporter throws an error?
await this.vitest.report('onTaskUpdate', update)
for (const [id, event] of events) {
await this.reportEvent(id, event).catch((error) => {
this.vitest.state.catchError(serializeError(error), 'Unhandled Reporter Error')
})
}
}
async end(specifications: TestSpecification[], errors: unknown[], coverage?: unknown) {
// specifications won't have the File task if they were filtered by the --shard command
const modules = specifications.map(spec => spec.testModule).filter(s => s != null)
const files = modules.map(m => m.task)
const state = this.vitest.isCancelling
? 'interrupted'
// by this point, the run will be marked as failed if there are any errors,
// should it be done by testRun.end?
: process.exitCode
? 'failed'
: 'passed'
try {
await Promise.all([
this.vitest.report('onTestRunEnd', modules, [...errors] as SerializedError[], state),
// TODO: in a perfect world, the coverage should be done in parallel to `onFinished`
this.vitest.report('onFinished', files, errors, coverage),
])
}
finally {
if (coverage) {
await this.vitest.report('onCoverage', coverage)
}
}
}
private async reportEvent(id: string, event: TaskUpdateEvent) {
const task = this.vitest.state.idMap.get(id)
const entity = task && this.vitest.state.getReportedEntity(task)
assert(task && entity, `Entity must be found for task ${task?.name || id}`)
if (event === 'suite-prepare' && entity.type === 'suite') {
return await this.vitest.report('onTestSuiteReady', entity)
}
if (event === 'suite-prepare' && entity.type === 'module') {
return await this.vitest.report('onTestModuleStart', entity)
}
if (event === 'suite-finished') {
assert(entity.type === 'suite' || entity.type === 'module', 'Entity type must be suite or module')
if (entity.state() === 'skipped') {
// everything inside suite or a module is skipped,
// so we won't get any children events
// we need to report everything manually
await this.reportChildren(entity.children)
}
else {
// skipped tests need to be reported manually once test module/suite has finished
for (const test of entity.children.tests('skipped')) {
if (test.task.result?.pending) {
// pending error tasks are reported normally
continue
}
await this.vitest.report('onTestCaseReady', test)
await this.vitest.report('onTestCaseResult', test)
}
}
if (entity.type === 'module') {
await this.vitest.report('onTestModuleEnd', entity)
}
else {
await this.vitest.report('onTestSuiteResult', entity)
}
return
}
if (event === 'test-prepare' && entity.type === 'test') {
return await this.vitest.report('onTestCaseReady', entity)
}
if (event === 'test-finished' && entity.type === 'test') {
return await this.vitest.report('onTestCaseResult', entity)
}
if (event.startsWith('before-hook') || event.startsWith('after-hook')) {
const isBefore = event.startsWith('before-hook')
const hook: ReportedHookContext = entity.type === 'test'
? {
name: isBefore ? 'beforeEach' : 'afterEach',
entity,
}
: {
name: isBefore ? 'beforeAll' : 'afterAll',
entity,
}
if (event.endsWith('-start')) {
await this.vitest.report('onHookStart', hook)
}
else {
await this.vitest.report('onHookEnd', hook)
}
}
}
private async reportChildren(children: TestCollection) {
for (const child of children) {
if (child.type === 'test') {
await this.vitest.report('onTestCaseReady', child)
await this.vitest.report('onTestCaseResult', child)
}
else {
await this.vitest.report('onTestSuiteReady', child)
await this.reportChildren(child.children)
await this.vitest.report('onTestSuiteResult', child)
}
}
}
}

View File

@ -1,20 +1,38 @@
import type { File, TaskResultPack } from '@vitest/runner'
import type { SerializedError } from '@vitest/utils'
import type { SerializedTestSpecification } from '../../runtime/types/utils'
import type { Awaitable, UserConsoleLog } from '../../types/general'
import type { Vitest } from '../core'
import type { TestModule } from '../reporters/reported-tasks'
import type { ReportedHookContext, TestCase, TestModule, TestSuite } from '../reporters/reported-tasks'
import type { TestSpecification } from '../spec'
export type TestRunEndReason = 'passed' | 'interrupted' | 'failed'
export interface Reporter {
onInit?: (vitest: Vitest) => void
/**
* @deprecated use `onTestRunStart` instead
*/
onPathsCollected?: (paths?: string[]) => Awaitable<void>
/**
* @deprecated use `onTestRunStart` instead
*/
onSpecsCollected?: (specs?: SerializedTestSpecification[]) => Awaitable<void>
/**
* @deprecated use `onTestModuleCollected` instead
*/
onCollected?: (files: File[]) => Awaitable<void>
/**
* @deprecated use `onTestRunEnd` instead
*/
onFinished?: (
files: File[],
errors: unknown[],
coverage?: unknown
) => Awaitable<void>
/**
* @deprecated use `onTestModuleQueued`, `onTestModuleStart`, `onTestModuleEnd`, `onTestCaseReady`, `onTestCaseResult` instead
*/
onTaskUpdate?: (packs: TaskResultPack[]) => Awaitable<void>
onTestRemoved?: (trigger?: string) => Awaitable<void>
onWatcherStart?: (files?: File[], errors?: unknown[]) => Awaitable<void>
@ -22,4 +40,67 @@ export interface Reporter {
onServerRestart?: (reason?: string) => Awaitable<void>
onUserConsoleLog?: (log: UserConsoleLog) => Awaitable<void>
onProcessTimeout?: () => Awaitable<void>
/**
* Called when the new test run starts.
*/
onTestRunStart?: (specifications: ReadonlyArray<TestSpecification>) => Awaitable<void>
/**
* Called when the test run is finished.
*/
onTestRunEnd?: (
testModules: ReadonlyArray<TestModule>,
unhandledErrors: ReadonlyArray<SerializedError>,
reason: TestRunEndReason
) => Awaitable<void>
/**
* Called when the module is enqueued for testing. The file itself is not loaded yet.
*/
onTestModuleQueued?: (testModule: TestModule) => Awaitable<void>
/**
* Called when the test file is loaded and the module is ready to run tests.
*/
onTestModuleCollected?: (testModule: TestModule) => Awaitable<void>
/**
* Called when starting to run tests of the test file.
*/
onTestModuleStart?: (testModule: TestModule) => Awaitable<void>
/**
* Called when all tests of the test file have finished running.
*/
onTestModuleEnd?: (testModule: TestModule) => Awaitable<void>
/**
* Called when test case is ready to run.
* Called before the `beforeEach` hooks for the test are run.
*/
onTestCaseReady?: (testCase: TestCase) => Awaitable<void>
/**
* Called after the test and its hooks are finished running.
* The `result()` cannot be `pending`.
*/
onTestCaseResult?: (testCase: TestCase) => Awaitable<void>
/**
* Called when test suite is ready to run.
* Called before the `beforeAll` hooks for the test are run.
*/
onTestSuiteReady?: (testSuite: TestSuite) => Awaitable<void>
/**
* Called after the test suite and its hooks are finished running.
* The `state` cannot be `pending`.
*/
onTestSuiteResult?: (testSuite: TestSuite) => Awaitable<void>
/**
* Called before the hook starts to run.
*/
onHookStart?: (hook: ReportedHookContext) => Awaitable<void>
/**
* Called after the hook finished running.
*/
onHookEnd?: (hook: ReportedHookContext) => Awaitable<void>
onCoverage?: (coverage: unknown) => Awaitable<void>
}
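
Put together, a minimal reporter built on the new lifecycle might look like this. It is a sketch, not code from this commit; `reason` reflects how the run ended (`'passed'`, `'failed'` or `'interrupted'`):

```ts
import type {
  Reporter,
  SerializedError,
  TestModule,
  TestRunEndReason,
} from 'vitest/node'

export default class RunSummaryReporter implements Reporter {
  private failed = 0

  onTestModuleEnd(testModule: TestModule) {
    if (testModule.state() === 'failed') {
      this.failed++
    }
  }

  onTestRunEnd(
    testModules: ReadonlyArray<TestModule>,
    unhandledErrors: ReadonlyArray<SerializedError>,
    reason: TestRunEndReason,
  ) {
    console.log(`run ${reason}: ${this.failed}/${testModules.length} modules failed`)
    if (unhandledErrors.length) {
      console.error(`${unhandledErrors.length} unhandled errors`)
    }
  }
}
```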

View File

@ -34,17 +34,20 @@ export type { JUnitOptions } from '../node/reporters/junit'
export type {
ModuleDiagnostic,
TaskOptions,
TestCase,
TestCollection,
TestDiagnostic,
TestModule,
TestModuleState,
TestResult,
TestResultFailed,
TestResultPassed,
TestResultSkipped,
TestState,
TestSuite,
TestSuiteState,
} from '../node/reporters/reported-tasks'
export { BaseSequencer } from '../node/sequencers/BaseSequencer'
@ -152,9 +155,12 @@ export type {
RunnerTestSuite,
} from './index'
export type {
ReportedHookContext,
Reporter,
TestRunEndReason,
} from './reporters'
export { generateFileHash } from '@vitest/runner/utils'
export type { SerializedError } from '@vitest/utils'
export {
esbuildVersion,

View File

@ -22,5 +22,7 @@ export type {
JsonAssertionResult,
JsonTestResult,
JsonTestResults,
ReportedHookContext,
Reporter,
TestRunEndReason,
} from '../node/reporters'

View File

@ -75,7 +75,6 @@ export function createRuntimeRpc(
{
eventNames: [
'onUserConsoleLog',
'onFinished',
'onCollected',
'onCancel',
],

View File

@ -1,6 +1,7 @@
import type {
Suite,
Task,
TaskUpdateEvent,
VitestRunner,
VitestRunnerImportSource,
} from '@vitest/runner'
@ -59,7 +60,7 @@ async function runBenchmarkSuite(suite: Suite, runner: NodeBenchmarkRunner) {
startTime: start,
benchmark: createBenchmarkResult(suite.name),
}
updateTask('suite-prepare', suite)
const addBenchTaskListener = (
task: InstanceType<typeof Task>,
@ -82,7 +83,7 @@ async function runBenchmarkSuite(suite: Suite, runner: NodeBenchmarkRunner) {
if (!runner.config.benchmark?.includeSamples) {
result.samples.length = 0
}
updateTask('test-finished', benchmark)
},
{
once: true,
@ -122,7 +123,7 @@ async function runBenchmarkSuite(suite: Suite, runner: NodeBenchmarkRunner) {
for (const benchmark of benchmarkGroup) {
const task = benchmarkTasks.get(benchmark)!
updateTask('test-prepare', benchmark)
await task.warmup()
tasks.push([
await new Promise<BenchTask>(resolve =>
@ -137,14 +138,14 @@ async function runBenchmarkSuite(suite: Suite, runner: NodeBenchmarkRunner) {
suite.result!.duration = performance.now() - start
suite.result!.state = 'pass'
updateTask('suite-finished', suite)
defer.resolve(null)
await defer
}
function updateTask(event: TaskUpdateEvent, task: Task) {
updateRunnerTask(event, task, runner)
}
}

View File

@ -62,9 +62,9 @@ export async function resolveTestRunner(
// patch some methods, so custom runners don't need to call RPC
const originalOnTaskUpdate = testRunner.onTaskUpdate
testRunner.onTaskUpdate = async (task, events) => {
const p = rpc().onTaskUpdate(task, events)
await originalOnTaskUpdate?.call(testRunner, task, events)
return p
}

View File

@ -1,5 +1,4 @@
import type { File, RunMode, Suite, Test } from '@vitest/runner'
import type { Node } from 'estree'
import type { RawSourceMap } from 'vite-node'
import type { TestProject } from '../node/project'
import {
@ -71,7 +70,7 @@ export async function collectTests(
}
file.file = file
const definitions: LocalCallDefinition[] = []
const getName = (callee: any): string | null => {
if (!callee) {
return null
}
@ -85,20 +84,18 @@ export async function collectTests(
return getName(callee.tag)
}
if (callee.type === 'MemberExpression') {
if (
callee.object?.type === 'Identifier'
&& ['it', 'test', 'describe', 'suite'].includes(callee.object.name)
) {
return callee.object?.name
}
// direct call as `__vite_ssr_exports_0__.test()`
if (callee.object?.name?.startsWith('__vite_ssr_')) {
return getName(callee.property)
}
// call as `__vite_ssr__.test.skip()`
return getName(callee.object?.property)
}
return null
}
@ -114,15 +111,15 @@ export async function collectTests(
return
}
const property = callee?.property?.name
let mode = !property || property === name ? 'run' : property
// they will be picked up in the next iteration
if (['each', 'for', 'skipIf', 'runIf'].includes(mode)) {
return
}
let start: number
const end = node.end
// .each
if (callee.type === 'CallExpression') {
start = callee.end
}
@ -137,13 +134,15 @@ export async function collectTests(
arguments: [messageNode],
} = node
const isQuoted = messageNode?.type === 'Literal' || messageNode?.type === 'TemplateLiteral'
const message = isQuoted
? request.code.slice(messageNode.start + 1, messageNode.end - 1)
: request.code.slice(messageNode.start, messageNode.end)
// cannot statically analyze, so we always skip it
if (mode === 'skipIf' || mode === 'runIf') {
mode = 'skip'
}
definitions.push({
start,
end,

View File

@ -1,5 +1,5 @@
import type { RawSourceMap } from '@ampproject/remapping'
import type { File, Task, TaskResultPack, TaskState } from '@vitest/runner'
import type { File, Task, TaskEventPack, TaskResultPack, TaskState } from '@vitest/runner'
import type { ParsedStack } from '@vitest/utils'
import type { EachMapping } from '@vitest/utils/source-map'
import type { ChildProcess } from 'node:child_process'
@ -10,10 +10,10 @@ import type { FileInformation } from './collect'
import type { TscErrorInfo } from './types'
import { rm } from 'node:fs/promises'
import { performance } from 'node:perf_hooks'
import { getTasks } from '@vitest/runner/utils'
import { eachMapping, generatedPositionFor, TraceMap } from '@vitest/utils/source-map'
import { basename, extname, resolve } from 'pathe'
import { x } from 'tinyexec'
import { convertTasksToEvents } from '../utils/tasks'
import { collectTests } from './collect'
import { getRawErrsMapFromTsCompile, getTsconfig } from './parse'
import { createIndexMap } from './utils'
@ -358,11 +358,17 @@ export class Typechecker {
return Object.values(this._tests || {}).map(i => i.file)
}
public getTestPacksAndEvents() {
const packs: TaskResultPack[] = []
const events: TaskEventPack[] = []
for (const { file } of Object.values(this._tests || {})) {
const result = convertTasksToEvents(file)
packs.push(...result.packs)
events.push(...result.events)
}
return { packs, events }
}
}

View File

@ -1,4 +1,4 @@
import type { CancelReason, File, TaskResultPack } from '@vitest/runner'
import type { CancelReason, File, TaskEventPack, TaskResultPack } from '@vitest/runner'
import type { SnapshotResult } from '@vitest/snapshot'
import type { AfterSuiteRunMeta, TransformMode, UserConsoleLog } from './general'
@ -35,14 +35,13 @@ export interface RuntimeRPC {
force?: boolean
) => Promise<any>
onFinished: (files: File[], errors?: unknown[]) => void
onPathsCollected: (paths: string[]) => void
onUserConsoleLog: (log: UserConsoleLog) => void
onUnhandledError: (err: unknown, type: string) => void
onQueued: (file: File) => void
onCollected: (files: File[]) => Promise<void>
onAfterSuiteRun: (meta: AfterSuiteRunMeta) => void
onTaskUpdate: (pack: TaskResultPack[], events: TaskEventPack[]) => Promise<void>
onCancel: (reason: CancelReason) => void
getCountOfFailedTests: () => number

View File

@ -1,4 +1,4 @@
import type { File, Suite, Task, TaskEventPack, TaskResultPack } from '@vitest/runner'
import type { Arrayable } from '../types/general'
import { getTests } from '@vitest/runner/utils'
import { toArray } from '@vitest/utils'
@ -18,3 +18,35 @@ export function hasFailedSnapshot(suite: Arrayable<Task>): boolean {
)
})
}
export function convertTasksToEvents(file: File, onTask?: (task: Task) => void): {
packs: TaskResultPack[]
events: TaskEventPack[]
} {
const packs: TaskResultPack[] = []
const events: TaskEventPack[] = []
function visit(suite: Suite | File) {
onTask?.(suite)
packs.push([suite.id, suite.result, suite.meta])
events.push([suite.id, 'suite-prepare'])
suite.tasks.forEach((task) => {
if (task.type === 'suite') {
visit(task)
}
else {
onTask?.(task)
packs.push([task.id, task.result, task.meta])
if (task.mode !== 'skip' && task.mode !== 'todo') {
events.push([task.id, 'test-prepare'], [task.id, 'test-finished'])
}
}
})
events.push([suite.id, 'suite-finished'])
}
visit(file)
return { packs, events }
}
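
As a usage sketch (assuming a collected `RunnerTestFile` and an `onTaskUpdate` callback with the RPC signature shown above), a caller can replay a whole file through the update pipeline:

```ts
import type { File as RunnerTestFile, TaskEventPack, TaskResultPack } from '@vitest/runner'

// Replays a statically collected file: packs carry results and meta,
// events drive the reporter lifecycle calls in collection order.
async function replayFile(
  file: RunnerTestFile,
  onTaskUpdate: (packs: TaskResultPack[], events: TaskEventPack[]) => Promise<void>,
): Promise<void> {
  const { packs, events } = convertTasksToEvents(file)
  await onTaskUpdate(packs, events)
}
```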

View File

@ -1,7 +1,5 @@
import type { RunnerTestCase } from 'vitest'
import * as pathe from 'pathe'
import { assert, expect, it } from 'vitest'
import { TaskParser } from 'vitest/src/node/reporters/task-parser.js'
import { runVitest } from '../../test-utils'
it('summary', async () => {
@ -35,30 +33,6 @@ it('non-tty', async () => {
}
})
it('reports passed tasks just once', async () => {
const passed: string[] = []
class CustomReporter extends TaskParser {
onTestFinished(_test: RunnerTestCase): void {
passed.push(_test.name)
}
}
await runVitest({
root: pathe.join(import.meta.dirname, '../fixtures/reporter'),
benchmark: {
reporters: new CustomReporter(),
},
}, ['multiple.bench.ts'], 'benchmark')
expect(passed).toMatchInlineSnapshot(`
[
"first",
"second",
]
`)
})
it.for([true, false])('includeSamples %s', async (includeSamples) => {
const result = await runVitest(
{

View File

@ -1,7 +1,7 @@
import type { RunnerTestFile, RunnerTestCase } from 'vitest'
import type { ProcessPool, Vitest } from 'vitest/node'
import { createMethodsRPC } from 'vitest/node'
import { getTasks, generateFileHash } from '@vitest/runner/utils'
import { normalize, relative } from 'pathe'
export default (vitest: Vitest): ProcessPool => {
@ -20,7 +20,7 @@ export default (vitest: Vitest): ProcessPool => {
vitest.logger.console.warn('[pool] running tests for', project.name, 'in', normalize(file).toLowerCase().replace(normalize(process.cwd()).toLowerCase(), ''))
const path = relative(project.config.root, file)
const taskFile: RunnerTestFile = {
id: generateFileHash(path, project.config.name),
name: path,
mode: 'run',
meta: {},
@ -49,7 +49,11 @@ export default (vitest: Vitest): ProcessPool => {
}
taskFile.tasks.push(taskTest)
await methods.onCollected([taskFile])
await methods.onTaskUpdate(getTasks(taskFile).map(task => [
task.id,
task.result,
task.meta,
]), [])
}
},
close() {

View File

@ -18,6 +18,7 @@ it('fails multiple times', () => {
it('skips an option test', { skip: true })
it.skip('skips a .modifier test')
it('skips an ctx.skip() test', (ctx) => ctx.skip())
it('todos an option test', { todo: true })
it.todo('todos a .modifier test')

View File

@ -56,17 +56,17 @@ it('correctly reports a file', () => {
expect(testModule.location).toBeUndefined()
expect(testModule.moduleId).toBe(resolve(root, './1_first.test.ts'))
expect(testModule.project).toBe(project)
expect(testModule.children.size).toBe(17)
const tests = [...testModule.children.tests()]
expect(tests).toHaveLength(12)
const deepTests = [...testModule.children.allTests()]
expect(deepTests).toHaveLength(22)
expect.soft([...testModule.children.allTests('skipped')]).toHaveLength(8)
expect.soft([...testModule.children.allTests('passed')]).toHaveLength(9)
expect.soft([...testModule.children.allTests('failed')]).toHaveLength(5)
expect.soft([...testModule.children.allTests('pending')]).toHaveLength(0)
const suites = [...testModule.children.suites()]
expect(suites).toHaveLength(5)
@ -163,6 +163,43 @@ it('correctly reports failed test', () => {
expect(diagnostic.repeatCount).toBe(0)
})
it('correctly reports a skipped test', () => {
const optionTestCase = findTest(testModule.children, 'skips an option test')
expect(optionTestCase.result()).toEqual({
state: 'skipped',
note: undefined,
errors: undefined,
})
const modifierTestCase = findTest(testModule.children, 'skips a .modifier test')
expect(modifierTestCase.result()).toEqual({
state: 'skipped',
note: undefined,
errors: undefined,
})
const ctxSkippedTestCase = findTest(testModule.children, 'skips an ctx.skip() test')
expect(ctxSkippedTestCase.result()).toEqual({
state: 'skipped',
note: undefined,
errors: undefined,
})
const testOptionTodo = findTest(testModule.children, 'todos an option test')
expect(testOptionTodo.result()).toEqual({
state: 'skipped',
note: undefined,
errors: undefined,
})
const testModifierTodo = findTest(testModule.children, 'todos a .modifier test')
expect(testModifierTodo.result()).toEqual({
state: 'skipped',
note: undefined,
errors: undefined,
})
})
it('correctly reports multiple failures', () => {
const testCase = findTest(testModule.children, 'fails multiple times')
const result = testCase.result()!

View File

@ -20,6 +20,9 @@ function buildCtx() {
function buildWorkspace() {
return {
name: 'test',
config: {
root: import.meta.dirname,
},
} as any as WorkspaceProject
}

View File

@ -4,7 +4,6 @@ import { readCoverageMap, runVitest, test } from '../utils'
test('--merge-reports', async () => {
for (const index of [1, 2, 3]) {
await runVitest({
name: `generate #${index} blob report`,
include: ['fixtures/test/merge-fixture-*.test.ts'],
reporters: 'blob',
shard: `${index}/3`,
@ -13,7 +12,6 @@ test('--merge-reports', async () => {
}
await runVitest({
name: 'merge blob reports',
// Pass default value - this option is publicly only available via CLI so it's a bit hacky usage here
mergeReports: '.vitest-reports',
coverage: {

View File

@ -1,40 +0,0 @@
import { beforeAll, beforeEach, afterEach, afterAll, test, describe } from "vitest";
import { setTimeout } from "node:timers/promises";
beforeAll(async () => {
await setTimeout(100);
});
afterAll(async () => {
await setTimeout(100);
});
describe("some suite", async () => {
beforeEach(async () => {
await setTimeout(100);
});
test("some test", async () => {
await setTimeout(100);
});
afterEach(async () => {
await setTimeout(100);
});
});
test("Fast test 1", () => {
//
});
test.skip("Skipped test 1", () => {
//
});
test.concurrent("parallel slow tests 1.1", async () => {
await setTimeout(100);
});
test.concurrent("parallel slow tests 1.2", async () => {
await setTimeout(100);
});

View File

@ -1,40 +0,0 @@
import { beforeAll, beforeEach, afterEach, afterAll, test, describe } from "vitest";
import { setTimeout } from "node:timers/promises";
beforeAll(async () => {
await setTimeout(100);
});
afterAll(async () => {
await setTimeout(100);
});
describe("some suite", async () => {
beforeEach(async () => {
await setTimeout(100);
});
test("some test", async () => {
await setTimeout(100);
});
afterEach(async () => {
await setTimeout(100);
});
});
test("Fast test 1", () => {
//
});
test.skip("Skipped test 1", () => {
//
});
test.concurrent("parallel slow tests 2.1", async () => {
await setTimeout(100);
});
test.concurrent("parallel slow tests 2.2", async () => {
await setTimeout(100);
});

View File

@ -17,10 +17,6 @@ exports[`html reporter > resolves to "failing" status for test file "json-fail"
"prepareDuration": 0,
"result": {
"duration": 0,
"hooks": {
"afterAll": "pass",
"beforeAll": "pass",
},
"startTime": 0,
"state": "fail",
},
@ -67,10 +63,6 @@ exports[`html reporter > resolves to "failing" status for test file "json-fail"
"stackStr": "AssertionError: expected 2 to deeply equal 1",
},
],
"hooks": {
"afterEach": "pass",
"beforeEach": "pass",
},
"repeatCount": 0,
"retryCount": 0,
"startTime": 0,
@ -134,10 +126,6 @@ exports[`html reporter > resolves to "passing" status for test file "all-passing
"prepareDuration": 0,
"result": {
"duration": 0,
"hooks": {
"afterAll": "pass",
"beforeAll": "pass",
},
"startTime": 0,
"state": "pass",
},
@ -155,10 +143,6 @@ exports[`html reporter > resolves to "passing" status for test file "all-passing
"name": "2 + 3 = 5",
"result": {
"duration": 0,
"hooks": {
"afterEach": "pass",
"beforeEach": "pass",
},
"repeatCount": 0,
"retryCount": 0,
"startTime": 0,

View File

@ -58,6 +58,7 @@ describe('{ isTTY: false }', () => {
expect(stdout).toContain('✓ fixtures/ok.test.ts')
expect(stdout).toContain('Test Files 1 passed (1)')
expect(stdout).not.toContain('·')
expect(stderr).toBe('')
})
@ -72,6 +73,7 @@ describe('{ isTTY: false }', () => {
expect(stdout).toContain(' fixtures/some-failing.test.ts (2 tests | 1 failed)')
expect(stdout).toContain('✓ 2 + 3 = 5')
expect(stdout).toContain('× 3 + 3 = 7')
expect(stdout).not.toContain('\n·x\n')
expect(stdout).toContain('Test Files 1 failed (1)')
expect(stdout).toContain('Tests 1 failed | 1 passed')
@ -89,6 +91,7 @@ describe('{ isTTY: false }', () => {
expect(stdout).toContain('↓ fixtures/all-skipped.test.ts (2 tests | 2 skipped)')
expect(stdout).toContain('Test Files 1 skipped (1)')
expect(stdout).toContain('Tests 1 skipped | 1 todo')
expect(stdout).not.toContain('\n--\n')
expect(stderr).toContain('')
})

View File

@ -1,156 +0,0 @@
import type { File, Test } from '@vitest/runner'
import type { Reporter, TestSpecification } from 'vitest/node'
import type { HookOptions } from '../../../packages/vitest/src/node/reporters/task-parser'
import { expect, test } from 'vitest'
import { TaskParser } from '../../../packages/vitest/src/node/reporters/task-parser'
import { runVitest } from '../../test-utils'
test('tasks are reported in correct order', async () => {
const reporter = new TaskReporter()
const { stdout, stderr } = await runVitest({
config: false,
include: ['./fixtures/task-parser-tests/*.test.ts'],
fileParallelism: false,
reporters: [reporter],
sequence: { sequencer: Sorter },
})
expect(stdout).toBe('')
expect(stderr).toBe('')
expect(reporter.calls).toMatchInlineSnapshot(`
[
"|fixtures/task-parser-tests/example-1.test.ts| start",
"|fixtures/task-parser-tests/example-1.test.ts| beforeAll start (suite)",
"|fixtures/task-parser-tests/example-1.test.ts| beforeAll end (suite)",
"|fixtures/task-parser-tests/example-1.test.ts| beforeAll end (suite)",
"|fixtures/task-parser-tests/example-1.test.ts| start",
"|fixtures/task-parser-tests/example-1.test.ts| RUN some test",
"|fixtures/task-parser-tests/example-1.test.ts| beforeEach start (test)",
"|fixtures/task-parser-tests/example-1.test.ts| beforeEach end (test)",
"|fixtures/task-parser-tests/example-1.test.ts| RUN some test",
"|fixtures/task-parser-tests/example-1.test.ts| beforeEach end (test)",
"|fixtures/task-parser-tests/example-1.test.ts| afterEach start (test)",
"|fixtures/task-parser-tests/example-1.test.ts| beforeEach end (test)",
"|fixtures/task-parser-tests/example-1.test.ts| afterEach end (test)",
"|fixtures/task-parser-tests/example-1.test.ts| beforeAll end (suite)",
"|fixtures/task-parser-tests/example-1.test.ts| afterAll end (suite)",
"|fixtures/task-parser-tests/example-1.test.ts| beforeEach end (test)",
"|fixtures/task-parser-tests/example-1.test.ts| afterEach end (test)",
"|fixtures/task-parser-tests/example-1.test.ts| beforeEach end (test)",
"|fixtures/task-parser-tests/example-1.test.ts| beforeEach end (test)",
"|fixtures/task-parser-tests/example-1.test.ts| DONE some test",
"|fixtures/task-parser-tests/example-1.test.ts| DONE Fast test 1",
"|fixtures/task-parser-tests/example-1.test.ts| RUN parallel slow tests 1.1",
"|fixtures/task-parser-tests/example-1.test.ts| RUN parallel slow tests 1.2",
"|fixtures/task-parser-tests/example-1.test.ts| beforeEach end (test)",
"|fixtures/task-parser-tests/example-1.test.ts| afterEach end (test)",
"|fixtures/task-parser-tests/example-1.test.ts| beforeEach end (test)",
"|fixtures/task-parser-tests/example-1.test.ts| afterEach end (test)",
"|fixtures/task-parser-tests/example-1.test.ts| beforeAll end (suite)",
"|fixtures/task-parser-tests/example-1.test.ts| DONE parallel slow tests 1.1",
"|fixtures/task-parser-tests/example-1.test.ts| DONE parallel slow tests 1.2",
"|fixtures/task-parser-tests/example-1.test.ts| start",
"|fixtures/task-parser-tests/example-1.test.ts| afterAll start (suite)",
"|fixtures/task-parser-tests/example-1.test.ts| beforeAll end (suite)",
"|fixtures/task-parser-tests/example-1.test.ts| afterAll end (suite)",
"|fixtures/task-parser-tests/example-1.test.ts| DONE Skipped test 1",
"|fixtures/task-parser-tests/example-1.test.ts| finish",
"|fixtures/task-parser-tests/example-2.test.ts| start",
"|fixtures/task-parser-tests/example-2.test.ts| beforeAll start (suite)",
"|fixtures/task-parser-tests/example-2.test.ts| beforeAll end (suite)",
"|fixtures/task-parser-tests/example-2.test.ts| beforeAll end (suite)",
"|fixtures/task-parser-tests/example-2.test.ts| start",
"|fixtures/task-parser-tests/example-2.test.ts| RUN some test",
"|fixtures/task-parser-tests/example-2.test.ts| beforeEach start (test)",
"|fixtures/task-parser-tests/example-2.test.ts| beforeEach end (test)",
"|fixtures/task-parser-tests/example-2.test.ts| RUN some test",
"|fixtures/task-parser-tests/example-2.test.ts| beforeEach end (test)",
"|fixtures/task-parser-tests/example-2.test.ts| afterEach start (test)",
"|fixtures/task-parser-tests/example-2.test.ts| beforeEach end (test)",
"|fixtures/task-parser-tests/example-2.test.ts| afterEach end (test)",
"|fixtures/task-parser-tests/example-2.test.ts| beforeAll end (suite)",
"|fixtures/task-parser-tests/example-2.test.ts| afterAll end (suite)",
"|fixtures/task-parser-tests/example-2.test.ts| beforeEach end (test)",
"|fixtures/task-parser-tests/example-2.test.ts| afterEach end (test)",
"|fixtures/task-parser-tests/example-2.test.ts| beforeEach end (test)",
"|fixtures/task-parser-tests/example-2.test.ts| beforeEach end (test)",
"|fixtures/task-parser-tests/example-2.test.ts| DONE some test",
"|fixtures/task-parser-tests/example-2.test.ts| DONE Fast test 1",
"|fixtures/task-parser-tests/example-2.test.ts| RUN parallel slow tests 2.1",
"|fixtures/task-parser-tests/example-2.test.ts| RUN parallel slow tests 2.2",
"|fixtures/task-parser-tests/example-2.test.ts| beforeEach end (test)",
"|fixtures/task-parser-tests/example-2.test.ts| afterEach end (test)",
"|fixtures/task-parser-tests/example-2.test.ts| beforeEach end (test)",
"|fixtures/task-parser-tests/example-2.test.ts| afterEach end (test)",
"|fixtures/task-parser-tests/example-2.test.ts| beforeAll end (suite)",
"|fixtures/task-parser-tests/example-2.test.ts| DONE parallel slow tests 2.1",
"|fixtures/task-parser-tests/example-2.test.ts| DONE parallel slow tests 2.2",
"|fixtures/task-parser-tests/example-2.test.ts| start",
"|fixtures/task-parser-tests/example-2.test.ts| afterAll start (suite)",
"|fixtures/task-parser-tests/example-2.test.ts| beforeAll end (suite)",
"|fixtures/task-parser-tests/example-2.test.ts| afterAll end (suite)",
"|fixtures/task-parser-tests/example-2.test.ts| DONE Skipped test 1",
"|fixtures/task-parser-tests/example-2.test.ts| finish",
]
`)
})
class TaskReporter extends TaskParser implements Reporter {
calls: string[] = []
// @ts-expect-error -- not sure why
onInit(ctx) {
super.onInit(ctx)
}
onTestFilePrepare(file: File) {
this.calls.push(`|${file.name}| start`)
}
onTestFileFinished(file: File) {
this.calls.push(`|${file.name}| finish`)
}
onTestStart(test: Test) {
this.calls.push(`|${test.file.name}| RUN ${test.name}`)
}
onTestFinished(test: Test) {
this.calls.push(`|${test.file.name}| DONE ${test.name}`)
}
onHookStart(options: HookOptions) {
this.calls.push(`|${options.file.name}| ${options.name} start (${options.type})`)
}
onHookEnd(options: HookOptions) {
this.calls.push(`|${options.file.name}| ${options.name} end (${options.type})`)
}
}
class Sorter {
sort(files: TestSpecification[]) {
return files.sort((a, b) => {
const idA = Number.parseInt(
a.moduleId.match(/example-(\d*)\.test\.ts/)![1],
)
const idB = Number.parseInt(
b.moduleId.match(/example-(\d*)\.test\.ts/)![1],
)
if (idA > idB) {
return 1
}
if (idA < idB) {
return -1
}
return 0
})
}
shard(files: TestSpecification[]) {
return files
}
}

File diff suppressed because it is too large