diff --git a/docs/.vitepress/config.ts b/docs/.vitepress/config.ts index 991389fe0374..0b74cd97ec47 100644 --- a/docs/.vitepress/config.ts +++ b/docs/.vitepress/config.ts @@ -343,6 +343,10 @@ export default ({ mode }: { mode: string }) => { text: 'Task Metadata', link: '/advanced/metadata', }, + { + text: 'Reporters API', + link: '/advanced/api/reporters', + }, ], }, { diff --git a/docs/advanced/api/reporters.md b/docs/advanced/api/reporters.md new file mode 100644 index 000000000000..7510b1ba3457 --- /dev/null +++ b/docs/advanced/api/reporters.md @@ -0,0 +1,242 @@ +# Reporters + +::: warning +This is an advanced API. If you just want to configure built-in reporters, read the ["Reporters"](/guide/reporters) guide. +::: + +Vitest has its own test run lifecycle. These are represented by reporter's methods: + +- [`onInit`](#oninit) +- [`onTestRunStart`](#ontestrunstart) + - [`onTestModuleQueued`](#ontestmodulequeued) + - [`onTestModuleCollected`](#ontestmodulecollected) + - [`onTestModuleStart`](#ontestmodulestart) + - [`onHookStart(beforeAll)`](#onhookstart) + - [`onHookEnd(beforeAll)`](#onhookend) + - [`onHookStart(beforeEach)`](#onhookstart) + - [`onHookEnd(beforeEach)`](#onhookend) + - [`onTestCaseStart`](#ontestcasestart) + - [`onTestCaseEnd`](#ontestcaseend) + - [`onHookStart(afterEach)`](#onhookstart) + - [`onHookEnd(afterEach)`](#onhookend) + - [`onHookStart(afterAll)`](#onhookstart) + - [`onHookEnd(afterAll)`](#onhookend) + - [`onTestModuleEnd`](#ontestmoduleend) +- [`onTestRunEnd`](#ontestrunend) + +This guide lists all supported reporter methods. 
However, don't forget that instead of creating your own reporter, you can [extend an existing one](/advanced/reporters) instead: + +```ts [custom-reporter.js] +import { BaseReporter } from 'vitest/reporters' + +export default class CustomReporter extends BaseReporter { + onTestRunEnd(testModules, errors) { + console.log(testModules.length, 'tests finished running') + super.onTestRunEnd(testModules, errors) + } +} +``` + +## onInit + +```ts +function onInit(vitest: Vitest): Awaitable +``` + +This method is called when [Vitest](/advanced/api/vitest) was initiated or started, but before the tests were filtered. + +::: info +Internally this method is called inside [`vitest.start`](/advanced/api/vitest#start), [`vitest.init`](/advanced/api/vitest#init) or [`vitest.mergeReports`](/advanced/api/vitest#mergereports). If you are using programmatic API, make sure to call either one depending on your needs before calling [`vitest.runTestSpecifications`](/advanced/api/vitest#runtestspecifications), for example. Built-in CLI will always run methods in correct order. +::: + +Note that you can also get access to `vitest` instance from test cases, suites and test modules via a [`project`](/advanced/api/test-project) property, but it might also be useful to store a reference to `vitest` in this method. + +::: details Example +```ts +import type { Reporter, TestSpecification } from 'vitest/reporters' +import type { Vitest } from 'vitest/node' + +class MyReporter implements Reporter { + private vitest!: Vitest + + onInit(vitest: Vitest) { + this.vitest = vitest + } + + onTestRunStart(specifications: TestSpecification[]) { + console.log( + specifications.length, + 'test files will run in', + this.vitest.config.root, + ) + } +} + +export default new MyReporter() +``` + +## onTestRunStart + +```ts +function onTestRunStart( + specifications: TestSpecification[] +): Awaitable +``` + +This method is called when a new test run has started. 
It receives an array of [test specifications](/advanced/api/test-specification) scheduled to run. This array is readonly and available only for information purposes. + +::: details Example +```ts +import type { Reporter, TestSpecification } from 'vitest/reporters' + +class MyReporter implements Reporter { + onTestRunStart(specifications: TestSpecification[]) { + console.log(specifications.length, 'test files will run') + } +} + +export default new MyReporter() +``` +::: + +::: tip +This method was added in Vitest 3, replacing `onPathsCollected` and `onSpecsCollected`, both of which are now deprecated. +::: + +## onTestRunEnd + +```ts +function onTestRunEnd( + testModules: ReadonlyArray, + unhandledErrors: ReadonlyArray, + reason: 'passed' | 'interrupted' | 'failed' +): Awaitable +``` + +This method is called after all tests have finished running and the coverage merged all reports, if it's enabled. Note that you can get the coverage information in [`onCoverage`](#oncoverage) hook. + +It receives a readonly list of test modules. You can iterate over it via a [`testModule.children`](/advanced/api/test-collection) property to report the state and errors, if any. + +The second argument is a readonly list of unhandled errors that Vitest wasn't able to attribute to any test. These can happen outside of the test run because of an error in a plugin, or inside the test run as a side-effect of a non-awaited function (for example, a timeout that threw an error after the test has finished running). 
+ The third argument indicates why the test run was finished: + +- `passed`: test run was finished normally and there are no errors +- `failed`: test run has at least one error (due to a syntax error during collection or an actual error during test execution) +- `interrupted`: test was interrupted by [`vitest.cancelCurrentRun`](/advanced/api/vitest#cancelcurrentrun) call or `Ctrl+C` was pressed in the terminal (note that it's still possible to have failed tests in this case) + +::: details Example +```ts +import type { + Reporter, + SerializedError, + TestModule, + TestRunEndReason, + TestSpecification +} from 'vitest/reporters' + +class MyReporter implements Reporter { + onTestRunEnd( + testModules: ReadonlyArray, + unhandledErrors: ReadonlyArray, + reason: TestRunEndReason, + ) { + if (reason === 'passed') { + testModules.forEach(module => console.log(module.moduleId, 'succeeded')) + } + else if (reason === 'failed') { + // note that this will skip possible errors in suites + // you can get them from testSuite.errors() + for (const testCase of testModules.flatMap(m => [...m.children.allTests()])) { + if (testCase.result().state === 'failed') { + console.log(testCase.fullName, 'in', testCase.module.moduleId, 'failed') + console.log(testCase.result().errors) + } + } + } + else { + console.log('test run was interrupted, skipping report') + } + } +} + +export default new MyReporter() +``` + +::: + +::: tip +This method was added in Vitest 3, replacing `onFinished`, which is now deprecated. +::: + +## onCoverage + +```ts +function onCoverage(coverage: unknown): Awaitable +``` + +This hook is called after coverage reports were merged. 
Vitest doesn't provide coverage type for this method out of the box, but you can import it from `istanbul-lib-coverage` package: + +```ts +import type { CoverageMap } from 'istanbul-lib-coverage' + +declare function onCoverage(coverage: CoverageMap): Awaitable +``` + +## onTestModuleQueued + +```ts +function onTestModuleQueued(testModule: TestModule): Awaitable +``` + +This method is called right before Vitest imports the setup file and the test module itself. This means that `testModule` will have no [`children`](/advanced/api/test-suite#children) yet, but you can start reporting it as the next test to run. + +## onTestModuleCollected + +```ts +function onTestModuleCollected(testModule: TestModule): Awaitable +``` + +This method is called when all tests inside the file were collected, meaning [`testModule.children`](/advanced/api/test-suite#children) collection is populated, but tests don't have any results yet. + +## onTestModuleStart + +```ts +function onTestModuleStart(testModule: TestModule): Awaitable +``` + +This method is called right after [`onTestModuleCollected`](#ontestmodulecollected) unless Vitest runs in collection mode ([`vitest.collect()`](/advanced/api/vitest#collect) or `vitest collect` in the CLI), in this case it will not be called at all because there are no tests to run. + +## onTestModuleEnd + +```ts +function onTestModuleEnd(testModule: TestModule): Awaitable +``` + +This method is called when every test in the module finished running. This means, every test inside [`testModule.children`](/advanced/api/test-suite#children) will have a `test.result()` that is not equal to `pending`. + +## onHookStart + +::: warning +`onHookStart` and `onHookEnd` methods will not be called if these hooks did not run during the test run. +::: + +## onHookEnd + +::: warning +`onHookStart` and `onHookEnd` methods will not be called if these hooks did not run during the test run. 
+::: + +## onTestCaseStart + +```ts +function onTestCaseStart(testCase: TestCase): Awaitable +``` + +This method is called when the test starts to run. + +::: warning +Notice that it's possible to have [`testCase.result()`](/advanced/api/test-case#result) with `passed` or `failed` state already when `onTestCaseStart` is called. This can happen if test was running too fast and both `onTestCaseStart` and `onTestCaseEnd` were scheduled to run in the same microtask. +::: + +## onTestCaseEnd diff --git a/docs/advanced/api/test-case.md b/docs/advanced/api/test-case.md index 3f5046685f89..e845a111b5d4 100644 --- a/docs/advanced/api/test-case.md +++ b/docs/advanced/api/test-case.md @@ -10,31 +10,6 @@ if (task.type === 'test') { } ``` -::: warning -We are planning to introduce a new Reporter API that will be using this API by default. For now, the Reporter API uses [runner tasks](/advanced/runner#tasks), but you can still access `TestCase` via `vitest.state.getReportedEntity` method: - -```ts -import type { RunnerTestFile, TestModule, Vitest } from 'vitest/node' - -class Reporter { - private vitest!: Vitest - - onInit(vitest: Vitest) { - this.vitest = vitest - } - - onFinished(files: RunnerTestFile[]) { - for (const file of files) { - const testModule = this.vitest.getReportedEntity(file) as TestModule - for (const test of testModule.children.allTests()) { - console.log(test) // TestCase - } - } - } -} -``` -::: - ## project This references the [`TestProject`](/advanced/api/test-project) that the test belongs to. @@ -124,12 +99,13 @@ Parent [suite](/advanced/api/test-suite). 
If the test was called directly inside ```ts interface TaskOptions { - each: boolean | undefined - concurrent: boolean | undefined - shuffle: boolean | undefined - retry: number | undefined - repeats: number | undefined - mode: 'run' | 'only' | 'skip' | 'todo' + readonly each: boolean | undefined + readonly fails: boolean | undefined + readonly concurrent: boolean | undefined + readonly shuffle: boolean | undefined + readonly retry: number | undefined + readonly repeats: number | undefined + readonly mode: 'run' | 'only' | 'skip' | 'todo' | 'queued' } ``` @@ -143,14 +119,6 @@ function ok(): boolean Checks if the test did not fail the suite. If the test is not finished yet or was skipped, it will return `true`. -## skipped - -```ts -function skipped(): boolean -``` - -Checks if the test was skipped during collection or dynamically with `ctx.skip()`. - ## meta ```ts @@ -174,10 +142,23 @@ If the test did not finish running yet, the meta will be an empty object. ## result ```ts -function result(): TestResult | undefined +function result(): TestResult ``` -Test results. It will be `undefined` if test is skipped during collection, not finished yet or was just collected. +Test results. If test is not finished yet or was just collected, it will be equal to `TestResultPending`: + +```ts +export interface TestResultPending { + /** + * The test was collected, but didn't finish running yet. + */ + readonly state: 'pending' + /** + * Pending tests have no errors. + */ + readonly errors: undefined +} +``` If the test was skipped, the return value will be `TestResultSkipped`: @@ -187,15 +168,15 @@ interface TestResultSkipped { * The test was skipped with `skip` or `todo` flag. * You can see which one was used in the `options.mode` option. */ - state: 'skipped' + readonly state: 'skipped' /** * Skipped tests have no errors. */ - errors: undefined + readonly errors: undefined /** * A custom note passed down to `ctx.skip(note)`. 
*/ - note: string | undefined + readonly note: string | undefined } ``` @@ -210,26 +191,26 @@ interface TestResultFailed { /** * The test failed to execute. */ - state: 'failed' + readonly state: 'failed' /** * Errors that were thrown during the test execution. */ - errors: TestError[] + readonly errors: ReadonlyArray } ``` -If the test passed, the retunr value will be `TestResultPassed`: +If the test passed, the return value will be `TestResultPassed`: ```ts interface TestResultPassed { /** * The test passed successfully. */ - state: 'passed' + readonly state: 'passed' /** * Errors that were thrown during the test execution. */ - errors: TestError[] | undefined + readonly errors: ReadonlyArray | undefined } ``` @@ -250,32 +231,36 @@ interface TestDiagnostic { /** * If the duration of the test is above `slowTestThreshold`. */ - slow: boolean + readonly slow: boolean /** * The amount of memory used by the test in bytes. * This value is only available if the test was executed with `logHeapUsage` flag. */ - heap: number | undefined + readonly heap: number | undefined /** * The time it takes to execute the test in ms. */ - duration: number + readonly duration: number /** * The time in ms when the test started. */ - startTime: number + readonly startTime: number /** * The amount of times the test was retried. */ - retryCount: number + readonly retryCount: number /** * The amount of times the test was repeated as configured by `repeats` option. * This value can be lower if the test failed during the repeat and no `retry` is configured. */ - repeatCount: number + readonly repeatCount: number /** * If test passed on a second retry. */ - flaky: boolean + readonly flaky: boolean } ``` + +::: info +`diagnostic()` will return `undefined` if the test did not finish running yet. 
+::: diff --git a/docs/advanced/api/test-collection.md b/docs/advanced/api/test-collection.md index 974f37dbd11d..988f9d961467 100644 --- a/docs/advanced/api/test-collection.md +++ b/docs/advanced/api/test-collection.md @@ -57,16 +57,14 @@ for (const suite of module.children.allSuites()) { ## allTests ```ts -function allTests( - state?: TestResult['state'] | 'running' -): Generator +function allTests(state?: TestState): Generator ``` Filters all tests that are part of this collection and its children. ```ts for (const test of module.children.allTests()) { - if (!test.result()) { + if (test.result().state === 'pending') { console.log('test', test.fullName, 'did not finish') } } @@ -77,9 +75,7 @@ You can pass down a `state` value to filter tests by the state. ## tests ```ts -function tests( - state?: TestResult['state'] | 'running' -): Generator +function tests(state?: TestState): Generator ``` Filters only the tests that are part of this collection. You can pass down a `state` value to filter tests by the state. diff --git a/docs/advanced/api/test-module.md b/docs/advanced/api/test-module.md index 07ae12305712..95c49edcb565 100644 --- a/docs/advanced/api/test-module.md +++ b/docs/advanced/api/test-module.md @@ -10,35 +10,20 @@ if (task.type === 'module') { } ``` -The `TestModule` inherits all methods and properties from the [`TestSuite`](/advanced/api/test-module). This guide will only list methods and properties unique to the `TestModule` - -::: warning -We are planning to introduce a new Reporter API that will be using this API by default. 
For now, the Reporter API uses [runner tasks](/advanced/runner#tasks), but you can still access `TestModule` via `vitest.state.getReportedEntity` method: - -```ts -import type { RunnerTestFile, TestModule, Vitest } from 'vitest/node' - -class Reporter { - private vitest!: Vitest - - onInit(vitest: Vitest) { - this.vitest = vitest - } - - onFinished(files: RunnerTestFile[]) { - for (const file of files) { - const testModule = this.vitest.state.getReportedEntity(file) as TestModule - console.log(testModule) // TestModule - } - } -} -``` +::: warning Extending Suite Methods +The `TestModule` class inherits all methods and properties from the [`TestSuite`](/advanced/api/test-module). This guide will only list methods and properties unique to the `TestModule`. ::: ## moduleId This is usually an absolute unix file path (even on Windows). It can be a virtual id if the file is not on the disk. This value corresponds to Vite's `ModuleGraph` id. +```ts +'C:/Users/Documents/project/example.test.ts' // ✅ +'/Users/mac/project/example.test.ts' // ✅ +'C:\\Users\\Documents\\project\\example.test.ts' // ❌ +``` + ## diagnostic ```ts @@ -52,23 +37,23 @@ interface ModuleDiagnostic { /** * The time it takes to import and initiate an environment. */ - environmentSetupDuration: number + readonly environmentSetupDuration: number /** * The time it takes Vitest to setup test harness (runner, mocks, etc.). */ - prepareDuration: number + readonly prepareDuration: number /** * The time it takes to import the test module. * This includes importing everything in the module and executing suite callbacks. */ - collectDuration: number + readonly collectDuration: number /** * The time it takes to import the setup module. */ - setupDuration: number + readonly setupDuration: number /** * Accumulated duration of all tests and hooks in the module. 
*/ - duration: number + readonly duration: number } ``` diff --git a/docs/advanced/api/test-specification.md b/docs/advanced/api/test-specification.md index 3fefba0c8954..b6e0e91e4597 100644 --- a/docs/advanced/api/test-specification.md +++ b/docs/advanced/api/test-specification.md @@ -13,6 +13,10 @@ const specification = project.createSpecification( `createSpecification` expects resolved module ID. It doesn't auto-resolve the file or check that it exists on the file system. +## taskId + +[Test module's](/advanced/api/test-suite#id) identifier. + ## project This references the [`TestProject`](/advanced/api/test-project) that the test module belongs to. @@ -27,6 +31,10 @@ The ID of the module in Vite's module graph. Usually, it's an absolute file path 'C:\\Users\\Documents\\project\\example.test.ts' // ❌ ``` +## testModule + +Instance of [`TestModule`](/advanced/api/test-module) assosiated with the specification. If test wasn't queued yet, this will be `undefined`. + ## pool experimental {#pool} The [`pool`](/config/#pool) in which the test module will run. diff --git a/docs/advanced/api/test-suite.md b/docs/advanced/api/test-suite.md index 85673c435766..713e0b90855c 100644 --- a/docs/advanced/api/test-suite.md +++ b/docs/advanced/api/test-suite.md @@ -10,31 +10,6 @@ if (task.type === 'suite') { } ``` -::: warning -We are planning to introduce a new Reporter API that will be using this API by default. 
For now, the Reporter API uses [runner tasks](/advanced/runner#tasks), but you can still access `TestSuite` via `vitest.state.getReportedEntity` method: - -```ts -import type { RunnerTestFile, TestModule, Vitest } from 'vitest/node' - -class Reporter { - private vitest!: Vitest - - onInit(vitest: Vitest) { - this.vitest = vitest - } - - onFinished(files: RunnerTestFile[]) { - for (const file of files) { - const testModule = this.vitest.state.getReportedEntity(file) as TestModule - for (const suite of testModule.children.allSuites()) { - console.log(suite) // TestSuite - } - } - } -} -``` -::: - ## project This references the [`TestProject`](/advanced/api/test-project) that the test belongs to. @@ -125,12 +100,13 @@ Parent suite. If the suite was called directly inside the [module](/advanced/api ```ts interface TaskOptions { - each: boolean | undefined - concurrent: boolean | undefined - shuffle: boolean | undefined - retry: number | undefined - repeats: number | undefined - mode: 'run' | 'only' | 'skip' | 'todo' + readonly each: boolean | undefined + readonly fails: boolean | undefined + readonly concurrent: boolean | undefined + readonly shuffle: boolean | undefined + readonly retry: number | undefined + readonly repeats: number | undefined + readonly mode: 'run' | 'only' | 'skip' | 'todo' | 'queued' } ``` @@ -153,7 +129,21 @@ for (const task of suite.children) { ``` ::: warning -Note that `suite.children` will only iterate the first level of nesting, it won't go deeper. +Note that `suite.children` will only iterate the first level of nesting, it won't go deeper. If you need to iterate over all tests or suites, use [`children.allTests()`](/advanced/api/test-collection#alltests) or [`children.allSuites()`](/advanced/api/test-collection#allsuites). 
If you need to iterate over everything, use recursive function: + +```ts +function visit(collection: TestCollection) { + for (const task of collection) { + if (task.type === 'suite') { + // report a suite + visit(task.children) + } + else { + // report a test + } + } +} +``` ::: ## ok @@ -164,13 +154,19 @@ function ok(): boolean Checks if the suite has any failed tests. This will also return `false` if suite failed during collection. In that case, check the [`errors()`](#errors) for thrown errors. -## skipped +## state ```ts -function skipped(): boolean +function state(): TestSuiteState ``` -Checks if the suite was skipped during collection. +Checks the running state of the suite. Possible return values: + +- **queued**: the test module was queued to run. Only [`TestModule`](/advanced/api/test-module) can have this state. +- **pending**: the tests in this suite did not finish running yet. +- **failed**: this suite has failed tests or they couldn't be collected. If [`errors()`](#errors) is not empty, it means the suite failed to collect tests. +- **passed**: every test inside this suite has passed. +- **skipped**: this suite was skipped during collection. ## errors @@ -189,5 +185,5 @@ describe('collection failed', () => { ``` ::: warning -Note that errors are serialized into simple object: `instanceof Error` will always return `false`. +Note that errors are serialized into simple objects: `instanceof Error` will always return `false`. ::: diff --git a/docs/advanced/metadata.md b/docs/advanced/metadata.md index 6efd276269f2..1d75355d6410 100644 --- a/docs/advanced/metadata.md +++ b/docs/advanced/metadata.md @@ -20,26 +20,23 @@ test('custom', ({ task }) => { }) ``` -Once a test is completed, Vitest will send a task including the result and `meta` to the Node.js process using RPC. 
To intercept and process this task, you can utilize the `onTaskUpdate` method available in your reporter implementation: +Once a test is completed, Vitest will send a task including the result and `meta` to the Node.js process using RPC, and then report it in `onTestCaseEnd` and other hooks that have access to tasks. To process this test case, you can utilize the `onTestCaseEnd` method available in your reporter implementation: ```ts [custom-reporter.js] +import type { Reporter, TestCase, TestModule } from 'vitest/reporters' + export default { - // you can intercept packs if needed - onTaskUpdate(packs) { - const [id, result, meta] = packs[0] + onTestCaseEnd(testCase: TestCase) { + // custom === 'some-custom-handler' ✅ + const { custom } = testCase.meta() }, - // meta is located on every task inside "onFinished" - onFinished(files) { - files[0].meta.done === true - files[0].tasks[0].meta.custom === 'some-custom-handler' + onTestRunFinished(testModule: TestModule) { + testModule.meta().done === true + testModule.children.at(0).meta().custom === 'some-custom-handler' } -} +} satisfies Reporter ``` -::: warning -Vitest can send several tasks at the same time if several tests are completed in a short period of time. -::: - ::: danger BEWARE Vitest uses different methods to communicate with the Node.js process. 
@@ -56,9 +53,11 @@ You can also get this information from Vitest state when tests finished running: ```ts const vitest = await createVitest('test') -await vitest.start() -vitest.state.getFiles()[0].meta.done === true -vitest.state.getFiles()[0].tasks[0].meta.custom === 'some-custom-handler' +const { testModules } = await vitest.start() + +const testModule = testModules[0] +testModule.meta().done === true +testModule.children.at(0).meta().custom === 'some-custom-handler' ``` It's also possible to extend type definitions when using TypeScript: diff --git a/docs/api/index.md b/docs/api/index.md index a269d94c9b13..fadfe14233dc 100644 --- a/docs/api/index.md +++ b/docs/api/index.md @@ -1179,6 +1179,16 @@ test('performs an organization query', async () => { ::: tip This hook is always called in reverse order and is not affected by [`sequence.hooks`](/config/#sequence-hooks) option. + + +Note that this hook is not called if test was skipped with a dynamic `ctx.skip()` call: + +```ts{2} +test('skipped dynamically', (t) => { + onTestFinished(() => {}) // not called + t.skip() +}) +``` ::: ### onTestFailed diff --git a/packages/browser/src/node/rpc.ts b/packages/browser/src/node/rpc.ts index 45c4a5b22c3a..b0a89dd961d7 100644 --- a/packages/browser/src/node/rpc.ts +++ b/packages/browser/src/node/rpc.ts @@ -1,6 +1,6 @@ import type { Duplex } from 'node:stream' import type { ErrorWithDiff } from 'vitest' -import type { BrowserCommandContext, ResolveSnapshotPathHandlerContext, TestModule, TestProject } from 'vitest/node' +import type { BrowserCommandContext, ResolveSnapshotPathHandlerContext, TestProject } from 'vitest/node' import type { WebSocket } from 'ws' import type { ParentBrowserProject } from './projectParent' import type { BrowserServerState } from './state' @@ -111,23 +111,19 @@ export function setupBrowserRpc(globalServer: ParentBrowserProject) { vitest.state.catchError(error, type) }, async onQueued(file) { - vitest.state.collectFiles(project, [file]) - const 
testModule = vitest.state.getReportedEntity(file) as TestModule - await vitest.report('onTestModuleQueued', testModule) + await vitest._testRun.enqueued(project, file) }, async onCollected(files) { - vitest.state.collectFiles(project, files) - await vitest.report('onCollected', files) + await vitest._testRun.collected(project, files) }, async onTaskUpdate(packs) { - vitest.state.updateTasks(packs) - await vitest.report('onTaskUpdate', packs) + await vitest._testRun.updated(packs) }, onAfterSuiteRun(meta) { vitest.coverageProvider?.onAfterSuiteRun(meta) }, sendLog(log) { - return vitest.report('onUserConsoleLog', log) + return vitest._testRun.log(log) }, resolveSnapshotPath(testPath) { return vitest.snapshot.resolvePath(testPath, { diff --git a/packages/browser/src/node/types.ts b/packages/browser/src/node/types.ts index 0276dea771c3..401e91583296 100644 --- a/packages/browser/src/node/types.ts +++ b/packages/browser/src/node/types.ts @@ -7,7 +7,7 @@ export interface WebSocketBrowserHandlers { resolveSnapshotRawPath: (testPath: string, rawPath: string) => string onUnhandledError: (error: unknown, type: string) => Promise onQueued: (file: RunnerTestFile) => void - onCollected: (files?: RunnerTestFile[]) => Promise + onCollected: (files: RunnerTestFile[]) => Promise onTaskUpdate: (packs: TaskResultPack[]) => void onAfterSuiteRun: (meta: AfterSuiteRunMeta) => void onCancel: (reason: CancelReason) => void diff --git a/packages/runner/src/hooks.ts b/packages/runner/src/hooks.ts index be1b38f10ff0..ecc7888e5b96 100644 --- a/packages/runner/src/hooks.ts +++ b/packages/runner/src/hooks.ts @@ -160,6 +160,8 @@ export const onTestFailed: TaskHook = createTestHook( * * **Note:** The `onTestFinished` hooks are running in reverse order of their registration. You can configure this by changing the `sequence.hooks` option in the config file. * + * **Note:** The `onTestFinished` hook is not called if the test is canceled with a dynamic `ctx.skip()` call. 
+ * * @param {Function} fn - The callback function to be executed after a test finishes. The function can receive parameters providing details about the completed test, including its success or failure status. * @param {number} [timeout] - Optional timeout in milliseconds for the hook. If not provided, the default hook timeout from the runner's configuration is used. * @throws {Error} Throws an error if the function is not called within a test. diff --git a/packages/runner/src/run.ts b/packages/runner/src/run.ts index e29f54435946..9aa40b1c4d5c 100644 --- a/packages/runner/src/run.ts +++ b/packages/runner/src/run.ts @@ -3,7 +3,6 @@ import type { DiffOptions } from '@vitest/utils/diff' import type { FileSpecification, VitestRunner } from './types/runner' import type { File, - HookCleanupCallback, HookListener, SequenceHooks, Suite, @@ -13,6 +12,7 @@ import type { TaskResult, TaskResultPack, TaskState, + TaskUpdateEvent, Test, TestContext, } from './types/tasks' @@ -31,21 +31,26 @@ const now = globalThis.performance ? globalThis.performance.now.bind(globalThis. const unixNow = Date.now function updateSuiteHookState( - suite: Task, + task: Task, name: keyof SuiteHooks, state: TaskState, runner: VitestRunner, ) { - if (!suite.result) { - suite.result = { state: 'run' } + if (!task.result) { + task.result = { state: 'run' } } - if (!suite.result?.hooks) { - suite.result.hooks = {} + if (!task.result.hooks) { + task.result.hooks = {} } - const suiteHooks = suite.result.hooks + const suiteHooks = task.result.hooks if (suiteHooks) { suiteHooks[name] = state - updateTask(suite, runner) + + updateTask( + state === 'run' ? 'suite-hook-start' : 'suite-hook-end', + task, + runner, + ) } } @@ -113,10 +118,10 @@ export async function callSuiteHook( name: T, runner: VitestRunner, args: SuiteHooks[T][0] extends HookListener ? 
A : never, -): Promise { +): Promise { const sequence = runner.config.sequence.hooks - const callbacks: HookCleanupCallback[] = [] + const callbacks: unknown[] = [] // stop at file level const parentSuite: Suite | null = 'filepath' in suite ? null : suite.suite || suite.file @@ -130,6 +135,10 @@ export async function callSuiteHook( const hooks = getSuiteHooks(suite, name, sequence) + if (hooks.length === 0) { + return callbacks + } + if (sequence === 'parallel') { callbacks.push( ...(await Promise.all(hooks.map(hook => (hook as any)(...args)))), @@ -152,12 +161,14 @@ export async function callSuiteHook( return callbacks } -const packs = new Map() +const packs = new Map() let updateTimer: any let previousUpdate: Promise | undefined -export function updateTask(task: Task, runner: VitestRunner): void { - packs.set(task.id, [task.result, task.meta]) +export function updateTask(event: TaskUpdateEvent, task: Task, runner: VitestRunner): void { + const events = packs.get(task.id)?.[2] || [] + events.push(event) + packs.set(task.id, [task.result, task.meta, events]) const { clearTimeout, setTimeout } = getSafeTimers() @@ -174,7 +185,7 @@ async function sendTasksUpdate(runner: VitestRunner) { if (packs.size) { const taskPacks = Array.from(packs).map(([id, task]) => { - return [id, task[0], task[1]] + return [id, task[0], task[1], task[2]] }) const p = runner.onTaskUpdate?.(taskPacks) packs.clear() @@ -182,7 +193,7 @@ async function sendTasksUpdate(runner: VitestRunner) { } } -async function callCleanupHooks(cleanups: HookCleanupCallback[]) { +async function callCleanupHooks(cleanups: unknown[]) { await Promise.all( cleanups.map(async (fn) => { if (typeof fn !== 'function') { @@ -201,7 +212,10 @@ export async function runTest(test: Test, runner: VitestRunner): Promise { } if (test.result?.state === 'fail') { - updateTask(test, runner) + // should not be possible to get here, I think this is just copy pasted from suite + // TODO: maybe someone fails tests in `beforeAll` 
hooks? + // https://github.com/vitest-dev/vitest/pull/7069 + updateTask('test-failed-early', test, runner) return } @@ -212,7 +226,7 @@ export async function runTest(test: Test, runner: VitestRunner): Promise { startTime: unixNow(), retryCount: 0, } - updateTask(test, runner) + updateTask('test-prepare', test, runner) setCurrentTest(test) @@ -222,7 +236,7 @@ export async function runTest(test: Test, runner: VitestRunner): Promise { for (let repeatCount = 0; repeatCount <= repeats; repeatCount++) { const retry = test.retry ?? 0 for (let retryCount = 0; retryCount <= retry; retryCount++) { - let beforeEachCleanups: HookCleanupCallback[] = [] + let beforeEachCleanups: unknown[] = [] try { await runner.onBeforeTryTask?.(test, { retry: retryCount, @@ -274,7 +288,7 @@ export async function runTest(test: Test, runner: VitestRunner): Promise { if (test.pending || test.result?.state === 'skip') { test.mode = 'skip' test.result = { state: 'skip', note: test.result?.note } - updateTask(test, runner) + updateTask('test-finished', test, runner) setCurrentTest(undefined) return } @@ -309,8 +323,8 @@ export async function runTest(test: Test, runner: VitestRunner): Promise { ) } - delete test.onFailed - delete test.onFinished + test.onFailed = undefined + test.onFinished = undefined if (test.result.state === 'pass') { break @@ -323,7 +337,7 @@ export async function runTest(test: Test, runner: VitestRunner): Promise { } // update retry info - updateTask(test, runner) + updateTask('test-retried', test, runner) } } @@ -346,7 +360,7 @@ export async function runTest(test: Test, runner: VitestRunner): Promise { await runner.onAfterRunTask?.(test) - updateTask(test, runner) + updateTask('test-finished', test, runner) } function failTask(result: TaskResult, err: unknown, diffOptions: DiffOptions | undefined) { @@ -369,7 +383,7 @@ function markTasksAsSkipped(suite: Suite, runner: VitestRunner) { suite.tasks.forEach((t) => { t.mode = 'skip' t.result = { ...t.result, state: 'skip' } - 
updateTask(t, runner) + updateTask('test-finished', t, runner) if (t.type === 'suite') { markTasksAsSkipped(t, runner) } @@ -381,20 +395,23 @@ export async function runSuite(suite: Suite, runner: VitestRunner): Promise( { async onTaskUpdate(packs) { - ctx.state.updateTasks(packs) - await ctx.report('onTaskUpdate', packs) + await ctx._testRun.updated(packs) }, getFiles() { return ctx.state.getFiles() diff --git a/packages/vitest/src/node/cli/cli-api.ts b/packages/vitest/src/node/cli/cli-api.ts index 5edaac0e073a..77d2fc283ebe 100644 --- a/packages/vitest/src/node/cli/cli-api.ts +++ b/packages/vitest/src/node/cli/cli-api.ts @@ -256,7 +256,7 @@ export function formatCollectedAsJSON(files: TestModule[]) { files.forEach((file) => { for (const test of file.children.allTests()) { - if (test.skipped()) { + if (test.result().state === 'skipped') { continue } const result: TestCollectJSONResult = { @@ -280,7 +280,7 @@ export function formatCollectedAsString(testModules: TestModule[]) { testModules.forEach((testModule) => { for (const test of testModule.children.allTests()) { - if (test.skipped()) { + if (test.result().state === 'skipped') { continue } const fullName = `${test.module.task.name} > ${test.fullName}` diff --git a/packages/vitest/src/node/core.ts b/packages/vitest/src/node/core.ts index fe9f4e36fd70..efc2ec0647a8 100644 --- a/packages/vitest/src/node/core.ts +++ b/packages/vitest/src/node/core.ts @@ -36,6 +36,7 @@ import { BlobReporter, readBlobs } from './reporters/blob' import { createBenchmarkReporters, createReporters } from './reporters/utils' import { VitestSpecifications } from './specifications' import { StateManager } from './state' +import { TestRun } from './test-run' import { VitestWatcher } from './watcher' import { resolveBrowserWorkspace, resolveWorkspace } from './workspace/resolveWorkspace' @@ -94,6 +95,7 @@ export class Vitest { /** @internal */ reporters: Reporter[] = undefined! /** @internal */ vitenode: ViteNodeServer = undefined! 
/** @internal */ runner: ViteNodeRunner = undefined! + /** @internal */ _testRun: TestRun = undefined! private isFirstRun = true private restartsCount = 0 @@ -213,6 +215,7 @@ export class Vitest { this._state = new StateManager() this._cache = new VitestCache(this.version) this._snapshot = new SnapshotManager({ ...resolved.snapshotOptions }) + this._testRun = new TestRun(this) if (this.config.watch) { this.watcher.registerWatcher() @@ -447,25 +450,30 @@ export class Vitest { await this.report('onInit', this) await this.report('onPathsCollected', files.flatMap(f => f.filepath)) - const workspaceSpecs = new Map() + const filesByProject = new Map() + const specifications: TestSpecification[] = [] for (const file of files) { const project = this.getProjectByName(file.projectName || '') - const specs = workspaceSpecs.get(project) || [] + const specs = filesByProject.get(project) || [] specs.push(file) - workspaceSpecs.set(project, specs) + filesByProject.set(project, specs) + specifications.push( + project.createSpecification(file.filepath, undefined, file.pool), + ) + // TODO: how to integrate queue state with mergeReports? 
+ // await this._testRun.enqueued(project, file).catch(noop) } + await this._testRun.start(specifications).catch(noop) - for (const [project, files] of workspaceSpecs) { + for (const [project, files] of filesByProject) { const filepaths = files.map(f => f.filepath) this.state.clearFiles(project, filepaths) files.forEach((file) => { file.logs?.forEach(log => this.state.updateUserLog(log)) }) - this.state.collectFiles(project, files) + await this._testRun.collected(project, files).catch(noop) } - await this.report('onCollected', files).catch(noop) - for (const file of files) { const logs: UserConsoleLog[] = [] const taskPacks: TaskResultPack[] = [] @@ -475,15 +483,26 @@ export class Vitest { if (task.logs) { logs.push(...task.logs) } - taskPacks.push([task.id, task.result, task.meta]) + if (task.type === 'test') { + taskPacks.push( + [task.id, undefined, {}, ['test-prepare']], + [task.id, task.result, task.meta, ['test-finished']], + ) + } + else if (task.type === 'suite') { + taskPacks.push( + [task.id, undefined, {}, ['suite-prepare']], + [task.id, task.result, task.meta, ['suite-finished']], + ) + } } logs.sort((log1, log2) => log1.time - log2.time) for (const log of logs) { - await this.report('onUserConsoleLog', log).catch(noop) + await this._testRun.log(log).catch(noop) } - await this.report('onTaskUpdate', taskPacks).catch(noop) + await this._testRun.updated(taskPacks).catch(noop) } if (hasFailed(files)) { @@ -491,7 +510,7 @@ export class Vitest { } this._checkUnhandledErrors(errors) - await this.report('onFinished', files, errors) + await this._testRun.end(specifications, errors, coverages).catch(noop) await this.initCoverageProvider() await this.coverageProvider?.mergeReports?.(coverages) @@ -669,6 +688,7 @@ export class Vitest { await this.report('onPathsCollected', filepaths) await this.report('onSpecsCollected', specs.map(spec => spec.toJSON())) + await this._testRun.start(specs) // previous run await this.runningPromise @@ -715,13 +735,12 @@ export class 
Vitest { } } finally { - // can be duplicate files if different projects are using the same file - const files = Array.from(new Set(specs.map(spec => spec.moduleId))) - const errors = this.state.getUnhandledErrors() + // TODO: wait for coverage only if `onFinished` is defined const coverage = await this.coverageProvider?.generateCoverage({ allTestsRun }) + const errors = this.state.getUnhandledErrors() this._checkUnhandledErrors(errors) - await this.report('onFinished', this.state.getFiles(files), errors, coverage) + await this._testRun.end(specs, errors, coverage) await this.reportCoverage(coverage, allTestsRun) } })() diff --git a/packages/vitest/src/node/pools/rpc.ts b/packages/vitest/src/node/pools/rpc.ts index 919e15d6d9eb..9578f347790d 100644 --- a/packages/vitest/src/node/pools/rpc.ts +++ b/packages/vitest/src/node/pools/rpc.ts @@ -1,7 +1,6 @@ import type { RawSourceMap } from 'vite-node' import type { RuntimeRPC } from '../../types/rpc' import type { TestProject } from '../project' -import type { TestModule } from '../reporters/reported-tasks' import type { ResolveSnapshotPathHandlerContext } from '../types/config' import { mkdir, writeFile } from 'node:fs/promises' import { join } from 'pathe' @@ -15,7 +14,7 @@ interface MethodsOptions { } export function createMethodsRPC(project: TestProject, options: MethodsOptions = {}): RuntimeRPC { - const ctx = project.ctx + const ctx = project.vitest const cacheFs = options.cacheFs ?? 
false return { snapshotSaved(snapshot) { @@ -79,35 +78,24 @@ export function createMethodsRPC(project: TestProject, options: MethodsOptions = ctx.state.collectPaths(paths) return ctx.report('onPathsCollected', paths) }, - onQueued(file) { - ctx.state.collectFiles(project, [file]) - const testModule = ctx.state.getReportedEntity(file) as TestModule - return ctx.report('onTestModuleQueued', testModule) + async onQueued(file) { + await ctx._testRun.enqueued(project, file) }, - onCollected(files) { - ctx.state.collectFiles(project, files) - return ctx.report('onCollected', files) + async onCollected(files) { + await ctx._testRun.collected(project, files) }, onAfterSuiteRun(meta) { ctx.coverageProvider?.onAfterSuiteRun(meta) }, - onTaskUpdate(packs) { - ctx.state.updateTasks(packs) - return ctx.report('onTaskUpdate', packs) + async onTaskUpdate(packs) { + await ctx._testRun.updated(packs) }, - onUserConsoleLog(log) { - ctx.state.updateUserLog(log) - ctx.report('onUserConsoleLog', log) + async onUserConsoleLog(log) { + await ctx._testRun.log(log) }, onUnhandledError(err, type) { ctx.state.catchError(err, type) }, - onFinished(files) { - const errors = ctx.state.getUnhandledErrors() - ctx._checkUnhandledErrors(errors) - - return ctx.report('onFinished', files, errors) - }, onCancel(reason) { ctx.cancelCurrentRun(reason) }, diff --git a/packages/vitest/src/node/pools/typecheck.ts b/packages/vitest/src/node/pools/typecheck.ts index 6bb0f5704ade..e2ee87bbda4d 100644 --- a/packages/vitest/src/node/pools/typecheck.ts +++ b/packages/vitest/src/node/pools/typecheck.ts @@ -19,7 +19,7 @@ export function createTypecheckPool(ctx: Vitest): ProcessPool { ) { const checker = project.typechecker! 
- await ctx.report('onTaskUpdate', checker.getTestPacks()) + await ctx._testRun.updated(checker.getTestPacks()) if (!project.config.typecheck.ignoreSourceErrors) { sourceErrors.forEach(error => @@ -62,8 +62,7 @@ export function createTypecheckPool(ctx: Vitest): ProcessPool { checker.setFiles(files) checker.onParseStart(async () => { - ctx.state.collectFiles(project, checker.getTestFiles()) - await ctx.report('onCollected') + await ctx._testRun.collected(project, checker.getTestFiles()) }) checker.onParseEnd(result => onParseEnd(project, result)) @@ -81,10 +80,9 @@ export function createTypecheckPool(ctx: Vitest): ProcessPool { } await checker.collectTests() - ctx.state.collectFiles(project, checker.getTestFiles()) - await ctx.report('onTaskUpdate', checker.getTestPacks()) - await ctx.report('onCollected') + await ctx._testRun.collected(project, checker.getTestFiles()) + await ctx._testRun.updated(checker.getTestPacks()) }) await checker.prepare() @@ -108,8 +106,7 @@ export function createTypecheckPool(ctx: Vitest): ProcessPool { const checker = await createWorkspaceTypechecker(project, files) checker.setFiles(files) await checker.collectTests() - ctx.state.collectFiles(project, checker.getTestFiles()) - await ctx.report('onCollected') + await ctx._testRun.collected(project, checker.getTestFiles()) } } @@ -136,8 +133,7 @@ export function createTypecheckPool(ctx: Vitest): ProcessPool { }) const triggered = await _p if (project.typechecker && !triggered) { - ctx.state.collectFiles(project, project.typechecker.getTestFiles()) - await ctx.report('onCollected') + await ctx._testRun.collected(project, project.typechecker.getTestFiles()) await onParseEnd(project, project.typechecker.getResult()) continue } diff --git a/packages/vitest/src/node/reporters/default.ts b/packages/vitest/src/node/reporters/default.ts index 76181ec27f6c..5fe3d6318d4a 100644 --- a/packages/vitest/src/node/reporters/default.ts +++ b/packages/vitest/src/node/reporters/default.ts @@ -1,7 +1,7 @@ 
-import type { File, TaskResultPack } from '@vitest/runner' +import type { File } from '@vitest/runner' import type { Vitest } from '../core' import type { BaseOptions } from './base' -import type { TestModule } from './reported-tasks' +import type { HookOptions, TestCase, TestModule } from './reported-tasks' import { BaseReporter } from './base' import { SummaryReporter } from './summary' @@ -33,6 +33,30 @@ export class DefaultReporter extends BaseReporter { this.summary?.onTestModuleQueued(file) } + onTestModuleCollected(module: TestModule) { + this.summary?.onTestModuleCollected(module) + } + + onTestModuleEnd(module: TestModule) { + this.summary?.onTestModuleEnd(module) + } + + onTestCaseStart(test: TestCase) { + this.summary?.onTestCaseStart(test) + } + + onTestCaseEnd(test: TestCase) { + this.summary?.onTestCaseEnd(test) + } + + onHookStart(hook: HookOptions) { + this.summary?.onHookStart(hook) + } + + onHookEnd(hook: HookOptions) { + this.summary?.onHookEnd(hook) + } + onInit(ctx: Vitest) { super.onInit(ctx) this.summary?.onInit(ctx, { verbose: this.verbose }) @@ -52,11 +76,6 @@ export class DefaultReporter extends BaseReporter { this.summary?.onPathsCollected(paths) } - onTaskUpdate(packs: TaskResultPack[]) { - this.summary?.onTaskUpdate(packs) - super.onTaskUpdate(packs) - } - onWatcherRerun(files: string[], trigger?: string) { this.summary?.onWatcherRerun() super.onWatcherRerun(files, trigger) diff --git a/packages/vitest/src/node/reporters/dot.ts b/packages/vitest/src/node/reporters/dot.ts index 96ab171d1a89..febc7c0f752d 100644 --- a/packages/vitest/src/node/reporters/dot.ts +++ b/packages/vitest/src/node/reporters/dot.ts @@ -1,115 +1,91 @@ -import type { File, TaskResultPack, TaskState, Test } from '@vitest/runner' +import type { File, Task, Test } from '@vitest/runner' import type { Vitest } from '../core' -import { getTests } from '@vitest/runner/utils' +import type { TestCase, TestModule } from './reported-tasks' import c from 'tinyrainbow' import { 
BaseReporter } from './base' import { WindowRenderer } from './renderers/windowedRenderer' -import { TaskParser } from './task-parser' interface Icon { char: string color: (char: string) => string } +type TestCaseState = ReturnType['state'] + export class DotReporter extends BaseReporter { - private summary?: DotSummary + private renderer?: WindowRenderer + private tests = new Map() + private finishedTests = new Set() onInit(ctx: Vitest) { super.onInit(ctx) if (this.isTTY) { - this.summary = new DotSummary() - this.summary.onInit(ctx) + this.renderer = new WindowRenderer({ + logger: ctx.logger, + getWindow: () => this.createSummary(), + }) + + this.ctx.onClose(() => this.renderer?.stop()) } } - onTaskUpdate(packs: TaskResultPack[]) { - this.summary?.onTaskUpdate(packs) - + printTask(task: Task) { if (!this.isTTY) { - super.onTaskUpdate(packs) + super.printTask(task) } } onWatcherRerun(files: string[], trigger?: string) { - this.summary?.onWatcherRerun() + this.tests.clear() + this.renderer?.start() super.onWatcherRerun(files, trigger) } onFinished(files?: File[], errors?: unknown[]) { - this.summary?.onFinished() - super.onFinished(files, errors) - } -} - -class DotSummary extends TaskParser { - private renderer!: WindowRenderer - private tests = new Map() - private finishedTests = new Set() - - onInit(ctx: Vitest): void { - this.ctx = ctx - - this.renderer = new WindowRenderer({ - logger: ctx.logger, - getWindow: () => this.createSummary(), - }) - - this.ctx.onClose(() => this.renderer.stop()) - } - - onWatcherRerun() { - this.tests.clear() - this.renderer.start() - } - - onFinished() { const finalLog = formatTests(Array.from(this.tests.values())) this.ctx.logger.log(finalLog) this.tests.clear() - this.renderer.finish() + this.renderer?.finish() + + super.onFinished(files, errors) } - onTestFilePrepare(file: File): void { - for (const test of getTests(file)) { + onTestModuleCollected(module: TestModule): void { + for (const test of module.children.tests()) { // 
Dot reporter marks pending tests as running - this.onTestStart(test) + this.onTestCaseStart(test) } } - onTestStart(test: Test) { + onTestCaseStart(test: TestCase) { if (this.finishedTests.has(test.id)) { return } - - this.tests.set(test.id, test.mode || 'run') + this.tests.set(test.id, test.result().state || 'run') } - onTestFinished(test: Test) { - if (this.finishedTests.has(test.id)) { - return - } - + onTestCaseEnd(test: TestCase) { this.finishedTests.add(test.id) - this.tests.set(test.id, test.result?.state || 'skip') + this.tests.set(test.id, test.result().state || 'skipped') } - onTestFileFinished() { + onTestModuleEnd() { const columns = this.ctx.logger.getColumns() if (this.tests.size < columns) { return } - const finishedTests = Array.from(this.tests).filter(entry => entry[1] !== 'run') + const finishedTests = Array.from(this.tests).filter(entry => entry[1] !== 'pending') if (finishedTests.length < columns) { return } // Remove finished tests from state and render them in static output - const states: TaskState[] = [] + const states: TestCaseState[] = [] let count = 0 for (const [id, state] of finishedTests) { @@ -138,14 +114,13 @@ const fail: Icon = { char: 'x', color: c.red } const pending: Icon = { char: '*', color: c.yellow } const skip: Icon = { char: '-', color: (char: string) => c.dim(c.gray(char)) } -function getIcon(state: TaskState): Icon { +function getIcon(state: TestCaseState): Icon { switch (state) { - case 'pass': + case 'passed': return pass - case 'fail': + case 'failed': return fail - case 'skip': - case 'todo': + case 'skipped': return skip default: return pending @@ -156,7 +131,7 @@ function getIcon(state: TaskState): Icon { * Format test states into string while keeping ANSI escapes at minimal. * Sibling icons with same color are merged into a single c.color() call. 
*/ -function formatTests(states: TaskState[]): string { +function formatTests(states: TestCaseState[]): string { let currentIcon = pending let count = 0 let output = '' diff --git a/packages/vitest/src/node/reporters/index.ts b/packages/vitest/src/node/reporters/index.ts index dcb7e99a9297..2772e387cbbd 100644 --- a/packages/vitest/src/node/reporters/index.ts +++ b/packages/vitest/src/node/reporters/index.ts @@ -36,6 +36,7 @@ export type { TestProject } from '../project' * @deprecated Use `TestModule` instead */ export const TestFile = _TestFile +export type { TestSpecification } from '../spec' export * from './benchmark' export type { JsonAssertionResult, @@ -47,7 +48,7 @@ export type { */ export type FileDiagnostic = _FileDiagnostic -export { TestCase, TestModule, TestSuite } from './reported-tasks' +export type { TestCase, TestModule, TestSuite } from './reported-tasks' export const ReportersMap = { 'default': DefaultReporter, @@ -80,6 +81,7 @@ export interface BuiltinReporterOptions { } export type { + HookOptions, TaskOptions, TestCollection, @@ -90,3 +92,5 @@ export type { TestResultPassed, TestResultSkipped, } from './reported-tasks' +export type { SerializedError } from '@vitest/utils' +export type TestRunEndReason = 'passed' | 'interrupted' | 'failed' diff --git a/packages/vitest/src/node/reporters/reported-tasks.ts b/packages/vitest/src/node/reporters/reported-tasks.ts index b0f1f7a6808c..b7f6ff173264 100644 --- a/packages/vitest/src/node/reporters/reported-tasks.ts +++ b/packages/vitest/src/node/reporters/reported-tasks.ts @@ -3,9 +3,10 @@ import type { Test as RunnerTestCase, File as RunnerTestFile, Suite as RunnerTestSuite, + SuiteHooks, TaskMeta, } from '@vitest/runner' -import type { TestError } from '@vitest/utils' +import type { SerializedError, TestError } from '@vitest/utils' import type { TestProject } from '../project' class ReportedTaskImplementation { @@ -122,12 +123,29 @@ export class TestCase extends ReportedTaskImplementation { } /** - * 
Test results. Will be `undefined` if test is skipped, not finished yet or was just collected. + * Test results. + * - **pending**: Test was collected, but didn't finish running yet. + * - **passed**: Test passed successfully + * - **failed**: Test failed to execute + * - **skipped**: Test was skipped during collection or dynamically with `ctx.skip()`. */ - public result(): TestResult | undefined { + public result(): TestResult { const result = this.task.result + const mode = result?.state || this.task.mode + + if (!result && (mode === 'skip' || mode === 'todo')) { + return { + state: 'skipped', + note: undefined, + errors: undefined, + } + } + if (!result || result.state === 'run' || result.state === 'queued') { - return undefined + return { + state: 'pending', + errors: undefined, + } } const state = result.state === 'fail' ? 'failed' as const @@ -153,14 +171,6 @@ export class TestCase extends ReportedTaskImplementation { } satisfies TestResultFailed } - /** - * Checks if the test was skipped during collection or dynamically with `ctx.skip()`. - */ - public skipped(): boolean { - const mode = this.task.result?.state || this.task.mode - return mode === 'skip' || mode === 'todo' - } - /** * Custom metadata that was attached to the test during its execution. */ @@ -228,13 +238,13 @@ class TestCollection { /** * Filters all tests that are part of this collection and its children. */ - *allTests(state?: TestResult['state'] | 'running'): Generator { + *allTests(state?: TestState): Generator { for (const child of this) { if (child.type === 'suite') { yield * child.children.allTests(state) } else if (state) { - const testState = getTestState(child) + const testState = child.result().state if (state === testState) { yield child } @@ -248,14 +258,14 @@ class TestCollection { /** * Filters only the tests that are part of this collection. 
*/ - *tests(state?: TestResult['state'] | 'running'): Generator { + *tests(state?: TestState): Generator { for (const child of this) { if (child.type !== 'test') { continue } if (state) { - const testState = getTestState(child) + const testState = child.result().state if (state === testState) { yield child } @@ -298,10 +308,38 @@ class TestCollection { export type { TestCollection } +export interface TestSuiteStatistics { + total: number + completed: number + passed: number + failed: number + skipped: number + todo: number +} + +export interface HookOptions { + name: keyof SuiteHooks + entity: TestCase | TestSuite | TestModule +} + +function createStatistics() { + return { + total: 0, + completed: 0, + passed: 0, + failed: 0, + skipped: 0, + todo: 0, + } +} + abstract class SuiteImplementation extends ReportedTaskImplementation { /** @internal */ declare public readonly task: RunnerTestSuite | RunnerTestFile + /** @internal */ + public _statistic: TestSuiteStatistics = createStatistics() + /** * Collection of suites and tests that are part of this suite. */ @@ -314,18 +352,41 @@ abstract class SuiteImplementation extends ReportedTaskImplementation { } /** - * Checks if the suite was skipped during collection. + * The number of tests in this suite with a specific state. + */ + public statistics(): TestSuiteStatistics { + return { ...this._statistic } + } + + /** + * Checks the running state of the suite. 
*/ - public skipped(): boolean { + public state(): TestSuiteState { const mode = this.task.mode - return mode === 'skip' || mode === 'todo' + const state = this.task.result?.state + if (mode === 'skip' || mode === 'todo' || state === 'skip' || state === 'todo') { + return 'skipped' + } + if (state === 'queued') { + return 'queued' + } + if (state == null || state === 'run' || state === 'only') { + return 'pending' + } + if (state === 'fail') { + return 'failed' + } + if (state === 'pass') { + return 'passed' + } + throw new Error(`Unknown suite state: ${state}`) } /** * Errors that happened outside of the test run during collection, like syntax errors. */ - public errors(): TestError[] { - return (this.task.result?.errors as TestError[] | undefined) || [] + public errors(): SerializedError[] { + return (this.task.result?.errors as SerializedError[] | undefined) || [] } } @@ -402,8 +463,8 @@ export class TestModule extends SuiteImplementation { /** * This is usually an absolute UNIX file path. - * It can be a virtual id if the file is not on the disk. - * This value corresponds to Vite's `ModuleGraph` id. + * It can be a virtual ID if the file is not on the disk. + * This value corresponds to the ID in the Vite's module graph. */ public readonly moduleId: string @@ -420,9 +481,9 @@ export class TestModule extends SuiteImplementation { declare public ok: () => boolean /** - * Checks if the module was skipped and didn't run. + * Checks the running state of the test file. */ - declare public skipped: () => boolean + declare public state: () => TestSuiteState /** * Useful information about the module like duration, memory usage, etc. 
@@ -445,19 +506,21 @@ export class TestModule extends SuiteImplementation { } export interface TaskOptions { - each: boolean | undefined - concurrent: boolean | undefined - shuffle: boolean | undefined - retry: number | undefined - repeats: number | undefined - mode: 'run' | 'only' | 'skip' | 'todo' | 'queued' + readonly each: boolean | undefined + readonly fails: boolean | undefined + readonly concurrent: boolean | undefined + readonly shuffle: boolean | undefined + readonly retry: number | undefined + readonly repeats: number | undefined + readonly mode: 'run' | 'only' | 'skip' | 'todo' | 'queued' } function buildOptions( - task: RunnerTestCase | RunnerTestFile | RunnerTestSuite, + task: RunnerTestCase | RunnerTestSuite, ): TaskOptions { return { each: task.each, + fails: task.type === 'test' && task.fails, concurrent: task.concurrent, shuffle: task.shuffle, retry: task.retry, @@ -466,30 +529,48 @@ function buildOptions( } } -export type TestResult = TestResultPassed | TestResultFailed | TestResultSkipped +export type TestSuiteState = 'skipped' | 'pending' | 'queued' | 'failed' | 'passed' +export type TestState = TestResult['state'] + +export type TestResult = + | TestResultPassed + | TestResultFailed + | TestResultSkipped + | TestResultPending + +export interface TestResultPending { + /** + * The test was collected, but didn't finish running yet. + */ + readonly state: 'pending' + /** + * Pending tests have no errors. + */ + readonly errors: undefined +} export interface TestResultPassed { /** * The test passed successfully. */ - state: 'passed' + readonly state: 'passed' /** * Errors that were thrown during the test execution. * * **Note**: If test was retried successfully, errors will still be reported. */ - errors: TestError[] | undefined + readonly errors: ReadonlyArray | undefined } export interface TestResultFailed { /** * The test failed to execute. */ - state: 'failed' + readonly state: 'failed' /** * Errors that were thrown during the test execution. 
*/ - errors: TestError[] + readonly errors: ReadonlyArray } export interface TestResultSkipped { @@ -497,80 +578,72 @@ export interface TestResultSkipped { * The test was skipped with `only` (on another test), `skip` or `todo` flag. * You can see which one was used in the `options.mode` option. */ - state: 'skipped' + readonly state: 'skipped' /** * Skipped tests have no errors. */ - errors: undefined + readonly errors: undefined /** * A custom note passed down to `ctx.skip(note)`. */ - note: string | undefined + readonly note: string | undefined } export interface TestDiagnostic { /** * If the duration of the test is above `slowTestThreshold`. */ - slow: boolean + readonly slow: boolean /** * The amount of memory used by the test in bytes. * This value is only available if the test was executed with `logHeapUsage` flag. */ - heap: number | undefined + readonly heap: number | undefined /** * The time it takes to execute the test in ms. */ - duration: number + readonly duration: number /** * The time in ms when the test started. */ - startTime: number + readonly startTime: number /** * The amount of times the test was retried. */ - retryCount: number + readonly retryCount: number /** * The amount of times the test was repeated as configured by `repeats` option. * This value can be lower if the test failed during the repeat and no `retry` is configured. */ - repeatCount: number + readonly repeatCount: number /** * If test passed on a second retry. */ - flaky: boolean + readonly flaky: boolean } export interface ModuleDiagnostic { /** * The time it takes to import and initiate an environment. */ - environmentSetupDuration: number + readonly environmentSetupDuration: number /** * The time it takes Vitest to setup test harness (runner, mocks, etc.). */ - prepareDuration: number + readonly prepareDuration: number /** * The time it takes to import the test module. * This includes importing everything in the module and executing suite callbacks. 
*/ - collectDuration: number + readonly collectDuration: number /** * The time it takes to import the setup module. */ - setupDuration: number + readonly setupDuration: number /** * Accumulated duration of all tests and hooks in the module. */ - duration: number -} - -function getTestState(test: TestCase): TestResult['state'] | 'running' { - if (test.skipped()) { - return 'skipped' - } - const result = test.result() - return result ? result.state : 'running' + readonly duration: number } function storeTask( diff --git a/packages/vitest/src/node/reporters/summary.ts b/packages/vitest/src/node/reporters/summary.ts index 7dca5d2c7a8b..c76f8e3c7211 100644 --- a/packages/vitest/src/node/reporters/summary.ts +++ b/packages/vitest/src/node/reporters/summary.ts @@ -1,14 +1,10 @@ -import type { File, Test } from '@vitest/runner' import type { Vitest } from '../core' import type { Reporter } from '../types/reporter' -import type { TestModule } from './reported-tasks' -import type { HookOptions } from './task-parser' -import { getTests } from '@vitest/runner/utils' +import type { HookOptions, TestCase, TestModule } from './reported-tasks' import c from 'tinyrainbow' import { F_POINTER, F_TREE_NODE_END, F_TREE_NODE_MIDDLE } from './renderers/figures' import { formatProjectName, formatTime, formatTimeString, padSummaryTitle } from './renderers/utils' import { WindowRenderer } from './renderers/windowedRenderer' -import { TaskParser } from './task-parser' const DURATION_UPDATE_INTERVAL_MS = 100 const FINISHED_TEST_CLEANUP_TIME_MS = 1_000 @@ -34,33 +30,31 @@ interface SlowTask { hook?: Omit } -interface RunningTest extends Pick { - filename: File['name'] - projectName: File['projectName'] +interface RunningModule extends Pick { + filename: TestModule['task']['name'] + projectName: TestModule['project']['name'] hook?: Omit - tests: Map + tests: Map } /** * Reporter extension that renders summary and forwards all other logs above itself. 
* Intended to be used by other reporters, not as a standalone reporter. */ -export class SummaryReporter extends TaskParser implements Reporter { +export class SummaryReporter implements Reporter { + private ctx!: Vitest private options!: Options private renderer!: WindowRenderer - private suites = emptyCounters() + private modules = emptyCounters() private tests = emptyCounters() private maxParallelTests = 0 - /** Currently running tests, may include finished tests too */ - private runningTests = new Map() + /** Currently running test modules, may include finished test modules too */ + private runningModules = new Map() - /** ID of finished `this.runningTests` that are currently being shown */ - private finishedTests = new Map() - - /** IDs of all finished tests */ - private allFinishedTests = new Set() + /** ID of finished `this.runningModules` that are currently being shown */ + private finishedModules = new Map() private startTime = '' private currentTime = 0 @@ -88,19 +82,14 @@ export class SummaryReporter extends TaskParser implements Reporter { }) } - onTestModuleQueued(module: TestModule) { - this.onTestFilePrepare(module.task) - } - onPathsCollected(paths?: string[]) { - this.suites.total = (paths || []).length + this.modules.total = (paths || []).length } onWatcherRerun() { - this.runningTests.clear() - this.finishedTests.clear() - this.allFinishedTests.clear() - this.suites = emptyCounters() + this.runningModules.clear() + this.finishedModules.clear() + this.modules = emptyCounters() this.tests = emptyCounters() this.startTimers() @@ -108,47 +97,35 @@ export class SummaryReporter extends TaskParser implements Reporter { } onFinished() { - this.runningTests.clear() - this.finishedTests.clear() - this.allFinishedTests.clear() + this.runningModules.clear() + this.finishedModules.clear() this.renderer.finish() clearInterval(this.durationInterval) } - onTestFilePrepare(file: File) { - if (this.runningTests.has(file.id)) { - const stats = 
this.runningTests.get(file.id)! - // if there are no tests, it means the test was queued but not collected - if (!stats.total) { - const total = getTests(file).length - this.tests.total += total - stats.total = total - } - return + onTestModuleQueued(module: TestModule) { + // When new test module starts, take the place of previously finished test module, if any + if (this.finishedModules.size) { + const finished = this.finishedModules.keys().next().value + this.removeTestModule(finished) } - if (this.allFinishedTests.has(file.id)) { - return - } + this.runningModules.set(module.id, initializeStats(module)) + } - const total = getTests(file).length - this.tests.total += total + onTestModuleCollected(module: TestModule) { + let stats = this.runningModules.get(module.id) - // When new test starts, take the place of previously finished test, if any - if (this.finishedTests.size) { - const finished = this.finishedTests.keys().next().value - this.removeTestFile(finished) + if (!stats) { + stats = initializeStats(module) + this.runningModules.set(module.id, stats) } - this.runningTests.set(file.id, { - total, - completed: 0, - filename: file.name, - projectName: file.projectName, - tests: new Map(), - }) + const total = module.children.allTests().toArray().length + this.tests.total += total + stats.total = total - this.maxParallelTests = Math.max(this.maxParallelTests, this.runningTests.size) + this.maxParallelTests = Math.max(this.maxParallelTests, this.runningModules.size) } onHookStart(options: HookOptions) { @@ -185,13 +162,13 @@ export class SummaryReporter extends TaskParser implements Reporter { stats.hook.visible = false } - onTestStart(test: Test) { + onTestCaseStart(test: TestCase) { // Track slow running tests only on verbose mode if (!this.options.verbose) { return } - const stats = this.getTestStats(test) + const stats = this.runningModules.get(test.module.id) if (!stats || stats.tests.has(test.id)) { return @@ -216,8 +193,8 @@ export class SummaryReporter 
extends TaskParser implements Reporter { stats.tests.set(test.id, slowTest) } - onTestFinished(test: Test) { - const stats = this.getTestStats(test) + onTestCaseEnd(test: TestCase) { + const stats = this.runningModules.get(test.module.id) if (!stats) { return @@ -227,94 +204,73 @@ export class SummaryReporter extends TaskParser implements Reporter { stats.tests.delete(test.id) stats.completed++ - const result = test.result + const result = test.result() - if (result?.state === 'pass') { + if (result?.state === 'passed') { this.tests.passed++ } - else if (result?.state === 'fail') { + else if (result?.state === 'failed') { this.tests.failed++ } - else if (!result?.state || result?.state === 'skip' || result?.state === 'todo') { + else if (!result?.state || result?.state === 'skipped') { this.tests.skipped++ } } - onTestFileFinished(file: File) { - if (this.allFinishedTests.has(file.id)) { - return - } + onTestModuleEnd(module: TestModule) { + const state = module.state() + this.modules.completed++ - this.allFinishedTests.add(file.id) - this.suites.completed++ - - if (file.result?.state === 'pass') { - this.suites.passed++ + if (state === 'passed') { + this.modules.passed++ } - else if (file.result?.state === 'fail') { - this.suites.failed++ + else if (state === 'failed') { + this.modules.failed++ } - else if (file.result?.state === 'skip') { - this.suites.skipped++ + else if (module.task.mode === 'todo' && state === 'skipped') { + this.modules.todo++ } - else if (file.result?.state === 'todo') { - this.suites.todo++ + else if (state === 'skipped') { + this.modules.skipped++ } - const left = this.suites.total - this.suites.completed + const left = this.modules.total - this.modules.completed // Keep finished tests visible in summary for a while if there are more tests left. // When a new test starts in onTestFilePrepare it will take this ones place. // This reduces flickering by making summary more stable. 
if (left > this.maxParallelTests) { - this.finishedTests.set(file.id, setTimeout(() => { - this.removeTestFile(file.id) + this.finishedModules.set(module.id, setTimeout(() => { + this.removeTestModule(module.id) }, FINISHED_TEST_CLEANUP_TIME_MS).unref()) } else { // Run is about to end as there are less tests left than whole run had parallel at max. - // Remove finished test immediately. - this.removeTestFile(file.id) + // Remove finished test immediatelly. + this.removeTestModule(module.id) } } - private getTestStats(test: Test) { - const file = test.file - let stats = this.runningTests.get(file.id) - - if (!stats || stats.total === 0) { - // It's possible that that test finished before it's preparation was even reported - this.onTestFilePrepare(test.file) - stats = this.runningTests.get(file.id)! - - // It's also possible that this update came after whole test file was reported as finished - if (!stats) { - return - } - } - - return stats - } - - private getHookStats({ file, id, type }: HookOptions) { + private getHookStats({ entity }: HookOptions) { // Track slow running hooks only on verbose mode if (!this.options.verbose) { return } - const stats = this.runningTests.get(file.id) + const module = entity.type === 'module' ? entity : entity.module + const stats = this.runningModules.get(module.id) if (!stats) { return } - return type === 'suite' ? stats : stats?.tests.get(id) + return entity.type === 'test' ? 
stats.tests.get(entity.id) : stats } private createSummary() { const summary = [''] - for (const testFile of Array.from(this.runningTests.values()).sort(sortRunningTests)) { + for (const testFile of Array.from(this.runningModules.values()).sort(sortRunningModules)) { summary.push( c.bold(c.yellow(` ${F_POINTER} `)) + formatProjectName(testFile.projectName) @@ -345,11 +301,11 @@ export class SummaryReporter extends TaskParser implements Reporter { } } - if (this.runningTests.size > 0) { + if (this.runningModules.size > 0) { summary.push('') } - summary.push(padSummaryTitle('Test Files') + getStateString(this.suites)) + summary.push(padSummaryTitle('Test Files') + getStateString(this.modules)) summary.push(padSummaryTitle('Tests') + getStateString(this.tests)) summary.push(padSummaryTitle('Start at') + this.startTime) summary.push(padSummaryTitle('Duration') + formatTime(this.duration)) @@ -369,19 +325,19 @@ export class SummaryReporter extends TaskParser implements Reporter { }, DURATION_UPDATE_INTERVAL_MS).unref() } - private removeTestFile(id?: File['id']) { + private removeTestModule(id?: TestModule['id']) { if (!id) { return } - const testFile = this.runningTests.get(id) + const testFile = this.runningModules.get(id) testFile?.hook?.onFinish() testFile?.tests?.forEach(test => test.onFinish()) - this.runningTests.delete(id) + this.runningModules.delete(id) - clearTimeout(this.finishedTests.get(id)) - this.finishedTests.delete(id) + clearTimeout(this.finishedModules.get(id)) + this.finishedModules.delete(id) } } @@ -402,7 +358,7 @@ function getStateString(entry: Counter) { ) } -function sortRunningTests(a: RunningTest, b: RunningTest) { +function sortRunningModules(a: RunningModule, b: RunningModule) { if ((a.projectName || '') > (b.projectName || '')) { return 1 } @@ -413,3 +369,13 @@ function sortRunningTests(a: RunningTest, b: RunningTest) { return a.filename.localeCompare(b.filename) } + +function initializeStats(module: TestModule) { + return { + total: 0, + 
completed: 0, + filename: module.task.name, + projectName: module.project.name, + tests: new Map(), + } +} diff --git a/packages/vitest/src/node/reporters/task-parser.ts b/packages/vitest/src/node/reporters/task-parser.ts deleted file mode 100644 index 7ff04f178f76..000000000000 --- a/packages/vitest/src/node/reporters/task-parser.ts +++ /dev/null @@ -1,86 +0,0 @@ -import type { File, Task, TaskResultPack, Test } from '@vitest/runner' -import type { Vitest } from '../core' -import { getTests } from '@vitest/runner/utils' - -export interface HookOptions { - name: string - file: File - id: File['id'] | Test['id'] - type: Task['type'] -} - -export class TaskParser { - ctx!: Vitest - - onInit(ctx: Vitest) { - this.ctx = ctx - } - - onHookStart(_options: HookOptions) {} - onHookEnd(_options: HookOptions) {} - - onTestStart(_test: Test) {} - onTestFinished(_test: Test) {} - - onTestFilePrepare(_file: File) {} - onTestFileFinished(_file: File) {} - - onTaskUpdate(packs: TaskResultPack[]) { - const startingTestFiles: File[] = [] - const finishedTestFiles: File[] = [] - - const startingTests: Test[] = [] - const finishedTests: Test[] = [] - - const startingHooks: HookOptions[] = [] - const endingHooks: HookOptions[] = [] - - for (const pack of packs) { - const task = this.ctx.state.idMap.get(pack[0]) - - if (task?.type === 'suite' && 'filepath' in task && task.result?.state) { - if (task?.result?.state === 'run' || task?.result?.state === 'queued') { - startingTestFiles.push(task) - } - else { - // Skipped tests are not reported, do it manually - for (const test of getTests(task)) { - if (!test.result || test.result?.state === 'skip') { - finishedTests.push(test) - } - } - - finishedTestFiles.push(task.file) - } - } - - if (task?.type === 'test') { - if (task.result?.state === 'run' || task.result?.state === 'queued') { - startingTests.push(task) - } - else if (task.result?.hooks?.afterEach !== 'run') { - finishedTests.push(task) - } - } - - if (task?.result?.hooks) { - for 
(const [hook, state] of Object.entries(task.result.hooks)) { - if (state === 'run' || state === 'queued') { - startingHooks.push({ name: hook, file: task.file, id: task.id, type: task.type }) - } - else { - endingHooks.push({ name: hook, file: task.file, id: task.id, type: task.type }) - } - } - } - } - - endingHooks.forEach(hook => this.onHookEnd(hook)) - finishedTests.forEach(test => this.onTestFinished(test)) - finishedTestFiles.forEach(file => this.onTestFileFinished(file)) - - startingTestFiles.forEach(file => this.onTestFilePrepare(file)) - startingTests.forEach(test => this.onTestStart(test)) - startingHooks.forEach(hook => this.onHookStart(hook)) - } -} diff --git a/packages/vitest/src/node/spec.ts b/packages/vitest/src/node/spec.ts index 8ae14ec121dc..11f2268d25ab 100644 --- a/packages/vitest/src/node/spec.ts +++ b/packages/vitest/src/node/spec.ts @@ -1,6 +1,9 @@ import type { SerializedTestSpecification } from '../runtime/types/utils' import type { TestProject } from './project' +import type { TestModule } from './reporters/reported-tasks' import type { Pool } from './types/pool-options' +import { generateFileHash } from '@vitest/runner/utils' +import { relative } from 'pathe' export class TestSpecification { /** @@ -16,6 +19,10 @@ export class TestSpecification { */ public readonly 2: { pool: Pool } + /** + * The task ID associated with the test module. + */ + public readonly taskId: string /** * The test project that the module belongs to. */ @@ -43,12 +50,34 @@ export class TestSpecification { this[0] = project this[1] = moduleId this[2] = { pool } + const name = project.name + const hashName = pool !== 'typescript' + ? name + : name + // https://github.com/vitest-dev/vitest/blob/main/packages/vitest/src/typecheck/collect.ts#L58 + ? 
`${name}:__typecheck__` + : '__typecheck__' + this.taskId = generateFileHash( + relative(project.config.root, moduleId), + hashName, + ) this.project = project this.moduleId = moduleId this.pool = pool this.testLines = testLines } + /** + * Test module assosiacted with the specification. + */ + get testModule(): TestModule | undefined { + const task = this.project.vitest.state.idMap.get(this.taskId) + if (!task) { + return undefined + } + return this.project.vitest.state.getReportedEntity(task) as TestModule | undefined + } + toJSON(): SerializedTestSpecification { return [ { diff --git a/packages/vitest/src/node/test-run.ts b/packages/vitest/src/node/test-run.ts new file mode 100644 index 000000000000..35373a82a007 --- /dev/null +++ b/packages/vitest/src/node/test-run.ts @@ -0,0 +1,161 @@ +import type { File as RunnerTestFile, TaskResultPack } from '@vitest/runner' +import type { SerializedError } from '../public/utils' +import type { UserConsoleLog } from '../types/general' +import type { Vitest } from './core' +import type { TestProject } from './project' +import type { HookOptions, TestCase, TestModule } from './reporters/reported-tasks' +import type { TestSpecification } from './spec' + +export class TestRun { + private tests = emptyCounters() + private suites = emptyCounters() + + constructor(private vitest: Vitest) {} + + async start(specifications: TestSpecification[]) { + this.tests = emptyCounters() + this.suites = emptyCounters() + this.suites.total = specifications.length + + await this.vitest.report('onTestRunStart', [...specifications]) + } + + async enqueued(project: TestProject, file: RunnerTestFile) { + this.vitest.state.collectFiles(project, [file]) + const testModule = this.vitest.state.getReportedEntity(file) as TestModule + await this.vitest.report('onTestModuleQueued', testModule) + } + + async collected(project: TestProject, files: RunnerTestFile[]) { + this.vitest.state.collectFiles(project, files) + await Promise.all([ + 
this.vitest.report('onCollected', files), + ...files.map((file) => { + const testModule = this.vitest.state.getReportedEntity(file) as TestModule + return this.vitest.report('onTestModuleCollected', testModule) + }), + ]) + } + + async log(log: UserConsoleLog) { + this.vitest.state.updateUserLog(log) + await this.vitest.report('onUserConsoleLog', log) + } + + async updated(update: TaskResultPack[]) { + this.vitest.state.updateTasks(update) + + // These are used to guarantee correct reporting order + const runningTestModules: TestModule[] = [] + const finishedTestModules: TestModule[] = [] + + const runningTestCases: TestCase[] = [] + const finishedTestCases: TestCase[] = [] + + const startingHooks: HookOptions[] = [] + const endingHooks: HookOptions[] = [] + + for (const [id,,,events] of update) { + const task = this.vitest.state.idMap.get(id) + const entity = task && this.vitest.state.getReportedEntity(task) + + if (!entity) { + continue + } + + for (const event of events) { + if (event === 'suite-prepare' && entity.type === 'module') { + runningTestModules.push(entity) + } + + if (event === 'suite-finished' && entity.type === 'module') { + finishedTestModules.push(entity) + + // Skipped tests need to be reported manually once test module has finished + for (const test of entity.children.allTests()) { + if (test.result().state === 'skipped') { + finishedTestCases.push(test) + } + } + } + + if (event === 'test-prepare' && entity.type === 'test') { + runningTestCases.push(entity) + } + + if (event === 'test-finished' && entity.type === 'test') { + finishedTestCases.push(entity) + } + + if ((event === 'suite-hook-start' || event === 'suite-hook-end') && entity.task.result?.hooks) { + for (const hook of Object.keys(entity.task.result.hooks)) { + const name = hook as keyof (typeof entity.task.result.hooks) + + if (event === 'suite-hook-start') { + startingHooks.push({ name, entity }) + } + else { + endingHooks.push({ name, entity }) + } + } + } + } + } + + // TODO: 
error handling + + // TODO: what is the order or reports here? + // "onTaskUpdate" in parallel with others or before all or after all? + await this.vitest.report('onTaskUpdate', update) + + // Order of reporting is important here + await Promise.all(endingHooks.map(hook => this.vitest.report('onHookEnd', hook))) + await Promise.all(finishedTestCases.map(testCase => this.vitest.report('onTestCaseEnd', testCase))) + await Promise.all(finishedTestModules.map(module => this.vitest.report('onTestModuleEnd', module))) + + await Promise.all(runningTestModules.map(module => this.vitest.report('onTestModuleStart', module))) + await Promise.all(runningTestCases.map(testCase => this.vitest.report('onTestCaseStart', testCase))) + await Promise.all(startingHooks.map(hook => this.vitest.report('onHookStart', hook))) + } + + async end(specifications: TestSpecification[], errors: unknown[], coverage?: unknown) { + const state = this.vitest.isCancelling + ? 'interrupted' + // by this point, the run will be marked as failed if there are any errors, + // should it be done by testRun.end? + : process.exitCode + ? 'failed' + : 'passed' + + const modules = specifications.map((spec) => { + if (!spec.testModule) { + const error = new Error(`Module "${spec.moduleId}" was not found when finishing test run. This is a bug in Vitest. 
Please, open an issue.`) + this.vitest.state.catchError(error, 'Unhandled Error') + errors.push(error) + return null + } + return spec.testModule + }).filter(s => s != null) + const files = modules.map(m => m.task) + + await Promise.all([ + this.vitest.report('onTestRunEnd', modules, [...errors] as SerializedError[], state), + // TODO: in a perfect world, the coverage should be done in parallel to `onFinished` + this.vitest.report('onFinished', files, errors, coverage), + ]) + await this.vitest.report('onCoverage', coverage) + } +} + +interface Counter { + total: number + completed: number + passed: number + failed: number + skipped: number + todo: number +} + +function emptyCounters(): Counter { + return { completed: 0, passed: 0, failed: 0, skipped: 0, todo: 0, total: 0 } +} diff --git a/packages/vitest/src/node/types/reporter.ts b/packages/vitest/src/node/types/reporter.ts index d45d6bf376a4..16c0452b0530 100644 --- a/packages/vitest/src/node/types/reporter.ts +++ b/packages/vitest/src/node/types/reporter.ts @@ -1,20 +1,35 @@ import type { File, TaskResultPack } from '@vitest/runner' +import type { SerializedError } from '@vitest/utils' import type { SerializedTestSpecification } from '../../runtime/types/utils' import type { Awaitable, UserConsoleLog } from '../../types/general' import type { Vitest } from '../core' -import type { TestModule } from '../reporters/reported-tasks' +import type { TestRunEndReason } from '../reporters' +import type { HookOptions, TestCase, TestModule } from '../reporters/reported-tasks' +import type { TestSpecification } from '../spec' export interface Reporter { - onInit?: (ctx: Vitest) => void + onInit?: (vitest: Vitest) => void + /** + * @deprecated use `onTestRunStart` instead + */ onPathsCollected?: (paths?: string[]) => Awaitable + /** + * @deprecated use `onTestRunStart` instead + */ onSpecsCollected?: (specs?: SerializedTestSpecification[]) => Awaitable - onTestModuleQueued?: (file: TestModule) => Awaitable - onCollected?: 
(files?: File[]) => Awaitable + // TODO: deprecate instead of what(?) + onCollected?: (files: File[]) => Awaitable + /** + * @deprecated use `onTestRunEnd` instead + */ onFinished?: ( files: File[], errors: unknown[], coverage?: unknown ) => Awaitable + /** + * @deprecated use `onTestModuleQueued`, `onTestModuleStart`, `onTestModuleEnd`, `onTestCaseStart`, `onTestCaseEnd` instead + */ onTaskUpdate?: (packs: TaskResultPack[]) => Awaitable onTestRemoved?: (trigger?: string) => Awaitable onWatcherStart?: (files?: File[], errors?: unknown[]) => Awaitable @@ -22,4 +37,44 @@ export interface Reporter { onServerRestart?: (reason?: string) => Awaitable onUserConsoleLog?: (log: UserConsoleLog) => Awaitable onProcessTimeout?: () => Awaitable + + // new API, TODO: add a lot of documentation for those + /** + * Called when the new test run starts. + */ + onTestRunStart?: (specifications: ReadonlyArray) => Awaitable + /** + * Called when the test run is finished. + */ + onTestRunEnd?: ( + testModules: ReadonlyArray, + unhandledErrors: ReadonlyArray, + reason: TestRunEndReason + ) => Awaitable + /** + * Called when the module is enqueued for testing. The file itself is not loaded yet. + */ + onTestModuleQueued?: (testModule: TestModule) => Awaitable + /** + * Called when the test file is loaded and the module is ready to run tests. + */ + onTestModuleCollected?: (testModule: TestModule) => Awaitable + onTestModuleStart?: (testModule: TestModule) => Awaitable + onTestModuleEnd?: (testModule: TestModule) => Awaitable + + /** + * Called before the `beforeEach` hooks for the test are run. + * The `result()` will return either `pending` or `skipped`. + */ + onTestCaseStart?: (testCase: TestCase) => Awaitable + /** + * Called after the test and its hooks are finished running. + * The `result()` cannot be `pending`. 
+ */ + onTestCaseEnd?: (testCase: TestCase) => Awaitable + + onHookStart?: (hook: HookOptions) => Awaitable + onHookEnd?: (hook: HookOptions) => Awaitable + + onCoverage?: (coverage: unknown) => Awaitable } diff --git a/packages/vitest/src/public/node.ts b/packages/vitest/src/public/node.ts index bc7fbcb502a4..c63c5a7f464e 100644 --- a/packages/vitest/src/public/node.ts +++ b/packages/vitest/src/public/node.ts @@ -34,8 +34,8 @@ export type { JUnitOptions } from '../node/reporters/junit' export type { ModuleDiagnostic, - TaskOptions, + TestCase, TestCollection, TestDiagnostic, @@ -44,7 +44,10 @@ export type { TestResultFailed, TestResultPassed, TestResultSkipped, + TestState, TestSuite, + TestSuiteState, + TestSuiteStatistics, } from '../node/reporters/reported-tasks' export { BaseSequencer } from '../node/sequencers/BaseSequencer' diff --git a/packages/vitest/src/runtime/rpc.ts b/packages/vitest/src/runtime/rpc.ts index f00d48cef005..556be6863955 100644 --- a/packages/vitest/src/runtime/rpc.ts +++ b/packages/vitest/src/runtime/rpc.ts @@ -75,7 +75,6 @@ export function createRuntimeRpc( { eventNames: [ 'onUserConsoleLog', - 'onFinished', 'onCollected', 'onCancel', ], diff --git a/packages/vitest/src/runtime/runners/benchmark.ts b/packages/vitest/src/runtime/runners/benchmark.ts index c24e5d596ab8..7995cd7aa1b2 100644 --- a/packages/vitest/src/runtime/runners/benchmark.ts +++ b/packages/vitest/src/runtime/runners/benchmark.ts @@ -1,6 +1,7 @@ import type { Suite, Task, + TaskUpdateEvent, VitestRunner, VitestRunnerImportSource, } from '@vitest/runner' @@ -59,7 +60,7 @@ async function runBenchmarkSuite(suite: Suite, runner: NodeBenchmarkRunner) { startTime: start, benchmark: createBenchmarkResult(suite.name), } - updateTask(suite) + updateTask('suite-prepare', suite) const addBenchTaskListener = ( task: InstanceType, @@ -82,7 +83,7 @@ async function runBenchmarkSuite(suite: Suite, runner: NodeBenchmarkRunner) { if (!runner.config.benchmark?.includeSamples) { 
result.samples.length = 0 } - updateTask(benchmark) + updateTask('test-finished', benchmark) }, { once: true, @@ -122,7 +123,7 @@ async function runBenchmarkSuite(suite: Suite, runner: NodeBenchmarkRunner) { for (const benchmark of benchmarkGroup) { const task = benchmarkTasks.get(benchmark)! - updateTask(benchmark) + updateTask('test-prepare', benchmark) await task.warmup() tasks.push([ await new Promise(resolve => @@ -137,14 +138,14 @@ async function runBenchmarkSuite(suite: Suite, runner: NodeBenchmarkRunner) { suite.result!.duration = performance.now() - start suite.result!.state = 'pass' - updateTask(suite) + updateTask('suite-finished', suite) defer.resolve(null) await defer } - function updateTask(task: Task) { - updateRunnerTask(task, runner) + function updateTask(event: TaskUpdateEvent, task: Task) { + updateRunnerTask(event, task, runner) } } diff --git a/packages/vitest/src/typecheck/typechecker.ts b/packages/vitest/src/typecheck/typechecker.ts index fc3d8c611a83..4a605b07f373 100644 --- a/packages/vitest/src/typecheck/typechecker.ts +++ b/packages/vitest/src/typecheck/typechecker.ts @@ -362,7 +362,12 @@ export class Typechecker { return Object.values(this._tests || {}) .map(({ file }) => getTasks(file)) .flat() - .map(i => [i.id, i.result, { typecheck: true }]) + .map(i => [ + i.id, + i.result, + { typecheck: true }, + [i.type === 'suite' ? 
'suite-finished' : 'test-finished'], + ]) } } diff --git a/packages/vitest/src/types/rpc.ts b/packages/vitest/src/types/rpc.ts index cbdbbd9ef815..a90d4aff3080 100644 --- a/packages/vitest/src/types/rpc.ts +++ b/packages/vitest/src/types/rpc.ts @@ -35,7 +35,6 @@ export interface RuntimeRPC { force?: boolean ) => Promise - onFinished: (files: File[], errors?: unknown[]) => void onPathsCollected: (paths: string[]) => void onUserConsoleLog: (log: UserConsoleLog) => void onUnhandledError: (err: unknown, type: string) => void diff --git a/test/benchmark/test/reporter.test.ts b/test/benchmark/test/reporter.test.ts index 4e3895ede04e..f5c89d52b878 100644 --- a/test/benchmark/test/reporter.test.ts +++ b/test/benchmark/test/reporter.test.ts @@ -1,7 +1,5 @@ -import type { RunnerTestCase } from 'vitest' import * as pathe from 'pathe' import { assert, expect, it } from 'vitest' -import { TaskParser } from 'vitest/src/node/reporters/task-parser.js' import { runVitest } from '../../test-utils' it('summary', async () => { @@ -35,30 +33,6 @@ it('non-tty', async () => { } }) -it('reports passed tasks just once', async () => { - const passed: string[] = [] - - class CustomReporter extends TaskParser { - onTestFinished(_test: RunnerTestCase): void { - passed.push(_test.name) - } - } - - await runVitest({ - root: pathe.join(import.meta.dirname, '../fixtures/reporter'), - benchmark: { - reporters: new CustomReporter(), - }, - }, ['multiple.bench.ts'], 'benchmark') - - expect(passed).toMatchInlineSnapshot(` - [ - "first", - "second", - ] - `) -}) - it.for([true, false])('includeSamples %s', async (includeSamples) => { const result = await runVitest( { diff --git a/test/cli/fixtures/custom-pool/pool/custom-pool.ts b/test/cli/fixtures/custom-pool/pool/custom-pool.ts index 614b1363add6..a6cc8ddf2ac3 100644 --- a/test/cli/fixtures/custom-pool/pool/custom-pool.ts +++ b/test/cli/fixtures/custom-pool/pool/custom-pool.ts @@ -49,7 +49,12 @@ export default (vitest: Vitest): ProcessPool => { } 
taskFile.tasks.push(taskTest) await methods.onCollected([taskFile]) - await methods.onTaskUpdate(getTasks(taskFile).map(task => [task.id, task.result, task.meta])) + await methods.onTaskUpdate(getTasks(taskFile).map(task => [ + task.id, + task.result, + task.meta, + [task.type === 'test' ? 'test-finished' : 'suite-finished'] + ])) } }, close() { diff --git a/test/cli/test/reported-tasks.test.ts b/test/cli/test/reported-tasks.test.ts index 02e0df073e91..1aac1e92270f 100644 --- a/test/cli/test/reported-tasks.test.ts +++ b/test/cli/test/reported-tasks.test.ts @@ -66,7 +66,7 @@ it('correctly reports a file', () => { expect.soft([...testModule.children.allTests('skipped')]).toHaveLength(7) expect.soft([...testModule.children.allTests('passed')]).toHaveLength(9) expect.soft([...testModule.children.allTests('failed')]).toHaveLength(5) - expect.soft([...testModule.children.allTests('running')]).toHaveLength(0) + expect.soft([...testModule.children.allTests('pending')]).toHaveLength(0) const suites = [...testModule.children.suites()] expect(suites).toHaveLength(5) @@ -163,6 +163,22 @@ it('correctly reports failed test', () => { expect(diagnostic.repeatCount).toBe(0) }) +it('correctly reports a skipped test', () => { + const optionTestCase = findTest(testModule.children, 'skips an option test') + expect(optionTestCase.result()).toEqual({ + state: 'skipped', + note: undefined, + errors: undefined, + }) + + const modifierTestCase = findTest(testModule.children, 'skips a .modifier test') + expect(modifierTestCase.result()).toEqual({ + state: 'skipped', + note: undefined, + errors: undefined, + }) +}) + it('correctly reports multiple failures', () => { const testCase = findTest(testModule.children, 'fails multiple times') const result = testCase.result()! 
diff --git a/test/core/test/sequencers.test.ts b/test/core/test/sequencers.test.ts index 0850cd5f44cb..2cfc11860b66 100644 --- a/test/core/test/sequencers.test.ts +++ b/test/core/test/sequencers.test.ts @@ -20,6 +20,9 @@ function buildCtx() { function buildWorkspace() { return { name: 'test', + config: { + root: import.meta.dirname, + }, } as any as WorkspaceProject } diff --git a/test/reporters/fixtures/task-parser-tests/example-1.test.ts b/test/reporters/fixtures/test-run-tests/example-1.test.ts similarity index 100% rename from test/reporters/fixtures/task-parser-tests/example-1.test.ts rename to test/reporters/fixtures/test-run-tests/example-1.test.ts diff --git a/test/reporters/fixtures/task-parser-tests/example-2.test.ts b/test/reporters/fixtures/test-run-tests/example-2.test.ts similarity index 100% rename from test/reporters/fixtures/task-parser-tests/example-2.test.ts rename to test/reporters/fixtures/test-run-tests/example-2.test.ts diff --git a/test/reporters/tests/task-parser.test.ts b/test/reporters/tests/task-parser.test.ts deleted file mode 100644 index 22779ecb377b..000000000000 --- a/test/reporters/tests/task-parser.test.ts +++ /dev/null @@ -1,157 +0,0 @@ -import type { File, Test } from '@vitest/runner' -import type { TestSpecification } from 'vitest/node' -import type { Reporter } from 'vitest/reporters' -import type { HookOptions } from '../../../packages/vitest/src/node/reporters/task-parser' -import { expect, test } from 'vitest' -import { TaskParser } from '../../../packages/vitest/src/node/reporters/task-parser' -import { runVitest } from '../../test-utils' - -test('tasks are reported in correct order', async () => { - const reporter = new TaskReporter() - - const { stdout, stderr } = await runVitest({ - config: false, - include: ['./fixtures/task-parser-tests/*.test.ts'], - fileParallelism: false, - reporters: [reporter], - sequence: { sequencer: Sorter }, - }) - - expect(stdout).toBe('') - expect(stderr).toBe('') - - 
expect(reporter.calls).toMatchInlineSnapshot(` - [ - "|fixtures/task-parser-tests/example-1.test.ts| start", - "|fixtures/task-parser-tests/example-1.test.ts| beforeAll start (suite)", - "|fixtures/task-parser-tests/example-1.test.ts| beforeAll end (suite)", - "|fixtures/task-parser-tests/example-1.test.ts| beforeAll end (suite)", - "|fixtures/task-parser-tests/example-1.test.ts| start", - "|fixtures/task-parser-tests/example-1.test.ts| RUN some test", - "|fixtures/task-parser-tests/example-1.test.ts| beforeEach start (test)", - "|fixtures/task-parser-tests/example-1.test.ts| beforeEach end (test)", - "|fixtures/task-parser-tests/example-1.test.ts| RUN some test", - "|fixtures/task-parser-tests/example-1.test.ts| beforeEach end (test)", - "|fixtures/task-parser-tests/example-1.test.ts| afterEach start (test)", - "|fixtures/task-parser-tests/example-1.test.ts| beforeEach end (test)", - "|fixtures/task-parser-tests/example-1.test.ts| afterEach end (test)", - "|fixtures/task-parser-tests/example-1.test.ts| beforeAll end (suite)", - "|fixtures/task-parser-tests/example-1.test.ts| afterAll end (suite)", - "|fixtures/task-parser-tests/example-1.test.ts| beforeEach end (test)", - "|fixtures/task-parser-tests/example-1.test.ts| afterEach end (test)", - "|fixtures/task-parser-tests/example-1.test.ts| beforeEach end (test)", - "|fixtures/task-parser-tests/example-1.test.ts| beforeEach end (test)", - "|fixtures/task-parser-tests/example-1.test.ts| DONE some test", - "|fixtures/task-parser-tests/example-1.test.ts| DONE Fast test 1", - "|fixtures/task-parser-tests/example-1.test.ts| RUN parallel slow tests 1.1", - "|fixtures/task-parser-tests/example-1.test.ts| RUN parallel slow tests 1.2", - "|fixtures/task-parser-tests/example-1.test.ts| beforeEach end (test)", - "|fixtures/task-parser-tests/example-1.test.ts| afterEach end (test)", - "|fixtures/task-parser-tests/example-1.test.ts| beforeEach end (test)", - "|fixtures/task-parser-tests/example-1.test.ts| afterEach end 
(test)", - "|fixtures/task-parser-tests/example-1.test.ts| beforeAll end (suite)", - "|fixtures/task-parser-tests/example-1.test.ts| DONE parallel slow tests 1.1", - "|fixtures/task-parser-tests/example-1.test.ts| DONE parallel slow tests 1.2", - "|fixtures/task-parser-tests/example-1.test.ts| start", - "|fixtures/task-parser-tests/example-1.test.ts| afterAll start (suite)", - "|fixtures/task-parser-tests/example-1.test.ts| beforeAll end (suite)", - "|fixtures/task-parser-tests/example-1.test.ts| afterAll end (suite)", - "|fixtures/task-parser-tests/example-1.test.ts| DONE Skipped test 1", - "|fixtures/task-parser-tests/example-1.test.ts| finish", - "|fixtures/task-parser-tests/example-2.test.ts| start", - "|fixtures/task-parser-tests/example-2.test.ts| beforeAll start (suite)", - "|fixtures/task-parser-tests/example-2.test.ts| beforeAll end (suite)", - "|fixtures/task-parser-tests/example-2.test.ts| beforeAll end (suite)", - "|fixtures/task-parser-tests/example-2.test.ts| start", - "|fixtures/task-parser-tests/example-2.test.ts| RUN some test", - "|fixtures/task-parser-tests/example-2.test.ts| beforeEach start (test)", - "|fixtures/task-parser-tests/example-2.test.ts| beforeEach end (test)", - "|fixtures/task-parser-tests/example-2.test.ts| RUN some test", - "|fixtures/task-parser-tests/example-2.test.ts| beforeEach end (test)", - "|fixtures/task-parser-tests/example-2.test.ts| afterEach start (test)", - "|fixtures/task-parser-tests/example-2.test.ts| beforeEach end (test)", - "|fixtures/task-parser-tests/example-2.test.ts| afterEach end (test)", - "|fixtures/task-parser-tests/example-2.test.ts| beforeAll end (suite)", - "|fixtures/task-parser-tests/example-2.test.ts| afterAll end (suite)", - "|fixtures/task-parser-tests/example-2.test.ts| beforeEach end (test)", - "|fixtures/task-parser-tests/example-2.test.ts| afterEach end (test)", - "|fixtures/task-parser-tests/example-2.test.ts| beforeEach end (test)", - "|fixtures/task-parser-tests/example-2.test.ts| 
beforeEach end (test)", - "|fixtures/task-parser-tests/example-2.test.ts| DONE some test", - "|fixtures/task-parser-tests/example-2.test.ts| DONE Fast test 1", - "|fixtures/task-parser-tests/example-2.test.ts| RUN parallel slow tests 2.1", - "|fixtures/task-parser-tests/example-2.test.ts| RUN parallel slow tests 2.2", - "|fixtures/task-parser-tests/example-2.test.ts| beforeEach end (test)", - "|fixtures/task-parser-tests/example-2.test.ts| afterEach end (test)", - "|fixtures/task-parser-tests/example-2.test.ts| beforeEach end (test)", - "|fixtures/task-parser-tests/example-2.test.ts| afterEach end (test)", - "|fixtures/task-parser-tests/example-2.test.ts| beforeAll end (suite)", - "|fixtures/task-parser-tests/example-2.test.ts| DONE parallel slow tests 2.1", - "|fixtures/task-parser-tests/example-2.test.ts| DONE parallel slow tests 2.2", - "|fixtures/task-parser-tests/example-2.test.ts| start", - "|fixtures/task-parser-tests/example-2.test.ts| afterAll start (suite)", - "|fixtures/task-parser-tests/example-2.test.ts| beforeAll end (suite)", - "|fixtures/task-parser-tests/example-2.test.ts| afterAll end (suite)", - "|fixtures/task-parser-tests/example-2.test.ts| DONE Skipped test 1", - "|fixtures/task-parser-tests/example-2.test.ts| finish", - ] - `) -}) - -class TaskReporter extends TaskParser implements Reporter { - calls: string[] = [] - - // @ts-expect-error -- not sure why - onInit(ctx) { - super.onInit(ctx) - } - - onTestFilePrepare(file: File) { - this.calls.push(`|${file.name}| start`) - } - - onTestFileFinished(file: File) { - this.calls.push(`|${file.name}| finish`) - } - - onTestStart(test: Test) { - this.calls.push(`|${test.file.name}| RUN ${test.name}`) - } - - onTestFinished(test: Test) { - this.calls.push(`|${test.file.name}| DONE ${test.name}`) - } - - onHookStart(options: HookOptions) { - this.calls.push(`|${options.file.name}| ${options.name} start (${options.type})`) - } - - onHookEnd(options: HookOptions) { - 
this.calls.push(`|${options.file.name}| ${options.name} end (${options.type})`) - } -} - -class Sorter { - sort(files: TestSpecification[]) { - return files.sort((a, b) => { - const idA = Number.parseInt( - a.moduleId.match(/example-(\d*)\.test\.ts/)![1], - ) - const idB = Number.parseInt( - b.moduleId.match(/example-(\d*)\.test\.ts/)![1], - ) - - if (idA > idB) { - return 1 - } - if (idA < idB) { - return -1 - } - return 0 - }) - } - - shard(files: TestSpecification[]) { - return files - } -} diff --git a/test/reporters/tests/test-run.test.ts b/test/reporters/tests/test-run.test.ts new file mode 100644 index 000000000000..febe229fb4a9 --- /dev/null +++ b/test/reporters/tests/test-run.test.ts @@ -0,0 +1,176 @@ +import type { TestSpecification } from 'vitest/node' +import type { HookOptions, Reporter, TestCase, TestModule } from 'vitest/reporters' +import { sep } from 'node:path' +import { expect, test } from 'vitest' +import { runVitest } from '../../test-utils' + +test('tasks are reported in correct order', async () => { + const reporter = new CustomReporter() + + const { stdout, stderr } = await runVitest({ + config: false, + include: ['./fixtures/test-run-tests/*.test.ts'], + fileParallelism: false, + reporters: [ + // @ts-expect-error -- CustomReporter is not assignable to the public 'reporters' option type; TODO: identify the type mismatch + reporter, + ], + sequence: { sequencer: Sorter }, + }) + + expect(stdout).toBe('') + expect(stderr).toBe('') + + // TODO: Let's split this into multiple smaller ones. Split the fixtures into smaller files too. 
+ expect(reporter.calls).toMatchInlineSnapshot(` + [ + "|fixtures/test-run-tests/example-1.test.ts| queued", + "|fixtures/test-run-tests/example-1.test.ts| start", + "|fixtures/test-run-tests/example-1.test.ts| beforeAll start (module)", + "|fixtures/test-run-tests/example-1.test.ts| beforeAll end (module)", + "|fixtures/test-run-tests/example-1.test.ts| RUN some test", + "|fixtures/test-run-tests/example-1.test.ts| beforeAll start (suite)", + "|fixtures/test-run-tests/example-1.test.ts| beforeEach start (test) (some test)", + "|fixtures/test-run-tests/example-1.test.ts| beforeEach start (test) (some test)", + "|fixtures/test-run-tests/example-1.test.ts| beforeEach end (test)", + "|fixtures/test-run-tests/example-1.test.ts| beforeEach start (test) (some test)", + "|fixtures/test-run-tests/example-1.test.ts| afterEach start (test) (some test)", + "|fixtures/test-run-tests/example-1.test.ts| beforeEach end (test)", + "|fixtures/test-run-tests/example-1.test.ts| afterEach end (test)", + "|fixtures/test-run-tests/example-1.test.ts| DONE some test", + "|fixtures/test-run-tests/example-1.test.ts| DONE Fast test 1", + "|fixtures/test-run-tests/example-1.test.ts| RUN Fast test 1", + "|fixtures/test-run-tests/example-1.test.ts| RUN parallel slow tests 1.1", + "|fixtures/test-run-tests/example-1.test.ts| RUN parallel slow tests 1.2", + "|fixtures/test-run-tests/example-1.test.ts| beforeEach start (test) (some test)", + "|fixtures/test-run-tests/example-1.test.ts| afterEach start (test) (some test)", + "|fixtures/test-run-tests/example-1.test.ts| beforeAll start (suite)", + "|fixtures/test-run-tests/example-1.test.ts| afterAll start (suite)", + "|fixtures/test-run-tests/example-1.test.ts| beforeEach start (test) (Fast test 1)", + "|fixtures/test-run-tests/example-1.test.ts| afterEach start (test) (Fast test 1)", + "|fixtures/test-run-tests/example-1.test.ts| beforeEach start (test) (Fast test 1)", + "|fixtures/test-run-tests/example-1.test.ts| afterEach start (test) (Fast 
test 1)", + "|fixtures/test-run-tests/example-1.test.ts| beforeEach start (test) (parallel slow tests 1.1)", + "|fixtures/test-run-tests/example-1.test.ts| beforeEach start (test) (parallel slow tests 1.2)", + "|fixtures/test-run-tests/example-1.test.ts| DONE parallel slow tests 1.1", + "|fixtures/test-run-tests/example-1.test.ts| DONE parallel slow tests 1.2", + "|fixtures/test-run-tests/example-1.test.ts| beforeEach start (test) (parallel slow tests 1.1)", + "|fixtures/test-run-tests/example-1.test.ts| afterEach start (test) (parallel slow tests 1.1)", + "|fixtures/test-run-tests/example-1.test.ts| beforeEach start (test) (parallel slow tests 1.2)", + "|fixtures/test-run-tests/example-1.test.ts| afterEach start (test) (parallel slow tests 1.2)", + "|fixtures/test-run-tests/example-1.test.ts| beforeAll start (module)", + "|fixtures/test-run-tests/example-1.test.ts| afterAll start (module)", + "|fixtures/test-run-tests/example-1.test.ts| beforeAll end (module)", + "|fixtures/test-run-tests/example-1.test.ts| afterAll end (module)", + "|fixtures/test-run-tests/example-1.test.ts| DONE Skipped test 1", + "|fixtures/test-run-tests/example-1.test.ts| finish", + "|fixtures/test-run-tests/example-2.test.ts| queued", + "|fixtures/test-run-tests/example-2.test.ts| start", + "|fixtures/test-run-tests/example-2.test.ts| beforeAll start (module)", + "|fixtures/test-run-tests/example-2.test.ts| beforeAll end (module)", + "|fixtures/test-run-tests/example-2.test.ts| RUN some test", + "|fixtures/test-run-tests/example-2.test.ts| beforeAll start (suite)", + "|fixtures/test-run-tests/example-2.test.ts| beforeEach start (test) (some test)", + "|fixtures/test-run-tests/example-2.test.ts| beforeEach start (test) (some test)", + "|fixtures/test-run-tests/example-2.test.ts| beforeEach end (test)", + "|fixtures/test-run-tests/example-2.test.ts| beforeEach start (test) (some test)", + "|fixtures/test-run-tests/example-2.test.ts| afterEach start (test) (some test)", + 
"|fixtures/test-run-tests/example-2.test.ts| beforeEach end (test)", + "|fixtures/test-run-tests/example-2.test.ts| afterEach end (test)", + "|fixtures/test-run-tests/example-2.test.ts| DONE some test", + "|fixtures/test-run-tests/example-2.test.ts| DONE Fast test 1", + "|fixtures/test-run-tests/example-2.test.ts| RUN Fast test 1", + "|fixtures/test-run-tests/example-2.test.ts| RUN parallel slow tests 2.1", + "|fixtures/test-run-tests/example-2.test.ts| RUN parallel slow tests 2.2", + "|fixtures/test-run-tests/example-2.test.ts| beforeEach start (test) (some test)", + "|fixtures/test-run-tests/example-2.test.ts| afterEach start (test) (some test)", + "|fixtures/test-run-tests/example-2.test.ts| beforeAll start (suite)", + "|fixtures/test-run-tests/example-2.test.ts| afterAll start (suite)", + "|fixtures/test-run-tests/example-2.test.ts| beforeEach start (test) (Fast test 1)", + "|fixtures/test-run-tests/example-2.test.ts| afterEach start (test) (Fast test 1)", + "|fixtures/test-run-tests/example-2.test.ts| beforeEach start (test) (Fast test 1)", + "|fixtures/test-run-tests/example-2.test.ts| afterEach start (test) (Fast test 1)", + "|fixtures/test-run-tests/example-2.test.ts| beforeEach start (test) (parallel slow tests 2.1)", + "|fixtures/test-run-tests/example-2.test.ts| beforeEach start (test) (parallel slow tests 2.2)", + "|fixtures/test-run-tests/example-2.test.ts| DONE parallel slow tests 2.1", + "|fixtures/test-run-tests/example-2.test.ts| DONE parallel slow tests 2.2", + "|fixtures/test-run-tests/example-2.test.ts| beforeEach start (test) (parallel slow tests 2.1)", + "|fixtures/test-run-tests/example-2.test.ts| afterEach start (test) (parallel slow tests 2.1)", + "|fixtures/test-run-tests/example-2.test.ts| beforeEach start (test) (parallel slow tests 2.2)", + "|fixtures/test-run-tests/example-2.test.ts| afterEach start (test) (parallel slow tests 2.2)", + "|fixtures/test-run-tests/example-2.test.ts| beforeAll start (module)", + 
"|fixtures/test-run-tests/example-2.test.ts| afterAll start (module)", + "|fixtures/test-run-tests/example-2.test.ts| beforeAll end (module)", + "|fixtures/test-run-tests/example-2.test.ts| afterAll end (module)", + "|fixtures/test-run-tests/example-2.test.ts| DONE Skipped test 1", + "|fixtures/test-run-tests/example-2.test.ts| finish", + ] + `) +}) + +class CustomReporter implements Reporter { + calls: string[] = [] + + onTestModuleQueued(module: TestModule) { + this.calls.push(`|${normalizeFilename(module)}| queued`) + } + + onTestModuleStart(module: TestModule) { + this.calls.push(`|${normalizeFilename(module)}| start`) + } + + onTestModuleEnd(module: TestModule) { + this.calls.push(`|${normalizeFilename(module)}| finish`) + } + + onTestCaseStart(test: TestCase) { + this.calls.push(`|${normalizeFilename(test.module)}| RUN ${test.name}`) + } + + onTestCaseEnd(test: TestCase) { + this.calls.push(`|${normalizeFilename(test.module)}| DONE ${test.name}`) + } + + onHookStart(hook: HookOptions) { + const module = hook.entity.type === 'module' ? hook.entity : hook.entity.module + const name = hook.entity.type === 'test' ? ` (${hook.entity.name})` : '' + this.calls.push(`|${normalizeFilename(module)}| ${hook.name} start (${hook.entity.type})${name}`) + } + + onHookEnd(hook: HookOptions) { + const module = hook.entity.type === 'module' ? 
hook.entity : hook.entity.module + this.calls.push(`|${normalizeFilename(module)}| ${hook.name} end (${hook.entity.type})`) + } +} + +class Sorter { + sort(files: TestSpecification[]) { + return files.sort((a, b) => { + const idA = Number.parseInt( + a.moduleId.match(/example-(\d*)\.test\.ts/)![1], + ) + const idB = Number.parseInt( + b.moduleId.match(/example-(\d*)\.test\.ts/)![1], + ) + + if (idA > idB) { + return 1 + } + if (idA < idB) { + return -1 + } + return 0 + }) + } + + shard(files: TestSpecification[]) { + return files + } +} + +function normalizeFilename(module: TestModule) { + return module.moduleId + .replace(module.project.config.root, '') + .replaceAll(sep, '/') + .substring(1) +}