diff --git a/.github/workflows/build-and-review-pr.yml b/.github/workflows/build-and-review-pr.yml index 67e987c..970b4e1 100644 --- a/.github/workflows/build-and-review-pr.yml +++ b/.github/workflows/build-and-review-pr.yml @@ -21,6 +21,13 @@ on: # without disabling that requirement. If we have a status check that is always produced, # we can also use that to require all branches be up to date before they are merged. +env: + EMPTY_JSON_RESULTS_FILE: './test/input-files/empty-results.json' + FAILING_JSON_RESULTS_FILE: './test/input-files/failing.json' + NO_TESTS_JSON_RESULTS_FILE: './test/input-files/no-tests.json' + PASSING_JSON_RESULTS_FILE: './test/input-files/passing.json' + TRUNCATE_JSON_RESULTS_FILE: './test/input-files/truncate.json' + jobs: build-and-review-pr: # This reusable workflow will check to see if an action's source code has changed based on @@ -60,3 +67,784 @@ jobs: # The npm script to run to build the action. This is typically 'npm run build' if the # action needs to be compiled. For composite-run-steps actions this is typically empty. build-command: 'npm run build' + + unit-tests: + runs-on: ubuntu-latest + env: + PASSING_MD_FILE: './test/expected-markdown/unit-tests/passing-tests.md' + FAILING_MD_FILE: './test/expected-markdown/unit-tests/failing-tests.md' + NO_TESTS_MD_FILE: './test/expected-markdown/unit-tests/no-tests.md' + + steps: + - name: '-------------------------------------------------------------------------------------------------------------' + run: echo "" + - name: ' SETUP ' + run: echo "" + + - name: Setup - Checkout the action + uses: actions/checkout@v4 + + - name: '-------------------------------------------------------------------------------------------------------------' + run: echo "" + - name: ' TEST 1 - MISSING TOKEN INPUT ' + run: echo "" + + - name: 1 - When process-jest-test-results is called with a missing github-token input + id: missing-github-token + if: always() + continue-on-error: true # This is needed because we expect the step to fail but we need it to "pass" in order for the test job to succeed. 
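+        #  (With continue-on-error the job itself keeps going, but the assertions below can
+        #  still see this step's real result through steps.missing-github-token.outcome.)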
+ uses: ./ + with: + github-token: '' + results-file: '${{ env.EMPTY_JSON_RESULTS_FILE }}' + + - name: 1 - Then the action outcome should be failure + if: always() + run: ./test/assert-values-match.sh --name "step outcome" --expected "failure" --actual "${{ steps.missing-github-token.outcome }}" + + - name: 1 - And each of the outputs should be empty + if: always() + run: | + ./test/assert-value-is-empty.sh --name "test-outcome output" --value "${{ steps.missing-github-token.outputs.test-outcome }}" + ./test/assert-value-is-empty.sh --name "test-results-truncated output" --value "${{ steps.missing-github-token.outputs.test-results-truncated }}" + ./test/assert-value-is-empty.sh --name "test-results-file-path output" --value "${{ steps.missing-github-token.outputs.test-results-file-path }}" + ./test/assert-value-is-empty.sh --name "status-check-id output" --value "${{ steps.missing-github-token.outputs.status-check-id }}" + ./test/assert-value-is-empty.sh --name "pr-comment-id output" --value "${{ steps.missing-github-token.outputs.pr-comment-id }}" + + - name: '-------------------------------------------------------------------------------------------------------------' + run: echo "" + - name: ' TEST 2 - MISSING RESULTS_FILE INPUT ' + run: echo "" + + - name: 2 - When process-jest-test-results is called with a missing results-file input + id: missing-results-file + if: always() + continue-on-error: true # This is needed because we expect the step to fail but we need it to "pass" in order for the test job to succeed. + uses: ./ + with: + github-token: '${{ secrets.GITHUB_TOKEN }}' + results-file: '' + + - name: 2 - Then the action outcome should be failure + if: always() + run: ./test/assert-values-match.sh --name "step outcome" --expected "failure" --actual "${{ steps.missing-results-file.outcome }}" + + - name: 2 - And each of the outputs should be empty + if: always() + run: | + ./test/assert-value-is-empty.sh --name "test-outcome output" --value "${{ steps.missing-results-file.outputs.test-outcome }}" + ./test/assert-value-is-empty.sh --name "test-results-truncated output" --value "${{ steps.missing-results-file.outputs.test-results-truncated }}" + ./test/assert-value-is-empty.sh --name "test-results-file-path output" --value "${{ steps.missing-results-file.outputs.test-results-file-path }}" + ./test/assert-value-is-empty.sh --name "status-check-id output" --value "${{ steps.missing-results-file.outputs.status-check-id }}" + ./test/assert-value-is-empty.sh --name "pr-comment-id output" --value "${{ steps.missing-results-file.outputs.pr-comment-id }}" + + - name: '-------------------------------------------------------------------------------------------------------------' + run: echo "" + - name: ' TEST 3 - RESULTS_FILE DOES NOT EXIST ' + run: echo "" + + - name: 3 - When process-jest-test-results is called with a results file that does not exist + id: file-does-not-exist + if: always() + continue-on-error: true # This is needed because we expect the step to fail but we need it to "pass" in order for the test job to succeed. 
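+        #  (The nonexistent results file makes the action itself fail, so this step also relies
+        #  on continue-on-error; the test-outcome output is still expected to be set to Failed.)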
+ uses: ./ + with: + github-token: '${{ secrets.GITHUB_TOKEN }}' + results-file: './test/expected-markdown/input-files/file-that-does-not-exist.json' + create-status-check: false + create-pr-comment: false + + - name: 3 - Then the action outcome should be failure + if: always() + run: ./test/assert-values-match.sh --name "step outcome" --expected "failure" --actual "${{ steps.file-does-not-exist.outcome }}" + + - name: 3 - And the 'test-outcome' output should be Failed + if: always() + run: | + ./test/assert-values-match.sh --name "test-outcome output" --expected 'Failed' --actual "${{ steps.file-does-not-exist.outputs.test-outcome }}" + + - name: 3 - And the remaining outputs should be empty + if: always() + run: | + ./test/assert-value-is-empty.sh --name "test-results-truncated output" --value "${{ steps.file-does-not-exist.outputs.test-results-truncated }}" + ./test/assert-value-is-empty.sh --name "test-results-file-path output" --value "${{ steps.file-does-not-exist.outputs.test-results-file-path }}" + ./test/assert-value-is-empty.sh --name "status-check-id output" --value "${{ steps.file-does-not-exist.outputs.status-check-id }}" + ./test/assert-value-is-empty.sh --name "pr-comment-id output" --value "${{ steps.file-does-not-exist.outputs.pr-comment-id }}" + + - name: '-------------------------------------------------------------------------------------------------------------' + run: echo "" + - name: ' TEST 4 - RESULTS_FILE IS EMPTY ' + run: echo "" + + - name: 4 - When process-jest-test-results is called with a results file that is empty + id: empty-file + if: always() + uses: ./ + with: + github-token: '${{ secrets.GITHUB_TOKEN }}' + results-file: '${{ env.EMPTY_JSON_RESULTS_FILE }}' + create-status-check: false + create-pr-comment: false + + - name: 4 - Then the action outcome should be success + if: always() + run: ./test/assert-values-match.sh --name "step outcome" --expected "success" --actual "${{ steps.empty-file.outcome }}" + + - name: 4 - And the 'test-outcome' output should be Failed + if: always() + run: | + ./test/assert-values-match.sh --name "test-outcome output" --expected 'Failed' --actual "${{ steps.empty-file.outputs.test-outcome }}" + + - name: 4 - And the remaining outputs should be empty + if: always() + run: | + ./test/assert-value-is-empty.sh --name "test-results-truncated output" --value "${{ steps.empty-file.outputs.test-results-truncated }}" + ./test/assert-value-is-empty.sh --name "test-results-file-path output" --value "${{ steps.empty-file.outputs.test-results-file-path }}" + ./test/assert-value-is-empty.sh --name "status-check-id output" --value "${{ steps.empty-file.outputs.status-check-id }}" + ./test/assert-value-is-empty.sh --name "pr-comment-id output" --value "${{ steps.empty-file.outputs.pr-comment-id }}" + + - name: '-------------------------------------------------------------------------------------------------------------' + run: echo "" + - name: ' TEST 5 - PASSING TESTS ' + run: echo "" + + - name: 5 - When process-jest-test-results is called with a results file that has all passing tests + id: passing-tests + if: always() + uses: ./ + with: + github-token: '${{ secrets.GITHUB_TOKEN }}' + results-file: '${{ env.PASSING_JSON_RESULTS_FILE }}' + create-status-check: false + create-pr-comment: false + timezone: 'America/Denver' + report-name: 'Passing Test Results' + + - name: 5 - Then the action outcome should be success + if: always() + run: ./test/assert-values-match.sh --name "step outcome" --expected "success" --actual "${{ 
steps.passing-tests.outcome }}" + + - name: 5 - And the 'test-outcome' output should be Passed + if: always() + run: | + ./test/assert-values-match.sh --name "test-outcome output" --expected 'Passed' --actual "${{ steps.passing-tests.outputs.test-outcome }}" + + - name: 5 - And the 'test-results-file-path output' should be populated + if: always() + run: ./test/assert-value-is-not-empty.sh --name "test-results-file-path output" --value "${{ steps.passing-tests.outputs.test-results-file-path }}" + + - name: 5 - And the remaining outputs should be empty since status checks and pr comments were not created + if: always() + run: | + ./test/assert-value-is-empty.sh --name "status-check-id output" --value "${{ steps.passing-tests.outputs.status-check-id }}" + ./test/assert-value-is-empty.sh --name "pr-comment-id output" --value "${{ steps.passing-tests.outputs.pr-comment-id }}" + ./test/assert-value-is-empty.sh --name "test-results-truncated output" --value "${{ steps.passing-tests.outputs.test-results-truncated }}" + + - name: 5 - And the contents of test-results.md file should match the contents of ${{ env.PASSING_MD_FILE }} file + if: always() + run: | + # Comparing the test-results.md file will ensure that: + # - The provided timezone (MST/MDT) is used + # - The provided report name is used + # - The badge has the right count/status/color + # - The Duration stats are included in the report + # - The Counter stats are included in the report + + expectedFileName="${{ env.PASSING_MD_FILE }}" + actualFileName="${{ steps.passing-tests.outputs.test-results-file-path }}" + ./test/assert-file-contents-match.sh --expectedFileName $expectedFileName --actualFileName $actualFileName + + - name: '-------------------------------------------------------------------------------------------------------------' + run: echo "" + - name: ' TEST 6 - FAILING TESTS ' + run: echo "" + + - name: 6 - When process-jest-test-results is called with a results file that has failing tests + id: failing-tests + if: always() + uses: ./ + with: + github-token: '${{ secrets.GITHUB_TOKEN }}' + results-file: '${{ env.FAILING_JSON_RESULTS_FILE }}' + create-status-check: false + create-pr-comment: false + # timezone: 'UTC' # Test the default + # report-name: 'Jest Test Results' # Test the default + + - name: 6 - Then the action outcome should be success + if: always() + run: ./test/assert-values-match.sh --name "step outcome" --expected "success" --actual "${{ steps.failing-tests.outcome }}" + + - name: 6 - And the 'test-outcome' output should be Failed + if: always() + run: | + ./test/assert-values-match.sh --name "test-outcome output" --expected 'Failed' --actual "${{ steps.failing-tests.outputs.test-outcome }}" + + - name: 6 - And the 'test-results-file-path output' should be populated + if: always() + run: ./test/assert-value-is-not-empty.sh --name "test-results-file-path output" --value "${{ steps.failing-tests.outputs.test-results-file-path }}" + + - name: 6 - And the remaining outputs should be empty since status checks and pr comments were not created + if: always() + run: | + ./test/assert-value-is-empty.sh --name "status-check-id output" --value "${{ steps.failing-tests.outputs.status-check-id }}" + ./test/assert-value-is-empty.sh --name "pr-comment-id output" --value "${{ steps.failing-tests.outputs.pr-comment-id }}" + ./test/assert-value-is-empty.sh --name "test-results-truncated output" --value "${{ steps.failing-tests.outputs.test-results-truncated }}" + + - name: 6 - And the contents of test-results.md file should 
match the contents of ${{ env.FAILING_MD_FILE }} file + if: always() + run: | + # Comparing the test-results.md file will ensure that: + # - The default timezone (UTC) is used + # - The default report name (Jest Test Results) is used + # - The badge has the right count/status/color + # - The Duration stats are included in the report + # - The Counter stats are included in the report + # - The failing test details are included in the report + + expectedFileName="${{ env.FAILING_MD_FILE }}" + actualFileName="${{ steps.failing-tests.outputs.test-results-file-path }}" + ./test/assert-file-contents-match.sh --expectedFileName $expectedFileName --actualFileName $actualFileName + + - name: '-------------------------------------------------------------------------------------------------------------' + run: echo "" + - name: ' TEST 7 - NO TESTS REPORTED ' + run: echo "" + + - name: 7 - When process-jest-test-results is called with a results file that has no tests reported + id: no-tests + if: always() + uses: ./ + with: + github-token: '${{ secrets.GITHUB_TOKEN }}' + results-file: '${{ env.NO_TESTS_JSON_RESULTS_FILE }}' + report-name: 'Missing Test Results' + create-status-check: false + create-pr-comment: false + + - name: 7 - Then the action outcome should be success + if: always() + run: ./test/assert-values-match.sh --name "step outcome" --expected "success" --actual "${{ steps.no-tests.outcome }}" + + - name: 7 - And the 'test-outcome' output should be Passed + if: always() + run: | + ./test/assert-values-match.sh --name "test-outcome output" --expected 'Passed' --actual "${{ steps.no-tests.outputs.test-outcome }}" + + - name: 7 - And the 'test-results-file-path output' should be populated + if: always() + run: ./test/assert-value-is-not-empty.sh --name "test-results-file-path output" --value "${{ steps.no-tests.outputs.test-results-file-path }}" + + - name: 7 - And the remaining outputs should be empty since status checks and pr comments were not created + if: always() + run: | + ./test/assert-value-is-empty.sh --name "status-check-id output" --value "${{ steps.no-tests.outputs.status-check-id }}" + ./test/assert-value-is-empty.sh --name "pr-comment-id output" --value "${{ steps.no-tests.outputs.pr-comment-id }}" + ./test/assert-value-is-empty.sh --name "test-results-truncated output" --value "${{ steps.no-tests.outputs.test-results-truncated }}" + + - name: 7 - And the contents of test-results.md file should match the contents of ${{ env.NO_TESTS_MD_FILE }} file + if: always() + run: | + # Comparing the test-results.md file will ensure that: + # - The default timezone (UTC) is used + # - The default report name (Missing Test Results) is used + # - The badge has the right count/status/color + # - The Duration section should not be included in the report + # - The Counter stats are included in the report + # - The no-test details are included in the report + + expectedFileName="${{ env.NO_TESTS_MD_FILE }}" + actualFileName="${{ steps.no-tests.outputs.test-results-file-path }}" + ./test/assert-file-contents-match.sh --expectedFileName $expectedFileName --actualFileName $actualFileName + + test-status-checks: + runs-on: ubuntu-latest + env: + NO_FAILURES_MD_FILE: './test/expected-markdown/status-checks/no-failures.md' + IGNORE_FAILURES_MD_FILE: './test/expected-markdown/status-checks/ignore-failures.md' + ALLOW_FAILURES_MD_FILE: './test/expected-markdown/status-checks/allow-failures.md' + + NO_FAILURES_REPORT_NAME: 'No Failures Scenario' + IGNORE_FAILURES_REPORT_NAME: 'Ignore Failures Scenario' 
+ ALLOW_FAILURES_REPORT_NAME: 'Allow Failures Scenario' + + steps: + - name: '-------------------------------------------------------------------------------------------------------------' + run: echo "" + - name: ' SETUP ' + run: echo "" + + - name: Setup - Fail test job if fork + run: | + if [ "${{ github.event.pull_request.head.repo.fork }}" == "true" ]; then + echo "This test job requires the `checks: write` scope on the GITHUB_TOKEN which PRs from forks do not have. Before this PR can be merged, the tests should be run on an intermediate branch created by repository owners." + exit 1 + fi + + - name: Setup - Checkout the action + uses: actions/checkout@v4 + + - name: '-------------------------------------------------------------------------------------------------------------' + run: echo "" + - name: ' TEST 8 - STATUS CHECK - NO FAILURES ' + run: echo "" + + - name: 8 - When process-jest-test-results is called with no failures + if: always() + id: no-failures + uses: ./ + with: + github-token: '${{ secrets.GITHUB_TOKEN }}' + results-file: ${{ env.PASSING_JSON_RESULTS_FILE }} + report-name: ${{ env.NO_FAILURES_REPORT_NAME }} + create-status-check: true + create-pr-comment: false + + - name: 8 - Then the action outcome should be success + if: always() + run: ./test/assert-values-match.sh --name "step outcome" --expected "success" --actual "${{ steps.no-failures.outcome }}" + + - name: 8 - And the status-check-id output should be populated + if: always() + run: ./test/assert-value-is-not-empty.sh --name "status-check-id output" --value "${{ steps.no-failures.outputs.status-check-id }}" + + - name: 8 - And the test-outcome output should be Passed + if: always() + run: ./test/assert-values-match.sh --name "test-outcome output" --expected "Passed" --actual "${{ steps.no-failures.outputs.test-outcome }}" + + - name: 8 - And the status check should match the inputs + if: always() + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + const assertStatusCheckExists = require('./test/assert-status-check-exists.js'); + const assertStatusCheckMatchesExpectations = require('./test/assert-status-check-matches-expectations.js'); + + const checkId = '${{ steps.no-failures.outputs.status-check-id }}'; + const actualCheck = await assertStatusCheckExists(github, context, core, checkId); + const expectedBody = fs.readFileSync('${{ env.NO_FAILURES_MD_FILE }}', 'utf8'); + + const expectedValues = { + name: 'status check - ${{ env.NO_FAILURES_REPORT_NAME }}'.toLowerCase(), + status: 'completed', + conclusion: 'success', + title: '${{ env.NO_FAILURES_REPORT_NAME }}', + text: expectedBody + }; + assertStatusCheckMatchesExpectations(core, actualCheck, expectedValues); + + - name: '-------------------------------------------------------------------------------------------------------------' + run: echo "" + - name: ' TEST 9 - STATUS CHECK - IGNORE FAILURES ' + run: echo "" + + - name: 9 - When process-jest-test-results is called with test failures & ignore-test-failures=true + if: always() + id: ignore-failures + uses: ./ + with: + github-token: '${{ secrets.GITHUB_TOKEN }}' + results-file: ${{ env.FAILING_JSON_RESULTS_FILE }} + report-name: ${{ env.IGNORE_FAILURES_REPORT_NAME }} + create-status-check: true + ignore-test-failures: true + create-pr-comment: false + + - name: 9 - Then the action outcome should be success + if: always() + run: ./test/assert-values-match.sh --name "step outcome" --expected "success" --actual "${{ steps.ignore-failures.outcome }}" + + - name: 9 - And the 
status-check-id output should be populated + if: always() + run: ./test/assert-value-is-not-empty.sh --name "status-check-id output" --value "${{ steps.ignore-failures.outputs.status-check-id }}" + + - name: 9 - And the test-outcome output should be Failed + if: always() + run: ./test/assert-values-match.sh --name "test-outcome output" --expected "Failed" --actual "${{ steps.ignore-failures.outputs.test-outcome }}" + + - name: 9 - And the status check should match the inputs + if: always() + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + const assertStatusCheckExists = require('./test/assert-status-check-exists.js'); + const assertStatusCheckMatchesExpectations = require('./test/assert-status-check-matches-expectations.js'); + + const checkId = '${{ steps.ignore-failures.outputs.status-check-id }}'; + const actualCheck = await assertStatusCheckExists(github, context, core, checkId); + const expectedBody = fs.readFileSync('${{ env.IGNORE_FAILURES_MD_FILE }}', 'utf8'); + + const expectedValues = { + name: 'status check - ${{ env.IGNORE_FAILURES_REPORT_NAME }}'.toLowerCase(), + status: 'completed', + conclusion: 'neutral', + title: '${{ env.IGNORE_FAILURES_REPORT_NAME }}', + text: expectedBody + }; + assertStatusCheckMatchesExpectations(core, actualCheck, expectedValues); + + - name: '-------------------------------------------------------------------------------------------------------------' + run: echo "" + - name: ' TEST 10 - STATUS CHECK - ALLOW FAILURES ' + run: echo "" + + - name: 10 - When process-jest-test-results is called with test failures & ignore-test-failures=false + if: always() + id: allow-failures + uses: ./ + with: + github-token: '${{ secrets.GITHUB_TOKEN }}' + results-file: ${{ env.FAILING_JSON_RESULTS_FILE }} + report-name: ${{ env.ALLOW_FAILURES_REPORT_NAME }} + create-status-check: true + ignore-test-failures: false + create-pr-comment: false + + - name: 10 - Then the action outcome should be success + if: always() + run: ./test/assert-values-match.sh --name "step outcome" --expected "success" --actual "${{ steps.allow-failures.outcome }}" + + - name: 10 - And the status-check-id output should be populated + if: always() + run: ./test/assert-value-is-not-empty.sh --name "status-check-id output" --value "${{ steps.allow-failures.outputs.status-check-id }}" + + - name: 10 - And the test-outcome output should be Failed + if: always() + run: ./test/assert-values-match.sh --name "test-outcome output" --expected "Failed" --actual "${{ steps.allow-failures.outputs.test-outcome }}" + + - name: 10 - And the status check should match the inputs + if: always() + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + const assertStatusCheckExists = require('./test/assert-status-check-exists.js'); + const assertStatusCheckMatchesExpectations = require('./test/assert-status-check-matches-expectations.js'); + + const checkId = '${{ steps.allow-failures.outputs.status-check-id }}'; + const actualCheck = await assertStatusCheckExists(github, context, core, checkId); + const expectedBody = fs.readFileSync('${{ env.ALLOW_FAILURES_MD_FILE }}', 'utf8'); + + const expectedValues = { + name: 'status check - ${{ env.ALLOW_FAILURES_REPORT_NAME }}'.toLowerCase(), + status: 'completed', + conclusion: 'failure', + title: '${{ env.ALLOW_FAILURES_REPORT_NAME }}', + text: expectedBody + }; + assertStatusCheckMatchesExpectations(core, actualCheck, expectedValues); + + - name: 
'-------------------------------------------------------------------------------------------------------------' + run: echo "" + - name: ' TEARDOWN ' + run: echo "" + + - name: Teardown - Modify failing Status Check conclusion + if: always() + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + const updateFailingStatusCheck = require('./test/update-failing-status-check.js'); + + await updateFailingStatusCheck(github, context, core, '${{ steps.allow-failures.outputs.status-check-id }}'); + + - name: '-------------------------------------------------------------------------------------------------------------' + run: echo "" + + test-pr-comments: + runs-on: ubuntu-latest + env: + EXISTING_COMMENT_ID: '' + COMMENT_IDENTIFIER: 'existing-comment-${{ github.run_id }}' + + UPDATE_WITH_MATCHING_PREFIX_MD_FILE: './test/expected-markdown/pr-comments/update-matching-prefix.md' + UPDATE_WITHOUT_MATCHING_PREFIX_MD_FILE: './test/expected-markdown/pr-comments/update-without-matching-prefix.md' + NO_UPDATE_MD_FILE: './test/expected-markdown/pr-comments/no-update.md' + TRUNCATE_FULL_MD_FILE: './test/expected-markdown/pr-comments/truncate-full-markdown.md' + TRUNCATE_TRUNCATED_MD_FILE: './test/expected-markdown/pr-comments/truncate-truncated-markdown.md' + + UPDATE_WITH_MATCHING_PREFIX_REPORT_NAME: 'Update Comment with Matching Prefix Scenario' + UPDATE_WITHOUT_MATCHING_PREFIX_REPORT_NAME: 'Update Comment but no Matching Prefix Scenario' + NO_UPDATE_REPORT_NAME: 'Do Not Update Comment Scenario' + TRUNCATE_FAILURES_REPORT_NAME: 'Truncated PR Comment Scenario' + + steps: + - name: '-------------------------------------------------------------------------------------------------------------' + run: echo "" + - name: ' SETUP ' + run: echo "" + + - name: Setup - Fail test job if fork + run: | + if [ "${{ github.event.pull_request.head.repo.fork }}" == "true" ]; then + echo "This test job requires the `pull_request: write` scope on the GITHUB_TOKEN which PRs from forks do not have. Before this PR can be merged, the tests should be run on an intermediate branch created by repository owners." 
+ exit 1 + fi + + - name: Setup - Checkout the action + uses: actions/checkout@v4 + + - name: Setup - Delete pre-existing process-jest-test-results PR Comments + if: always() + uses: actions/github-script@v7 + with: + script: | + const deletePrComments = require('./test/delete-pre-existing-comments.js'); + await deletePrComments(github, context, core); + + - name: Setup - Create a process-jest-test-results comment that can be updated + if: always() + uses: actions/github-script@v7 + with: + script: | + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + body: `\nThis comment will be replaced soon.` + }) + .then(response => { + core.info(`The 'existing' process-jest-test-results comment has id: ${response.data.id}`); + core.exportVariable('EXISTING_COMMENT_ID', response.data.id); + }) + .catch(error => { + core.setFailed(`An error occurred in the setup step while creating a comment: ${error.message}`); + }); + await new Promise(r => setTimeout(r, 5 * 1000)); + + - name: '-------------------------------------------------------------------------------------------------------------' + run: echo "" + - name: ' TEST 11 - PR COMMENT - UPDATE W/ MATCHING PREFIX ' + run: echo "" + + - name: 11 - When process-jest-test-results is called with updateComment=true and there is a comment with matching prefix + if: always() + id: update-with-matching-prefix + uses: ./ + with: + github-token: '${{ secrets.GITHUB_TOKEN }}' + results-file: ${{ env.PASSING_JSON_RESULTS_FILE }} + report-name: ${{ env.UPDATE_WITH_MATCHING_PREFIX_REPORT_NAME }} + create-status-check: false + create-pr-comment: true + update-comment-if-one-exists: true + comment-identifier: ${{ env.COMMENT_IDENTIFIER }} + + - name: 11 - Then the action outcome should be success + if: always() + run: ./test/assert-values-match.sh --name "step outcome" --expected "success" --actual "${{ steps.update-with-matching-prefix.outcome }}" + + - name: 11 - And the pr-comment-id output should match the existing comment id + if: always() + run: ./test/assert-values-match.sh --name "pr-comment-id output" --expected "${{ env.EXISTING_COMMENT_ID }}" --actual "${{ steps.update-with-matching-prefix.outputs.pr-comment-id }}" + + - name: 11 - And the test-results-truncated output should be false + if: always() + run: ./test/assert-values-match.sh --name "test-results-truncated output" --expected "false" --actual "${{ steps.update-with-matching-prefix.outputs.test-results-truncated }}" + + - name: 11 - And the pr-comment should match the match the expected values + if: always() + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + const assertCommentExists = require('./test/assert-pr-comment-exists.js'); + const assertCommentMatchesExpectations = require('./test/assert-pr-comment-matches-expectations.js'); + + const commentId = '${{ steps.update-with-matching-prefix.outputs.pr-comment-id }}'; + const actualComment = await assertCommentExists(github, context, core, commentId); + + const expectedMarkdown = fs.readFileSync('${{ env.UPDATE_WITH_MATCHING_PREFIX_MD_FILE }}', 'utf8'); + const actualTestResults = fs.readFileSync('${{ steps.update-with-matching-prefix.outputs.test-results-file-path }}', 'utf8'); + + const expectedComment = { + prefix: '', + fullMarkdown: expectedMarkdown, + action: 'updated', + truncated: false + }; + assertCommentMatchesExpectations(core, actualComment, actualTestResults, expectedComment); + + - name: 
'-------------------------------------------------------------------------------------------------------------' + run: echo "" + - name: ' TEST 12 - PR COMMENT - UPDATE W/O MATCHING PREFIX ' + run: echo "" + + - name: 12 - When process-jest-test-results is called with updateComment=true but there is no comment with matching prefix + if: always() + id: update-without-matching-prefix + uses: ./ + with: + github-token: '${{ secrets.GITHUB_TOKEN }}' + results-file: ${{ env.PASSING_JSON_RESULTS_FILE }} + report-name: ${{ env.UPDATE_WITHOUT_MATCHING_PREFIX_REPORT_NAME }} + create-status-check: false + create-pr-comment: true + update-comment-if-one-exists: true + comment-identifier: 'different-identifier-${{ github.run_id }}' + + - name: 12 - Then the action outcome should be success + if: always() + run: ./test/assert-values-match.sh --name "step outcome" --expected "success" --actual "${{ steps.update-without-matching-prefix.outcome }}" + + - name: 12 - And the pr-comment-id output should be different than the existing comment id + if: always() + run: ./test/assert-values-do-not-match.sh --name "pr-comment-id output" --value1 "${{ env.EXISTING_COMMENT_ID }}" --value2 "${{ steps.update-without-matching-prefix.outputs.pr-comment-id }}" + + - name: 12 - And the test-results-truncated output should be false + if: always() + run: ./test/assert-values-match.sh --name "test-results-truncated output" --expected "false" --actual "${{ steps.update-without-matching-prefix.outputs.test-results-truncated }}" + + - name: 12 - And the pr-comment should match the expected values + if: always() + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + const assertCommentExists = require('./test/assert-pr-comment-exists.js'); + const assertCommentMatchesExpectations = require('./test/assert-pr-comment-matches-expectations.js'); + + const commentId = '${{ steps.update-without-matching-prefix.outputs.pr-comment-id }}'; + const actualComment = await assertCommentExists(github, context, core, commentId); + + const expectedMarkdown = fs.readFileSync('${{ env.UPDATE_WITHOUT_MATCHING_PREFIX_MD_FILE }}', 'utf8'); + const actualTestResults = fs.readFileSync('${{ steps.update-without-matching-prefix.outputs.test-results-file-path }}', 'utf8'); + + const expectedComment = { + prefix: '', + fullMarkdown: expectedMarkdown, + action: 'created', + truncated: false + }; + assertCommentMatchesExpectations(core, actualComment, actualTestResults, expectedComment); + + - name: '-------------------------------------------------------------------------------------------------------------' + run: echo "" + - name: ' TEST 13 - PR COMMENT - NO UPDATE ' + run: echo "" + + - name: 13 - When process-jest-test-results is called with updateComment=false + if: always() + id: matching-prefix-no-update + uses: ./ + with: + github-token: '${{ secrets.GITHUB_TOKEN }}' + results-file: ${{ env.PASSING_JSON_RESULTS_FILE }} + report-name: ${{ env.NO_UPDATE_REPORT_NAME }} + create-status-check: false + create-pr-comment: true + update-comment-if-one-exists: false + + - name: 13 - Then the action outcome should be success + if: always() + run: ./test/assert-values-match.sh --name "step outcome" --expected "success" --actual "${{ steps.matching-prefix-no-update.outcome }}" + + - name: 13 - And the pr-comment-id output should be different than the existing comment id + if: always() + run: ./test/assert-values-do-not-match.sh --name "pr-comment-id output" --value1 "${{ env.EXISTING_COMMENT_ID }}" --value2 "${{ 
steps.matching-prefix-no-update.outputs.pr-comment-id }}" + + - name: 13 - And the test-results-truncated output should be false + if: always() + run: ./test/assert-values-match.sh --name "test-results-truncated output" --expected "false" --actual "${{ steps.matching-prefix-no-update.outputs.test-results-truncated }}" + + - name: 13 - And the pr-comment should match the expected values + if: always() + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + const assertCommentExists = require('./test/assert-pr-comment-exists.js'); + const assertCommentMatchesExpectations = require('./test/assert-pr-comment-matches-expectations.js'); + + const commentId = '${{ steps.matching-prefix-no-update.outputs.pr-comment-id }}'; + const actualComment = await assertCommentExists(github, context, core, commentId); + + const expectedMarkdown = fs.readFileSync('${{ env.NO_UPDATE_MD_FILE }}', 'utf8'); + const actualTestResults = fs.readFileSync('${{ steps.matching-prefix-no-update.outputs.test-results-file-path }}', 'utf8'); + + const expectedComment = { + prefix: ``, + fullMarkdown: expectedMarkdown, + action: 'created', + truncated: false + }; + assertCommentMatchesExpectations(core, actualComment, actualTestResults, expectedComment); + + - name: '-------------------------------------------------------------------------------------------------------------' + run: echo "" + - name: ' TEST 14 - PR COMMENT - TRUNCATE ' + run: echo "" + + - name: 14 - When process-jest-test-results is called with a large comment that needs to be truncated + if: always() + id: truncate + uses: ./ + with: + github-token: '${{ secrets.GITHUB_TOKEN }}' + results-file: ${{ env.TRUNCATE_JSON_RESULTS_FILE }} + report-name: ${{ env.TRUNCATE_FAILURES_REPORT_NAME }} + create-status-check: false + create-pr-comment: true + update-comment-if-one-exists: true + comment-identifier: ${{ env.COMMENT_IDENTIFIER }} + + - name: 14 - Then the action outcome should be success + if: always() + run: ./test/assert-values-match.sh --name "step outcome" --expected "success" --actual "${{ steps.truncate.outcome }}" + + - name: 14 - And the pr-comment-id output should match the existing comment id + if: always() + run: ./test/assert-values-match.sh --name "pr-comment-id output" --expected "${{ env.EXISTING_COMMENT_ID }}" --actual "${{ steps.truncate.outputs.pr-comment-id }}" + + - name: 14 - And the test-results-truncated output should be true + if: always() + run: ./test/assert-values-match.sh --name "test-results-truncated output" --expected "true" --actual "${{ steps.truncate.outputs.test-results-truncated }}" + + - name: 14 - And the pr-comment should match the expected values + if: always() + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + const assertCommentExists = require('./test/assert-pr-comment-exists.js'); + const assertCommentMatchesExpectations = require('./test/assert-pr-comment-matches-expectations.js'); + + const commentId = '${{ steps.truncate.outputs.pr-comment-id }}'; + const actualComment = await assertCommentExists(github, context, core, commentId); + + const expectedMarkdown = fs.readFileSync('${{ env.TRUNCATE_FULL_MD_FILE }}', 'utf8'); + const expectedTruncatedMarkdown = fs.readFileSync('${{ env.TRUNCATE_TRUNCATED_MD_FILE }}', 'utf8'); + const actualTestResults = fs.readFileSync('${{ steps.truncate.outputs.test-results-file-path }}', 'utf8'); + const truncateMessage = 'Test results truncated due to character limit. 
See full report in output.'; + + const expectedComment = { + prefix: ``, + fullMarkdown: expectedMarkdown, + action: 'updated', + truncated: true, + truncatedMarkdown: expectedTruncatedMarkdown, + }; + assertCommentMatchesExpectations(core, actualComment, actualTestResults, expectedComment); + + - name: '-------------------------------------------------------------------------------------------------------------' + run: echo "" + - name: ' TEARDOWN ' + run: echo "" + + - name: Teardown - Delete PR Comments + if: always() + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + const deletePrComment = require('./test/delete-pr-comment.js'); + + await deletePrComment(github, context, core, '${{ env.EXISTING_COMMENT_ID }}'); + await deletePrComment(github, context, core, '${{ steps.matching-prefix-no-update.outputs.pr-comment-id }}'); + await deletePrComment(github, context, core, '${{ steps.update-without-matching-prefix.outputs.pr-comment-id }}'); + + - name: '-------------------------------------------------------------------------------------------------------------' + run: echo "" diff --git a/.gitignore b/.gitignore index 43f90b8..35853dd 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ -node_modules/ -/.vscode/launch.json +node_modules/ +/.vscode/launch.json +/pull-request.json diff --git a/README.md b/README.md index d39b56e..b3ab029 100644 --- a/README.md +++ b/README.md @@ -29,11 +29,18 @@ This action does not run the Jest tests itself and it can only process one resul ## Failures -The status check can be seen as a new item on the workflow run, a PR comment or on the PR Status Check section. If the test results contain failures, the status check will be marked as failed. Having the status check marked as failed will prevent PRs from being merged. If this status check behavior is not desired, the `ignore-test-failures` input can be set and the outcome will be marked as neutral if test failures are detected. The status badge that is shown in the comment or status check body will still indicate it was a failure though. +The test status & action's conclusion can be viewed in multiple places: + +- In the body of a PR comment this action generates +- Next to the name of one of the status checks under the `Checks` section of a PR +- Next to the name of one of the status checks under the `Jobs` section of the workflow run +- In the body of a status check listed on the workflow run + +If the test results contain failures, the status check's conclusion will be set to `failure`. If the status check is required and its conclusion is `failure` the PR cannot be merged. If this required status check behavior is not desired, the `ignore-test-failures` input can be set and the conclusion will be marked as `neutral` if test failures are detected. The status badge that is shown in the comment or status check body will still indicate it was a `failure` though. ## Limitations -GitHub does have a size limitation of 65535 characters for a Status Check body or a PR Comment. This action will fail if the test results exceed the GitHub [limit]. To mitigate this size issue only failed tests are included in the output. +GitHub does have a size limitation of 65535 characters for a Status Check body or a PR Comment. This action would fail if the test results exceeded the GitHub [limit]. To mitigate this size issue only details for failed tests are included in the output in addition to a badge, duration info and outcome info. 
+If the comment still exceeds that size, it will be truncated with a note to see the remaining output in the log.
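+
+If preserving the full report matters, the `test-results-truncated` output can be checked and the un-truncated markdown (exposed via the `test-results-file-path` output) can be kept, for example as a workflow artifact. A minimal sketch (the step id and artifact name are illustrative):
+
+```yml
+- name: Process jest results
+  id: process-jest
+  uses: im-open/process-jest-test-results@v2.2.0
+  with:
+    github-token: ${{ secrets.GITHUB_TOKEN }}
+    results-file: 'jest-results.json'
+
+- name: Upload the full results when the comment was truncated
+  if: steps.process-jest.outputs.test-results-truncated == 'true'
+  uses: actions/upload-artifact@v4
+  with:
+    name: jest-test-results-markdown
+    path: ${{ steps.process-jest.outputs.test-results-file-path }}
+```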
 
 If you have multiple workflows triggered by the same `pull_request` or `push` event, GitHub creates one checksuite for that commit. The checksuite gets assigned to one of the workflows randomly and all status checks for that commit are reported to that checksuite. That means if there are multiple workflows with the same trigger, your status checks may show on a different workflow run than the run that created them.
 
@@ -61,22 +68,27 @@ For failed test runs you can expand each failed test and view more details about
 
 ## Inputs
 
-| Parameter                      | Is Required | Default           | Description                                                                                                                                                                          |
-|--------------------------------|-------------|-------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `github-token`                 | true        | N/A               | Used for the GitHub Checks API. Value is generally: secrets.GITHUB_TOKEN.                                                                                                            |
-| `results-file`                 | true        | N/A               | The json results file generated by jest.                                                                                                                                             |
-| `report-name`                  | false       | jest test results | The desired name of the report that is shown on the PR Comment and inside the Status Check.                                                                                          |
-| `create-status-check`          | false       | true              | Flag indicating whether a status check with jest test results should be generated.                                                                                                   |
-| `create-pr-comment`            | false       | true              | Flag indicating whether a PR comment with jest test results should be generated. When `true` the default behavior is to update an existing comment if one exists.                    |
-| `update-comment-if-one-exists` | false       | true              | When `create-pr-comment` is true, this flag determines whether a new comment is created or if the action updates an existing comment if one is found which is the default behavior. |
-| `ignore-test-failures`         | false       | `false`           | When set to true the check status is set to `Neutral` when there are test failures and it will not block pull requests.                                                              |
-| `timezone`                     | false       | `UTC`             | IANA time zone name (e.g. America/Denver) to display dates in.                                                                                                                       |
+| Parameter                      | Is Required | Default                    | Description                                                                                                                                                                                                                                                                                                                                                                                  |
+|--------------------------------|-------------|----------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `github-token`                 | true        | N/A                        | Used for the GitHub Checks API. Value is generally: `secrets.GITHUB_TOKEN`.                                                                                                                                                                                                                                                                                                                  |
+| `results-file`                 | true        | N/A                        | The json results file generated by jest.                                                                                                                                                                                                                                                                                                                                                     |
+| `report-name`                  | false       | `Jest Test Results`        | The desired name of the report that is shown on the PR Comment and inside the Status Check.                                                                                                                                                                                                                                                                                                  |
+| `create-status-check`          | false       | `true`                     | Flag indicating whether a status check with jest test results should be generated.                                                                                                                                                                                                                                                                                                           |
+| `ignore-test-failures`         | false       | `false`                    | If there are test failures, the check's conclusion is set to `neutral` so it will not block pull requests. <br/><br/> *Only applicable when `create-status-check` is true.*                                                                                                                                                                                                                  |
+| `create-pr-comment`            | false       | `true`                     | Flag indicating whether a PR comment with jest test results should be generated. When `true` the default behavior is to update an existing comment if one exists.                                                                                                                                                                                                                            |
+| `update-comment-if-one-exists` | false       | `true`                     | This flag determines whether a new comment is created or if the action updates an existing comment (*if one is found*). <br/><br/> *Only applicable when `create-pr-comment` is true.*                                                                                                                                                                                                       |
+| `comment-identifier`           | false       | `GITHUB_JOB_GITHUB_ACTION` | A unique identifier which will be added to the generated markdown as a comment (*it will not be visible in the PR comment*). <br/><br/> This identifier enables creating then updating separate results comments on the PR if more than one instance of this action is included in a single job. This can be helpful when there are multiple test projects that run separately but are part of the same job. <br/><br/> *Only applicable when `create-pr-comment` is true.* |
+| `timezone`                     | false       | `UTC`                      | IANA time zone name (e.g. America/Denver) to display dates in.                                                                                                                                                                                                                                                                                                                               |
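+
+For instance, two instances of this action in the same job can each maintain their own PR comment by supplying distinct identifiers (the identifiers and file names below are illustrative):
+
+```yml
+- uses: im-open/process-jest-test-results@v2.2.0
+  with:
+    github-token: ${{ secrets.GITHUB_TOKEN }}
+    results-file: 'unit-test-results.json'
+    comment-identifier: 'unit-tests'
+
+- uses: im-open/process-jest-test-results@v2.2.0
+  with:
+    github-token: ${{ secrets.GITHUB_TOKEN }}
+    results-file: 'integration-test-results.json'
+    comment-identifier: 'integration-tests'
+```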
 
 ## Outputs
 
-| Output         | Description                                                                                                                                                              |
-|----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `test-outcome` | Test outcome based on presence of failing tests: *Failed,Passed* <br/> If exceptions are thrown or if it exits early because of argument errors, this is set to Failed. |
+| Output                   | Description                                                                                                                                                              |
+|--------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `test-outcome`           | Test outcome based on presence of failing tests: *Failed,Passed* <br/> If exceptions are thrown or if it exits early because of argument errors, this is set to Failed. |
+| `test-results-truncated` | Flag indicating whether test results were truncated due to markdown exceeding character limit of 65535.                                                                 |
+| `test-results-file-path` | File path for the file that contains the pre-truncated test results in markdown format. This is the same output that is posted in the PR comment.                       |
+| `status-check-id`        | The ID of the Status Check that was created. This is only set if `create-status-check` is `true` and a status check was created successfully.                           |
+| `pr-comment-id`          | The ID of the PR comment that was created. This is only set if `create-pr-comment` is `true` and a PR comment was created successfully.                                 |
 
 ## Usage Examples
 
@@ -93,7 +105,7 @@ jobs:
     pull-requests: write
 
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
 
       - name: jest test with Coverage
         continue-on-error: true
@@ -103,7 +115,7 @@ jobs:
       - name: Process jest results with default
         if: always()
         # You may also reference just the major or major.minor version
-        uses: im-open/process-jest-test-results@v2.1.3
+        uses: im-open/process-jest-test-results@v2.2.0
         with:
           github-token: ${{ secrets.GITHUB_TOKEN }}
           results-file: 'src/ProjectWithJestTests/jest-results.json'
@@ -116,7 +128,7 @@ jobs:
   advanced-ci:
     runs-on: [ubuntu-20.04]
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
 
       - name: jest test with results file
         continue-on-error: true
@@ -125,7 +137,7 @@ jobs:
 
       - name: Process jest results
         id: process-jest
-        uses: im-open/process-jest-test-results@v2.1.3
+        uses: im-open/process-jest-test-results@v2.2.0
         with:
           github-token: ${{ secrets.GITHUB_TOKEN }}
           results-file: 'jest.json'
@@ -193,7 +205,7 @@ This project has adopted the [im-open's Code of Conduct](https://github.com/im-o
 
 ## License
 
-Copyright © 2023, Extend Health, LLC. Code released under the [MIT license](LICENSE).
+Copyright © 2024, Extend Health, LLC. Code released under the [MIT license](LICENSE).
 
 [Incrementing the Version]: #incrementing-the-version
diff --git a/action.yml b/action.yml
index a9b388b..29ef2ad 100644
--- a/action.yml
+++ b/action.yml
@@ -1,45 +1,65 @@
-name: process-jest-test-results
-
-description: |
-  Action that parses a jest json results file and creates a status check or pr comment with the results.
-  Tests are not run as part of these action.
-  The results can be seen on the workflow run or PR.
-
-inputs:
-  github-token:
-    description: 'Token used to interact with the repository. Generally secrets.GITHUB_TOKEN.'
-    required: true
-  results-file:
-    description: 'The json test results file output by jest.'
-    required: true
-  report-name:
-    description: 'The desired name of the report that is shown on the PR Comment and inside the Status Check.'
-    required: true
-    default: Jest Test Results
-  ignore-test-failures:
-    description: 'When set to true the status check is set to neutral when there are test failures and it will not block pull requests.'
-    required: false
-    default: 'false'
-  create-status-check:
-    description: 'Flag indicating whether a status check with test results should be generated.'
-    required: true
-    default: 'true'
-  create-pr-comment:
-    description: 'Flag indicating whether a PR comment with test results should be generated. When `true` the default behavior is to update an existing comment if one exists.'
-    required: true
-    default: 'true'
-  update-comment-if-one-exists:
-    description: 'When `create-pr-comment` is true, this flag determines whether a new comment is created or if the action updates an existing comment if one is found which is the default behavior.'
-    required: true
-    default: 'true'
-  timezone:
-    description: 'IANA time zone name (e.g. America/Denver) to display dates in. If timezone is not provided, dates will be shown in UTC.'
-    required: false
-
-outputs:
-  test-outcome:
-    description: 'Test outcome based on presence of failing tests: Failed|Passed. If exceptions are thrown or if it exits early because of argument errors, this is set to Failed.'
-
-runs:
-  using: 'node16'
-  main: 'dist/index.js'
+name: process-jest-test-results
+
+description: |
+  Action that parses a jest json results file and creates a status check or pr comment with the results.
+  Tests are not run as part of this action.
+  The results can be seen on the workflow run or PR.
+
+inputs:
+  github-token:
+    description: 'Token used to interact with the repository. Generally `secrets.GITHUB_TOKEN`.'
+    required: true
+  results-file:
+    description: 'The json test results file output by jest.'
+    required: true
+  report-name:
+    description: 'The desired name of the report that is shown on the PR Comment and inside the Status Check.'
+    required: true
+    default: Jest Test Results
+  create-status-check:
+    description: 'Flag indicating whether a status check with test results should be generated.'
+    required: true
+    default: 'true'
+  ignore-test-failures:
+    description: |
+      If there are test failures, the check's conclusion is set to `neutral` so it will not block pull requests.
+      *Only applicable when `create-status-check` is true.*
+    required: false
+    default: 'false'
+  create-pr-comment:
+    description: 'Flag indicating whether a PR comment with test results should be generated. When `true` the default behavior is to update an existing comment if one exists.'
+    required: true
+    default: 'true'
+  update-comment-if-one-exists:
+    description: |
+      This flag determines whether a new comment is created or if the action updates an existing comment (*if one is found*).
+      *Only applicable when `create-pr-comment` is true.*
+    required: true
+    default: 'true'
+  comment-identifier:
+    description: |
+      A unique identifier which will be added to the generated markdown as a comment (*it will not be visible in the PR comment*).
+      This identifier enables creating then updating separate results comments on the PR if more than one instance of this action is included in a single job.
+      This can be helpful when there are multiple test projects that run separately but are part of the same job.
+      Defaults to GITHUB_JOB_GITHUB_ACTION if not provided.
+      *Only applicable when `create-pr-comment` is true.*
+    required: false
+  timezone:
+    description: 'IANA time zone name (e.g. America/Denver) to display dates in. If timezone is not provided, dates will be shown in UTC.'
+    required: false
+
+outputs:
+  test-outcome:
+    description: 'Test outcome based on presence of failing tests: Failed|Passed. If exceptions are thrown or if it exits early because of argument errors, this is set to Failed.'
+  test-results-truncated:
+    description: 'Flag indicating whether test results were truncated due to markdown exceeding character limit of 65535.'
+  test-results-file-path:
+    description: 'File path for the file that contains the pre-truncated test results in markdown format. This is the same output that is posted in the PR comment.'
+  status-check-id:
+    description: 'The ID of the Status Check that was created. This is only set if `create-status-check` is `true` and a status check was created successfully.'
+  pr-comment-id:
+    description: 'The ID of the PR comment that was created. This is only set if `create-pr-comment` is `true` and a PR comment was created successfully.'
+
+runs:
+  using: 'node20'
+  main: 'dist/index.js'
diff --git a/dist/index.js b/dist/index.js
index 33a056f..48e6696 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -2369,6 +2369,7 @@ var require_utils2 = __commonJS({
   'src/utils.js'(exports2, module2) {
     var core2 = require_core();
     var fs = require('fs');
+    var path = require('path');
     async function readJsonResultsFromFile2(resultsFile2) {
       core2.info('Reading results from jest results file....');
       if (fs.existsSync(resultsFile2)) {
@@ -2388,7 +2389,8 @@ var require_utils2 = __commonJS({
       }
     }
     function areThereAnyFailingTests2(json) {
-      core2.info(`Checking for failing tests..`);
+      core2.info(`
+Checking for failing tests..`);
       if (json.numFailedTests > 0) {
         core2.warning(`At least one failing test was found.`);
         return true;
@@ -2396,9 +2398,26 @@ var require_utils2 = __commonJS({
       core2.info(`There are no failing tests.`);
       return false;
     }
+    function createResultsFile2(results, jobAndStep2) {
+      const resultsFileName = `test-results-${jobAndStep2}.md`;
+      core2.info(`
+Writing results to ${resultsFileName}`);
+      let resultsFilePath = null;
+      fs.writeFile(resultsFileName, results, err => {
+        if (err) {
+          core2.info(`Error writing results to file. Error: ${err}`);
+        } else {
+          core2.info('Successfully created results file.');
+          core2.info(`File: ${resultsFileName}`);
+        }
+      });
+      resultsFilePath = path.resolve(resultsFileName);
+      return resultsFilePath;
+    }
     module2.exports = {
       readJsonResultsFromFile: readJsonResultsFromFile2,
-      areThereAnyFailingTests: areThereAnyFailingTests2
+      areThereAnyFailingTests: areThereAnyFailingTests2,
+      createResultsFile: createResultsFile2
     };
   }
});
@@ -16631,37 +16650,50 @@ var require_github2 = __commonJS({
   'src/github.js'(exports2, module2) {
     var core2 = require_core();
    var github = require_github();
-    var markupPrefix = '<!-- im-open/process-jest-test-results -->';
    async function createStatusCheck2(repoToken, markupData, conclusion, reportName2) {
-      core2.info(`Creating Status check for ${reportName2}...`);
+      core2.info(`
+Creating Status check for ${reportName2}...`);
      const octokit = github.getOctokit(repoToken);
      const git_sha =
        github.context.eventName === 'pull_request' ? 
github.context.payload.pull_request.head.sha : github.context.sha; - core2.info(`Creating status check for GitSha: ${git_sha} on a ${github.context.eventName} event.`); + const name = `status check - ${reportName2.toLowerCase()}`; + const status = 'completed'; const checkTime = new Date().toUTCString(); - core2.info(`Check time is: ${checkTime}`); + const summary = `This test run completed at \`${checkTime}\``; + const propMessage = ` Name: ${name} + GitSha: ${git_sha} + Event: ${github.context.eventName} + Status: ${status} + Conclusion: ${conclusion} + Check time: ${checkTime} + Title: ${reportName2} + Summary: ${summary}`; + core2.info(propMessage); + let statusCheckId; await octokit.rest.checks .create({ owner: github.context.repo.owner, repo: github.context.repo.repo, - name: `status check - ${reportName2.toLowerCase()}`, + name, head_sha: git_sha, - status: 'completed', + status, conclusion, output: { title: reportName2, - summary: `This test run completed at \`${checkTime}\``, + summary, text: markupData } }) .then(response => { - core2.info(`Created check: ${response.data.name}`); + core2.info(`Created check: '${response.data.name}' with id '${response.data.id}'`); + statusCheckId = response.data.id; }) .catch(error => { core2.setFailed(`An error occurred trying to create the status check: ${error.message}`); }); + return statusCheckId; } - async function lookForExistingComment(octokit) { + async function lookForExistingComment(octokit, markdownPrefix) { let commentId = null; await octokit .paginate(octokit.rest.issues.listComments, { @@ -16673,7 +16705,7 @@ var require_github2 = __commonJS({ if (comments.length === 0) { core2.info('There are no comments on the PR. A new comment will be created.'); } else { - const existingComment = comments.find(c => c.body.startsWith(markupPrefix)); + const existingComment = comments.find(c => c.body.startsWith(markdownPrefix)); if (existingComment) { core2.info(`An existing comment (${existingComment.id}) was found and will be updated.`); commentId = existingComment.id; @@ -16688,25 +16720,29 @@ var require_github2 = __commonJS({ core2.info(`Finished getting comments for PR #${github.context.payload.pull_request.number}.`); return commentId; } - async function createPrComment2(repoToken, markupData, updateCommentIfOneExists2) { + async function createPrComment2(repoToken, markdown, updateCommentIfOneExists2, commentIdentifier2) { if (github.context.eventName != 'pull_request') { core2.info('This event was not triggered by a pull_request. 
No comment will be created or updated.'); return; } + const markdownPrefix = ``; + core2.info(`The markdown prefix will be: '${markdownPrefix}'`); const octokit = github.getOctokit(repoToken); + let commentIdToReturn; let existingCommentId = null; if (updateCommentIfOneExists2) { core2.info('Checking for existing comment on PR....'); - existingCommentId = await lookForExistingComment(octokit); + existingCommentId = await lookForExistingComment(octokit, markdownPrefix); } if (existingCommentId) { core2.info(`Updating existing PR #${existingCommentId} comment...`); + commentIdToReturn = existingCommentId; await octokit.rest.issues .updateComment({ owner: github.context.repo.owner, repo: github.context.repo.repo, - body: `${markupPrefix} -${markupData}`, + body: `${markdownPrefix} +${markdown}`, comment_id: existingCommentId }) .then(response => { @@ -16721,17 +16757,19 @@ ${markupData}`, .createComment({ owner: github.context.repo.owner, repo: github.context.repo.repo, - body: `${markupPrefix} -${markupData}`, + body: `${markdownPrefix} +${markdown}`, issue_number: github.context.payload.pull_request.number }) .then(response => { core2.info(`PR comment was created. ID: ${response.data.id}.`); + commentIdToReturn = response.data.id; }) .catch(error => { core2.setFailed(`An error occurred trying to create the PR comment: ${error.message}`); }); } + return commentIdToReturn; } module2.exports = { createStatusCheck: createStatusCheck2, @@ -19599,13 +19637,12 @@ var require_markup = __commonJS({ var { format, utcToZonedTime } = require_date_fns_tz(); var timezone = core2.getInput('timezone') || 'Etc/UTC'; function getMarkupForJson2(results, reportName2) { - return ` - # ${reportName2} - ${getBadge(results)} - ${getTestTimes(results)} - ${getTestCounters(results)} - ${getTestResultsMarkup(results)} - `; + return `# ${reportName2} + +${getBadge(results)} +${getTestTimes(results)} +${getTestCounters(results)} +${getFailedAndEmptyTestResultsMarkup(results)}`; } function getBadge(results) { const failedCount = results.numFailedTests; @@ -19625,6 +19662,10 @@ var require_markup = __commonJS({ } } function getTestTimes(results) { + let hasTests = results.testResults && results.testResults.length > 0; + if (!hasTests) { + return ''; + } let startSeconds = results.startTime; let endSeconds = results.testResults .map(m => m.endTime) @@ -19634,25 +19675,23 @@ var require_markup = __commonJS({ const duration = (endSeconds - startSeconds) / 1e3; let startDate = new Date(startSeconds); let endDate = new Date(endSeconds); - return ` -
-        <summary> Duration: ${duration} seconds </summary>
-        <table>
-          <tr>
-            <th>Start:</th>
-            <td><code>${formatDate(startDate)}</code></td>
-          </tr>
-          <tr>
-            <th>Finish:</th>
-            <td><code>${formatDate(endDate)}</code></td>
-          </tr>
-          <tr>
-            <th>Duration:</th>
-            <td><code>${duration} seconds</code></td>
-          </tr>
-        </table>
-      </details>
-      `;
+      return `<details>
+        <summary>Duration: ${duration} seconds</summary>
+        <table>
+          <tr>
+            <th>Start:</th>
+            <td><code>${formatDate(startDate)}</code></td>
+          </tr>
+          <tr>
+            <th>Finish:</th>
+            <td><code>${formatDate(endDate)}</code></td>
+          </tr>
+          <tr>
+            <th>Duration:</th>
+            <td><code>${duration} seconds</code></td>
+          </tr>
+        </table>
+      </details>
+      `;
     }
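A quick aside on the duration math above: `endSeconds` is simply the latest `endTime` across all suites, obtained by sorting descending and taking the first element. A minimal sketch of that calculation (the `results` stub below is invented sample data shaped like Jest's `--json` output, not part of this PR):

```js
// Sketch: how getTestTimes derives its duration from a Jest results object.
// `results` is hypothetical sample data.
const results = {
  startTime: 1708546800000, // epoch ms when the Jest run started
  testResults: [{ endTime: 1708546805000 }, { endTime: 1708546809500 }]
};

// Latest endTime wins: sort descending and take the first element.
const endSeconds = results.testResults.map(m => m.endTime).sort((a, b) => b - a)[0];
const duration = (endSeconds - results.startTime) / 1e3;

console.log(duration); // 9.5 (seconds)
```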
    function getTestCounters(results) {
      let extraProps = getTableRowIfHasValue('Pending Test Suites:', results.numPendingTestSuites);
@@ -19660,38 +19699,35 @@ var require_markup = __commonJS({
      extraProps += getTableRowIfHasValue('Runtime Error Test Suites:', results.numRuntimeErrorTestSuites);
      extraProps += getTableRowIfHasValue('TODO Tests:', results.numTodoTests);
      let outcome = results.success ? 'Passed' : 'Failed';
-      return `
-      <details>
-        <summary> Outcome: ${outcome} | Total Tests: ${results.numTotalTests} | Passed: ${results.numPassedTests} | Failed: ${results.numFailedTests} </summary>
-        <table>
-          <tr>
-            <th>Total Test Suites:</th>
-            <td>${results.numTotalTestSuites}</td>
-          </tr>
-          <tr>
-            <th>Total Tests:</th>
-            <td>${results.numTotalTests}</td>
-          </tr>
-          <tr>
-            <th>Failed Test Suites:</th>
-            <td>${results.numFailedTestSuites}</td>
-          </tr>
-          <tr>
-            <th>Failed Tests:</th>
-            <td>${results.numFailedTests}</td>
-          </tr>
-          <tr>
-            <th>Passed Test Suites:</th>
-            <td>${results.numPassedTestSuites}</td>
-          </tr>
-          <tr>
-            <th>Passed Tests:</th>
-            <td>${results.numPassedTests}</td>
-          </tr>
-          ${extraProps}
-        </table>
-      </details>
-      `;
+      return `<details>
+        <summary>Outcome: ${outcome} | Total Tests: ${results.numTotalTests} | Passed: ${results.numPassedTests} | Failed: ${results.numFailedTests}</summary>
+        <table>
+          <tr>
+            <th>Total Test Suites:</th>
+            <td>${results.numTotalTestSuites}</td>
+          </tr>
+          <tr>
+            <th>Total Tests:</th>
+            <td>${results.numTotalTests}</td>
+          </tr>
+          <tr>
+            <th>Failed Test Suites:</th>
+            <td>${results.numFailedTestSuites}</td>
+          </tr>
+          <tr>
+            <th>Failed Tests:</th>
+            <td>${results.numFailedTests}</td>
+          </tr>
+          <tr>
+            <th>Passed Test Suites:</th>
+            <td>${results.numPassedTestSuites}</td>
+          </tr>
+          <tr>
+            <th>Passed Tests:</th>
+            <td>${results.numPassedTests}</td>
+          </tr>
+          ${extraProps}
+        </table>
+      </details>
+      `;
    }
    function getTableRowIfHasValue(heading, data) {
      if (data > 0) {
@@ -19703,7 +19739,7 @@ var require_markup = __commonJS({
      }
      return '';
    }
-    function getTestResultsMarkup(results, reportName2) {
+    function getFailedAndEmptyTestResultsMarkup(results, reportName2) {
      let resultsMarkup = '';
      if (!results.testResults || results.testResults.length === 0) {
        return getNoResultsMarkup(reportName2);
@@ -19715,43 +19751,43 @@ var require_markup = __commonJS({
        failedTests.forEach(failedTest => {
          resultsMarkup += getFailedTestMarkup(failedTest);
        });
-        return resultsMarkup.trim();
+        return resultsMarkup;
      }
    }
    function getNoResultsMarkup(reportName2) {
      const testResultIcon = ':grey_question:';
      const resultsMarkup = `
-    ## ${testResultIcon} ${reportName2}
-    There were no test results to report.
-    `;
+## ${testResultIcon} ${reportName2}
+
+There were no test results to report.
+`;
      return resultsMarkup;
    }
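The next function, `getFailedTestMarkup`, strips the ANSI color sequences Jest embeds in failure messages before the text lands inside a `<pre>` block. A quick illustration of that regex in isolation (the input string is made-up sample data):

```js
// Sketch: stripping ANSI color codes (ESC[<n>m) from a Jest failure message.
const raw = '\u001b[31mexpect(received).toBe(expected)\u001b[39m // deep equality';
const clean = raw.replace(/\u001b\[\d{1,2}m/gi, '');

console.log(clean); // "expect(received).toBe(expected) // deep equality"
```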
    function getFailedTestMarkup(failedTest) {
      core2.debug(`Processing ${failedTest.fullName}`);
      let failedMsg = failedTest.failureMessages.join('\n').replace(/\u001b\[\d{1,2}m/gi, '');
-      return `
-      <details>
-        <summary>:x: ${failedTest.fullName}</summary>
-        <table>
-          <tr>
-            <th>Title:</th>
-            <td>${failedTest.title}</td>
-          </tr>
-          <tr>
-            <th>Status:</th>
-            <td>${failedTest.status}</td>
-          </tr>
-          <tr>
-            <th>Location:</th>
-            <td>${failedTest.location}</td>
-          </tr>
-          <tr>
-            <th>Failure Messages:</th>
-            <td><pre>${failedMsg}</pre></td>
-          </tr>
-        </table>
-      </details>
-      `.trim();
+      return `<details>
+        <summary>:x: ${failedTest.fullName}</summary>
+        <table>
+          <tr>
+            <th>Title:</th>
+            <td>${failedTest.title}</td>
+          </tr>
+          <tr>
+            <th>Status:</th>
+            <td>${failedTest.status}</td>
+          </tr>
+          <tr>
+            <th>Location:</th>
+            <td>${failedTest.location}</td>
+          </tr>
+          <tr>
+            <th>Failure Messages:</th>
+            <td><pre>${failedMsg}</pre></td>
+          </tr>
+        </table>
+      </details>
+`; } module2.exports = { getMarkupForJson: getMarkupForJson2 @@ -19761,7 +19797,7 @@ var require_markup = __commonJS({ // src/main.js var core = require_core(); -var { readJsonResultsFromFile, areThereAnyFailingTests } = require_utils2(); +var { readJsonResultsFromFile, areThereAnyFailingTests, createResultsFile } = require_utils2(); var { createStatusCheck, createPrComment } = require_github2(); var { getMarkupForJson } = require_markup(); var requiredArgOptions = { @@ -19775,6 +19811,8 @@ var shouldCreateStatusCheck = core.getBooleanInput('create-status-check'); var shouldCreatePRComment = core.getBooleanInput('create-pr-comment'); var updateCommentIfOneExists = core.getBooleanInput('update-comment-if-one-exists'); var reportName = core.getInput('report-name'); +var jobAndStep = `${process.env.GITHUB_JOB}_${process.env.GITHUB_ACTION}`; +var commentIdentifier = core.getInput('comment-identifier') || jobAndStep; async function run() { try { const resultsJson = await readJsonResultsFromFile(resultsFile); @@ -19783,18 +19821,38 @@ async function run() { return; } const failingTestsFound = areThereAnyFailingTests(resultsJson); + core.setOutput('test-outcome', failingTestsFound ? 'Failed' : 'Passed'); const markupData = getMarkupForJson(resultsJson, reportName); - let conclusion = 'success'; - if (!resultsJson.success) { - conclusion = ignoreTestFailures ? 'neutral' : 'failure'; - } if (shouldCreateStatusCheck) { - await createStatusCheck(token, markupData, conclusion, reportName); + let conclusion = 'success'; + if (!resultsJson.success) { + conclusion = ignoreTestFailures ? 'neutral' : 'failure'; + } + const checkId = await createStatusCheck(token, markupData, conclusion, reportName); + core.setOutput('status-check-id', checkId); } if (shouldCreatePRComment) { - await createPrComment(token, markupData, updateCommentIfOneExists); - } - core.setOutput('test-outcome', failingTestsFound ? 'Failed' : 'Passed'); + core.info(` +Creating a PR comment with length ${markupData.length}...`); + const characterLimit = 65535; + let truncated = false; + let mdForComment = markupData; + if (mdForComment.length > characterLimit) { + const message = `Truncating markup data due to character limit exceeded for GitHub API. Markup data length: ${mdForComment.length}/${characterLimit}`; + core.info(message); + truncated = true; + const truncatedMessage = `> [!Important] +> Test results truncated due to character limit. See full report in output. +`; + mdForComment = `${truncatedMessage} +${mdForComment.substring(0, characterLimit - 100)}`; + } + core.setOutput('test-results-truncated', truncated); + const commentId = await createPrComment(token, mdForComment, updateCommentIfOneExists, commentIdentifier); + core.setOutput('pr-comment-id', commentId); + } + const resultsFilePath = createResultsFile(markupData, jobAndStep); + core.setOutput('test-results-file-path', resultsFilePath); } catch (error) { if (error instanceof RangeError) { core.info(error.message); diff --git a/src/github.js b/src/github.js index 5e2061a..58a8437 100644 --- a/src/github.js +++ b/src/github.js @@ -1,41 +1,53 @@ const core = require('@actions/core'); const github = require('@actions/github'); -const markupPrefix = ''; async function createStatusCheck(repoToken, markupData, conclusion, reportName) { - core.info(`Creating Status check for ${reportName}...`); + core.info(`\nCreating Status check for ${reportName}...`); const octokit = github.getOctokit(repoToken); const git_sha = github.context.eventName === 'pull_request' ? 
github.context.payload.pull_request.head.sha : github.context.sha; - core.info(`Creating status check for GitSha: ${git_sha} on a ${github.context.eventName} event.`); - + const name = `status check - ${reportName.toLowerCase()}`; + const status = 'completed'; const checkTime = new Date().toUTCString(); - core.info(`Check time is: ${checkTime}`); + const summary = `This test run completed at \`${checkTime}\``; + + const propMessage = ` Name: ${name} + GitSha: ${git_sha} + Event: ${github.context.eventName} + Status: ${status} + Conclusion: ${conclusion} + Check time: ${checkTime} + Title: ${reportName} + Summary: ${summary}`; + core.info(propMessage); + let statusCheckId; await octokit.rest.checks .create({ owner: github.context.repo.owner, repo: github.context.repo.repo, - name: `status check - ${reportName.toLowerCase()}`, + name: name, head_sha: git_sha, - status: 'completed', + status: status, conclusion: conclusion, output: { title: reportName, - summary: `This test run completed at \`${checkTime}\``, + summary: summary, text: markupData } }) .then(response => { - core.info(`Created check: ${response.data.name}`); + core.info(`Created check: '${response.data.name}' with id '${response.data.id}'`); + statusCheckId = response.data.id; }) .catch(error => { core.setFailed(`An error occurred trying to create the status check: ${error.message}`); }); + return statusCheckId; } -async function lookForExistingComment(octokit) { +async function lookForExistingComment(octokit, markdownPrefix) { let commentId = null; await octokit @@ -48,7 +60,7 @@ async function lookForExistingComment(octokit) { if (comments.length === 0) { core.info('There are no comments on the PR. A new comment will be created.'); } else { - const existingComment = comments.find(c => c.body.startsWith(markupPrefix)); + const existingComment = comments.find(c => c.body.startsWith(markdownPrefix)); if (existingComment) { core.info(`An existing comment (${existingComment.id}) was found and will be updated.`); commentId = existingComment.id; @@ -66,27 +78,33 @@ async function lookForExistingComment(octokit) { return commentId; } -async function createPrComment(repoToken, markupData, updateCommentIfOneExists) { +async function createPrComment(repoToken, markdown, updateCommentIfOneExists, commentIdentifier) { if (github.context.eventName != 'pull_request') { core.info('This event was not triggered by a pull_request. 
No comment will be created or updated.');
    return;
  }

+  const markdownPrefix = `<!-- im-open/process-jest-test-results ${commentIdentifier} -->`;
+  core.info(`The markdown prefix will be: '${markdownPrefix}'`);
+
   const octokit = github.getOctokit(repoToken);

+  let commentIdToReturn;
   let existingCommentId = null;
   if (updateCommentIfOneExists) {
     core.info('Checking for existing comment on PR....');
-    existingCommentId = await lookForExistingComment(octokit);
+    existingCommentId = await lookForExistingComment(octokit, markdownPrefix);
   }

   if (existingCommentId) {
     core.info(`Updating existing PR #${existingCommentId} comment...`);
+    commentIdToReturn = existingCommentId;
+
     await octokit.rest.issues
       .updateComment({
         owner: github.context.repo.owner,
         repo: github.context.repo.repo,
-        body: `${markupPrefix}\n${markupData}`,
+        body: `${markdownPrefix}\n${markdown}`,
         comment_id: existingCommentId
       })
       .then(response => {
@@ -101,16 +119,18 @@ async function createPrComment(repoToken, markupData, updateCommentIfOneExists)
       .createComment({
         owner: github.context.repo.owner,
         repo: github.context.repo.repo,
-        body: `${markupPrefix}\n${markupData}`,
+        body: `${markdownPrefix}\n${markdown}`,
         issue_number: github.context.payload.pull_request.number
       })
       .then(response => {
         core.info(`PR comment was created. ID: ${response.data.id}.`);
+        commentIdToReturn = response.data.id;
       })
       .catch(error => {
         core.setFailed(`An error occurred trying to create the PR comment: ${error.message}`);
       });
   }
+  return commentIdToReturn;
 }

 module.exports = {
diff --git a/src/main.js b/src/main.js
index ae09313..2bed891 100644
--- a/src/main.js
+++ b/src/main.js
@@ -1,5 +1,5 @@
 const core = require('@actions/core');
-const { readJsonResultsFromFile, areThereAnyFailingTests } = require('./utils');
+const { readJsonResultsFromFile, areThereAnyFailingTests, createResultsFile } = require('./utils');
 const { createStatusCheck, createPrComment } = require('./github');
 const { getMarkupForJson } = require('./markup');
@@ -16,6 +16,9 @@
 const shouldCreatePRComment = core.getBooleanInput('create-pr-comment');
 const updateCommentIfOneExists = core.getBooleanInput('update-comment-if-one-exists');
 const reportName = core.getInput('report-name');

+const jobAndStep = `${process.env.GITHUB_JOB}_${process.env.GITHUB_ACTION}`;
+const commentIdentifier = core.getInput('comment-identifier') || jobAndStep;
+
 async function run() {
   try {
     const resultsJson = await readJsonResultsFromFile(resultsFile);
@@ -25,22 +28,44 @@
     }

     const failingTestsFound = areThereAnyFailingTests(resultsJson);
+    core.setOutput('test-outcome', failingTestsFound ? 'Failed' : 'Passed');

     const markupData = getMarkupForJson(resultsJson, reportName);
-    let conclusion = 'success';
-    if (!resultsJson.success) {
-      conclusion = ignoreTestFailures ? 'neutral' : 'failure';
-    }
-
     if (shouldCreateStatusCheck) {
-      await createStatusCheck(token, markupData, conclusion, reportName);
+      let conclusion = 'success';
+      if (!resultsJson.success) {
+        conclusion = ignoreTestFailures ?
'neutral' : 'failure'; + } + const checkId = await createStatusCheck(token, markupData, conclusion, reportName); + core.setOutput('status-check-id', checkId); // This is mainly for testing purposes } + if (shouldCreatePRComment) { - await createPrComment(token, markupData, updateCommentIfOneExists); + core.info(`\nCreating a PR comment with length ${markupData.length}...`); + + // GitHub API has a limit of 65535 characters for a comment so truncate the markup if we need to + const characterLimit = 65535; + let truncated = false; + let mdForComment = markupData; + + if (mdForComment.length > characterLimit) { + const message = `Truncating markup data due to character limit exceeded for GitHub API. Markup data length: ${mdForComment.length}/${characterLimit}`; + core.info(message); + + truncated = true; + const truncatedMessage = `> [!Important]\n> Test results truncated due to character limit. See full report in output.\n`; + mdForComment = `${truncatedMessage}\n${mdForComment.substring(0, characterLimit - 100)}`; + } + core.setOutput('test-results-truncated', truncated); + + const commentId = await createPrComment(token, mdForComment, updateCommentIfOneExists, commentIdentifier); + core.setOutput('pr-comment-id', commentId); // This is mainly for testing purposes } - core.setOutput('test-outcome', failingTestsFound ? 'Failed' : 'Passed'); + // Create this automatically to facilitate testing + const resultsFilePath = createResultsFile(markupData, jobAndStep); + core.setOutput('test-results-file-path', resultsFilePath); } catch (error) { if (error instanceof RangeError) { core.info(error.message); diff --git a/src/markup.js b/src/markup.js index 95cca2f..66a775d 100644 --- a/src/markup.js +++ b/src/markup.js @@ -3,13 +3,12 @@ const { format, utcToZonedTime } = require('date-fns-tz'); const timezone = core.getInput('timezone') || 'Etc/UTC'; function getMarkupForJson(results, reportName) { - return ` - # ${reportName} - ${getBadge(results)} - ${getTestTimes(results)} - ${getTestCounters(results)} - ${getTestResultsMarkup(results)} - `; + return `# ${reportName} + +${getBadge(results)} +${getTestTimes(results)} +${getTestCounters(results)} +${getFailedAndEmptyTestResultsMarkup(results)}`; } function getBadge(results) { @@ -34,6 +33,11 @@ function formatDate(dateToFormat) { } function getTestTimes(results) { + let hasTests = results.testResults && results.testResults.length > 0; + if (!hasTests) { + return ''; + } + let startSeconds = results.startTime; let endSeconds = results.testResults .map(m => m.endTime) @@ -45,25 +49,23 @@ function getTestTimes(results) { let startDate = new Date(startSeconds); let endDate = new Date(endSeconds); - return ` -
-    <summary> Duration: ${duration} seconds </summary>
-    <table>
-      <tr>
-        <th>Start:</th>
-        <td><code>${formatDate(startDate)}</code></td>
-      </tr>
-      <tr>
-        <th>Finish:</th>
-        <td><code>${formatDate(endDate)}</code></td>
-      </tr>
-      <tr>
-        <th>Duration:</th>
-        <td><code>${duration} seconds</code></td>
-      </tr>
-    </table>
-  </details>
-  `;
+  return `<details>
+    <summary>Duration: ${duration} seconds</summary>
+    <table>
+      <tr>
+        <th>Start:</th>
+        <td><code>${formatDate(startDate)}</code></td>
+      </tr>
+      <tr>
+        <th>Finish:</th>
+        <td><code>${formatDate(endDate)}</code></td>
+      </tr>
+      <tr>
+        <th>Duration:</th>
+        <td><code>${duration} seconds</code></td>
+      </tr>
+    </table>
+  </details>
+  `;
 }
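With the `hasTests` guard added above, `getTestTimes` now contributes nothing to the report when there are no test results, instead of emitting an empty timing table. A rough sketch of the two behaviors (stub inputs; the rendered string shape is approximate, not the exact markup):

```js
// Sketch: expected behavior of getTestTimes after this change.
// Both inputs are hypothetical sample data.
const noTests = { testResults: [] };
// getTestTimes(noTests) === ''   <- new: empty string, so no empty <details> block

const withTests = { startTime: 0, testResults: [{ endTime: 9500 }] };
// getTestTimes(withTests) returns a collapsible block, roughly:
// <details>
//   <summary>Duration: 9.5 seconds</summary>
//   <table> ...Start / Finish / Duration rows... </table>
// </details>
```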
 function getTestCounters(results) {
@@ -72,38 +74,35 @@
   extraProps += getTableRowIfHasValue('Runtime Error Test Suites:', results.numRuntimeErrorTestSuites);
   extraProps += getTableRowIfHasValue('TODO Tests:', results.numTodoTests);
   let outcome = results.success ? 'Passed' : 'Failed';
-  return `
-  <details>
-    <summary> Outcome: ${outcome} | Total Tests: ${results.numTotalTests} | Passed: ${results.numPassedTests} | Failed: ${results.numFailedTests} </summary>
-    <table>
-      <tr>
-        <th>Total Test Suites:</th>
-        <td>${results.numTotalTestSuites}</td>
-      </tr>
-      <tr>
-        <th>Total Tests:</th>
-        <td>${results.numTotalTests}</td>
-      </tr>
-      <tr>
-        <th>Failed Test Suites:</th>
-        <td>${results.numFailedTestSuites}</td>
-      </tr>
-      <tr>
-        <th>Failed Tests:</th>
-        <td>${results.numFailedTests}</td>
-      </tr>
-      <tr>
-        <th>Passed Test Suites:</th>
-        <td>${results.numPassedTestSuites}</td>
-      </tr>
-      <tr>
-        <th>Passed Tests:</th>
-        <td>${results.numPassedTests}</td>
-      </tr>
-      ${extraProps}
-    </table>
-  </details>
-  `;
+  return `<details>
+    <summary>Outcome: ${outcome} | Total Tests: ${results.numTotalTests} | Passed: ${results.numPassedTests} | Failed: ${results.numFailedTests}</summary>
+    <table>
+      <tr>
+        <th>Total Test Suites:</th>
+        <td>${results.numTotalTestSuites}</td>
+      </tr>
+      <tr>
+        <th>Total Tests:</th>
+        <td>${results.numTotalTests}</td>
+      </tr>
+      <tr>
+        <th>Failed Test Suites:</th>
+        <td>${results.numFailedTestSuites}</td>
+      </tr>
+      <tr>
+        <th>Failed Tests:</th>
+        <td>${results.numFailedTests}</td>
+      </tr>
+      <tr>
+        <th>Passed Test Suites:</th>
+        <td>${results.numPassedTestSuites}</td>
+      </tr>
+      <tr>
+        <th>Passed Tests:</th>
+        <td>${results.numPassedTests}</td>
+      </tr>
+      ${extraProps}
+    </table>
+  </details>
+  `;
 }

 function getTableRowIfHasValue(heading, data) {
@@ -117,7 +116,7 @@
   return '';
 }

-function getTestResultsMarkup(results, reportName) {
+function getFailedAndEmptyTestResultsMarkup(results, reportName) {
   let resultsMarkup = '';

   if (!results.testResults || results.testResults.length === 0) {
@@ -130,16 +129,17 @@
     failedTests.forEach(failedTest => {
       resultsMarkup += getFailedTestMarkup(failedTest);
     });
-    return resultsMarkup.trim();
+    return resultsMarkup;
   }
 }

 function getNoResultsMarkup(reportName) {
   const testResultIcon = ':grey_question:';
   const resultsMarkup = `
-  ## ${testResultIcon} ${reportName}
-  There were no test results to report.
-  `;
+## ${testResultIcon} ${reportName}
+
+There were no test results to report.
+`;
   return resultsMarkup;
 }
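`getTableRowIfHasValue`, unchanged here apart from surrounding context, is what keeps zero-count rows (Pending, Runtime Error, TODO) out of the counters table: a row is emitted only for non-zero counts. A stub reimplementation of the same pattern for illustration (condensed; not the file's exact markup):

```js
// Sketch: optional table row helper - emit a row only when the count is non-zero.
function getTableRowIfHasValue(heading, data) {
  if (data > 0) {
    return `<tr><th>${heading}</th><td>${data}</td></tr>`;
  }
  return '';
}

console.log(getTableRowIfHasValue('TODO Tests:', 0)); // '' -> row omitted
console.log(getTableRowIfHasValue('TODO Tests:', 3)); // '<tr><th>TODO Tests:</th><td>3</td></tr>'
```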
@@ -148,29 +148,28 @@ function getFailedTestMarkup(failedTest) {
   //Replace an escaped unicode "escape character". It doesn't show correctly in markdown.
   let failedMsg = failedTest.failureMessages.join('\n').replace(/\u001b\[\d{1,2}m/gi, '');

-  return `
-  <details>
-    <summary>:x: ${failedTest.fullName}</summary>
-    <table>
-      <tr>
-        <th>Title:</th>
-        <td>${failedTest.title}</td>
-      </tr>
-      <tr>
-        <th>Status:</th>
-        <td>${failedTest.status}</td>
-      </tr>
-      <tr>
-        <th>Location:</th>
-        <td>${failedTest.location}</td>
-      </tr>
-      <tr>
-        <th>Failure Messages:</th>
-        <td><pre>${failedMsg}</pre></td>
-      </tr>
-    </table>
-  </details>
-  `.trim();
+  return `<details>
+    <summary>:x: ${failedTest.fullName}</summary>
+    <table>
+      <tr>
+        <th>Title:</th>
+        <td>${failedTest.title}</td>
+      </tr>
+      <tr>
+        <th>Status:</th>
+        <td>${failedTest.status}</td>
+      </tr>
+      <tr>
+        <th>Location:</th>
+        <td>${failedTest.location}</td>
+      </tr>
+      <tr>
+        <th>Failure Messages:</th>
+        <td><pre>${failedMsg}</pre></td>
+      </tr>
+    </table>
+  </details>
+`; } module.exports = { diff --git a/src/utils.js b/src/utils.js index 35fc1ee..4ac7b35 100644 --- a/src/utils.js +++ b/src/utils.js @@ -1,5 +1,6 @@ const core = require('@actions/core'); const fs = require('fs'); +const path = require('path'); async function readJsonResultsFromFile(resultsFile) { core.info('Reading results from jest results file....'); @@ -19,7 +20,7 @@ async function readJsonResultsFromFile(resultsFile) { } function areThereAnyFailingTests(json) { - core.info(`Checking for failing tests..`); + core.info(`\nChecking for failing tests..`); if (json.numFailedTests > 0) { core.warning(`At least one failing test was found.`); @@ -30,7 +31,26 @@ function areThereAnyFailingTests(json) { return false; } +function createResultsFile(results, jobAndStep) { + const resultsFileName = `test-results-${jobAndStep}.md`; + + core.info(`\nWriting results to ${resultsFileName}`); + let resultsFilePath = null; + + fs.writeFile(resultsFileName, results, err => { + if (err) { + core.info(`Error writing results to file. Error: ${err}`); + } else { + core.info('Successfully created results file.'); + core.info(`File: ${resultsFileName}`); + } + }); + resultsFilePath = path.resolve(resultsFileName); + return resultsFilePath; +} + module.exports = { readJsonResultsFromFile, - areThereAnyFailingTests + areThereAnyFailingTests, + createResultsFile }; diff --git a/test/assert-file-contents-match.sh b/test/assert-file-contents-match.sh new file mode 100755 index 0000000..cb4c192 --- /dev/null +++ b/test/assert-file-contents-match.sh @@ -0,0 +1,58 @@ +#!/bin/bash + +name='' +expectedFileName='' +actualFileName='' + +for arg in "$@"; do + case $arg in + --name) + name=$2 + shift # Remove argument --name from `$@` + shift # Remove argument value from `$@` + ;; + --expectedFileName) + expectedFileName=$2 + shift # Remove argument --expected from `$@` + shift # Remove argument value from `$@` + ;; + --actualFileName) + actualFileName=$2 + shift # Remove argument --actual from `$@` + shift # Remove argument value from `$@` + ;; + + esac +done + +echo " +Asserting file contents match: +Expected file name: '$expectedFileName' +Actual file name: '$actualFileName'" + +# First make sure the actual file exists +if [ -f "$actualFileName" ] +then + echo " +$actualFileName exists which is expected." + actualFileContents=$(cat $actualFileName) +else + echo " +$actualFileName does not exist which is not expected" + exit 1 +fi +expectedFileContents=$(cat $expectedFileName) + + +# Then compare the contents +name="file contents" +echo " +Expected $name: '$expectedFileContents' +Actual $name: '$actualFileContents'" + +if [ "$expectedFileContents" != "$actualFileContents" ]; then + echo "The expected $name does not match the actual $name." + exit 1 +else + echo "The expected and actual $name values match." 
+fi \ No newline at end of file diff --git a/test/assert-pr-comment-exists.js b/test/assert-pr-comment-exists.js new file mode 100644 index 0000000..f01d1ca --- /dev/null +++ b/test/assert-pr-comment-exists.js @@ -0,0 +1,35 @@ +module.exports = async (github, context, core, commentId) => { + core.info(`\nAsserting that PR Comment with the following id exists: '${commentId}'`); + + let actualComment; + + if (!commentId || commentId.trim() === '') { + core.setFailed(`The comment id provided was empty.`); + } + + const commentResponse = await github.rest.issues.getComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: commentId.trim() + }); + + if (!commentResponse && !commentResponse.data) { + core.setFailed(`Comment ${commentId} does not appear to exist.`); + } else { + core.info(`Comment ${commentId} exists.`); + let rawComment = commentResponse.data; + + actualComment = { + id: rawComment.id, + body: rawComment.body, + createdAt: rawComment.created_at, + updatedAt: rawComment.updated_at, + issueUrl: rawComment.issue_url + }; + core.startGroup(`Comment ${actualComment.id} details:`); + console.log(actualComment); + core.endGroup(); + } + + return actualComment; +}; diff --git a/test/assert-pr-comment-matches-expectations.js b/test/assert-pr-comment-matches-expectations.js new file mode 100644 index 0000000..e69436b --- /dev/null +++ b/test/assert-pr-comment-matches-expectations.js @@ -0,0 +1,75 @@ +module.exports = async (core, actualComment, actualTestResults, expectedComment) => { + function assertCreatedAndUpdatedMatch(created, updated) { + core.info(`\n\tCreated: '${created}'`); + core.info(`\tUpdated: '${updated}'`); + + if (created != updated) { + core.setFailed(`\tThe created and updated dates do not match, which is NOT expected.`); + } else { + core.info(`\tThe created and updated dates match, which is expected.`); + } + } + + function assertUpdatedIsAfterCreated(created, updated) { + core.info(`\n\tCreated: '${created}'`); + core.info(`\tUpdated: '${updated}'`); + + if (created >= updated) { + core.setFailed(`\tThe created date is on or after the updated date, which is NOT expected.`); + } else { + core.info(`\tThe created date is before the updated date, which is expected.`); + } + } + + function assertValueContainsSubstring(valueName, value, substringName, substring) { + if (value.includes(substring)) { + core.info(`\n\tChecking ${valueName} contains the ${substringName} substring.`); + core.info(`\tThe ${valueName} string contains the substring, which is expected.`); + } else { + core.info(`\n\tChecking ${valueName} contains the ${substringName} substring.`); + core.setFailed(`\tThe ${valueName} string does not contain the ${substringName} substring, which is not expected.`); + core.startGroup(`\t${valueName} contents:`); + core.info(`'${value}'`); + core.endGroup(); + + core.startGroup(`\t${substringName} contents:`); + core.info(`'${substring}'`); + core.endGroup(); + } + } + + function validateProps() { + core.info(`\nAsserting that PR Comment properties match the expected values.`); + + const expectedPrefix = expectedComment.prefix; + const expectedFullMd = expectedComment.fullMarkdown; + const expectedTruncatedMd = expectedComment.truncatedMarkdown; + const isTruncated = expectedComment.truncated; + + // Check the actual comment's body + assertValueContainsSubstring('PR Comment', actualComment.body, 'Expected Prefix', expectedPrefix); + if (isTruncated) { + assertValueContainsSubstring('PR Comment', actualComment.body, 'Expected Body', 
expectedTruncatedMd); + } else { + assertValueContainsSubstring('PR Comment', actualComment.body, 'Expected Body', expectedFullMd); + } + + // Check the test-results.md file + assertValueContainsSubstring('test-results.md', actualTestResults, 'Expected Body', expectedFullMd); + + // Doublecheck the timestamps are generally what we expected based on created/updated status + switch (expectedComment.action) { + case 'updated': + assertUpdatedIsAfterCreated(actualComment.createdAt, actualComment.updatedAt); + break; + case 'created': + assertCreatedAndUpdatedMatch(actualComment.createdAt, actualComment.updatedAt); + break; + default: + core.setFailed(`The action '${expectedComment.action}' is not supported.`); + break; + } + } + + validateProps(); +}; diff --git a/test/assert-status-check-exists.js b/test/assert-status-check-exists.js new file mode 100644 index 0000000..3fb16cc --- /dev/null +++ b/test/assert-status-check-exists.js @@ -0,0 +1,41 @@ +module.exports = async (github, context, core, statusCheckId) => { + core.info(`\nAsserting that status check '${statusCheckId} exists`); + + if (!statusCheckId || statusCheckId.trim() === '') { + core.setFailed('The statusCheckId was not provided'); + return; + } + + let statusCheckToReturn; + await github.rest.checks + .get({ + owner: context.repo.owner, + repo: context.repo.repo, + check_run_id: statusCheckId.trim() + }) + .then(checkResponse => { + core.info(`Status Check ${statusCheckId} exists.`); + const rawCheck = checkResponse.data; + + statusCheckToReturn = { + id: rawCheck.id, + name: rawCheck.name, + status: rawCheck.status, + conclusion: rawCheck.conclusion, + startedAt: rawCheck.started_at, + completedAt: rawCheck.completed_at, + title: rawCheck.output.title, + summary: rawCheck.output.summary, + prNumber: rawCheck.pull_requests.length > 0 ? rawCheck.pull_requests[0].number : null, + text: rawCheck.output.text + }; + core.startGroup(`Check ${statusCheckId} details:`); + console.log(statusCheckToReturn); + core.endGroup(); + }) + .catch(error => { + core.setFailed(`An error occurred retrieving status check ${statusCheckId}. 
Error: ${error.message}`); + }); + + return statusCheckToReturn; +}; diff --git a/test/assert-status-check-matches-expectations.js b/test/assert-status-check-matches-expectations.js new file mode 100644 index 0000000..cb5cbb1 --- /dev/null +++ b/test/assert-status-check-matches-expectations.js @@ -0,0 +1,43 @@ +module.exports = async (core, statusCheck, expectedValues) => { + function assertValuesMatch(variableName, expectedValue, actualValue) { + core.info(`\n\tExpected ${variableName}: '${expectedValue}'`); + core.info(`\tActual ${variableName}: '${actualValue}'`); + + if (expectedValue != actualValue) { + core.setFailed(`\tThe expected ${variableName} does not match the actual ${variableName}.`); + } else { + core.info(`\tThe expected and actual ${variableName} values match.`); + } + } + + function assertValueContainsSubstring(valueName, value, substringName, substring) { + if (value.includes(substring)) { + core.info(`\n\tChecking ${valueName} contains the ${substringName} substring.`); + core.info(`\tThe ${valueName} string contains the substring.`); + } else { + core.info(`\n\tChecking ${valueName} contains the ${substringName} substring.`); + core.setFailed(`\tThe ${valueName} string does not contain the ${substringName} substring.`); + core.startGroup('\tString and substring Details'); + core.info(`\n\t${valueName}: '${value}'`); + core.info(`\t${substringName}: '${substring}'`); + core.endGroup(); + } + } + + function validateProps() { + core.info(`\nAsserting that Status Check properties match the expected values.`); + core.info(`Status Check: ${statusCheck.id}`); + + assertValuesMatch('Name', expectedValues['name'], statusCheck.name); + assertValuesMatch('Status', expectedValues['status'], statusCheck.status); + assertValuesMatch('Conclusion', expectedValues['conclusion'], statusCheck.conclusion); + assertValuesMatch('Title', expectedValues['title'], statusCheck.title); + assertValuesMatch('Text', expectedValues['text'], statusCheck.text); + + // The summary should be something like: 'This test run completed at `Wed, 21 Feb 2024 20:21:48 GMT`' + // so just check that it contains the static portion. + assertValueContainsSubstring('Summary', statusCheck.summary, 'Partial Test Run Text', 'This test run completed at `'); + } + + validateProps(); +}; diff --git a/test/assert-value-is-empty.sh b/test/assert-value-is-empty.sh new file mode 100755 index 0000000..ae9a305 --- /dev/null +++ b/test/assert-value-is-empty.sh @@ -0,0 +1,32 @@ +#!/bin/bash + + +name='' +value='' + +for arg in "$@"; do + case $arg in + --name) + name=$2 + shift # Remove argument --name from `$@` + shift # Remove argument value from `$@` + ;; + --value) + value=$2 + shift # Remove argument --expected from `$@` + shift # Remove argument value from `$@` + ;; + esac +done + +echo " +Asserting $name is empty +$name value: '$value'" + +if [ -z "$value" ] +then + echo "$name is empty which is expected." +else + echo "$name is not empty which is not expected." 
+ exit 1 +fi \ No newline at end of file diff --git a/test/assert-value-is-not-empty.sh b/test/assert-value-is-not-empty.sh new file mode 100755 index 0000000..cdf06b1 --- /dev/null +++ b/test/assert-value-is-not-empty.sh @@ -0,0 +1,32 @@ +#!/bin/bash + + +name='' +value='' + +for arg in "$@"; do + case $arg in + --name) + name=$2 + shift # Remove argument --name from `$@` + shift # Remove argument value from `$@` + ;; + --value) + value=$2 + shift # Remove argument --expected from `$@` + shift # Remove argument value from `$@` + ;; + esac +done + +echo " +Asserting $name is not empty +$name value: '$value'" + +if [ -z "$value" ] +then + echo "$name is empty which is not expected." + exit 1 +else + echo "$name is not empty which is expected." +fi \ No newline at end of file diff --git a/test/assert-values-do-not-match.sh b/test/assert-values-do-not-match.sh new file mode 100755 index 0000000..b8371fe --- /dev/null +++ b/test/assert-values-do-not-match.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +name='' +value1='' +value2='' + +for arg in "$@"; do + case $arg in + --name) + name=$2 + shift # Remove argument --name from `$@` + shift # Remove argument value from `$@` + ;; + --value1) + value1=$2 + shift # Remove argument --expected from `$@` + shift # Remove argument value from `$@` + ;; + --value2) + value2=$2 + shift # Remove argument --actual from `$@` + shift # Remove argument value from `$@` + ;; + + esac +done + +echo " +Asserting $name values do not match +$name 1: '$value1' +$name 2: '$value2'" + +if [ "$value1" != "$value2" ]; then + echo "$name 1 does not match $name 2, which is expected." +else + echo "Values 1 and 2 match, which is not expected." + exit 1 +fi \ No newline at end of file diff --git a/test/assert-values-match.sh b/test/assert-values-match.sh new file mode 100755 index 0000000..c3881dd --- /dev/null +++ b/test/assert-values-match.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +name='' +expectedValue='' +actualValue='' + +for arg in "$@"; do + case $arg in + --name) + name=$2 + shift # Remove argument --name from `$@` + shift # Remove argument value from `$@` + ;; + --expected) + expectedValue=$2 + shift # Remove argument --expected from `$@` + shift # Remove argument value from `$@` + ;; + --actual) + actualValue=$2 + shift # Remove argument --actual from `$@` + shift # Remove argument value from `$@` + ;; + + esac +done + +echo " +Asserting $name values match +Expected $name: '$expectedValue' +Actual $name: '$actualValue'" + +if [ "$expectedValue" != "$actualValue" ]; then + echo "The expected $name does not match the actual $name." + exit 1 +else + echo "The expected and actual $name values match." +fi \ No newline at end of file diff --git a/test/delete-pr-comment.js b/test/delete-pr-comment.js new file mode 100644 index 0000000..59f6915 --- /dev/null +++ b/test/delete-pr-comment.js @@ -0,0 +1,24 @@ +module.exports = async (github, context, core, commentId) => { + core.info(`\nDeleting comment '${commentId}'`); + + if (!commentId) { + core.setFailed(`The comment id provided was empty.`); + } + + await github + .request(`DELETE /repos/{owner}/{repo}/issues/comments/{comment_id}`, { + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: commentId, + headers: { + 'X-GitHub-Api-Version': '2022-11-28' + } + }) + .then(() => { + core.info(`The comment '${commentId}' was deleted successfully.`); + }) + .catch(error => { + core.info(`An error occurred deleting comment '${commentId}'. 
Error: ${error.message}`); + core.info(`You may need to manually clean up the PR comments.`); + }); +}; diff --git a/test/delete-pre-existing-comments.js b/test/delete-pre-existing-comments.js new file mode 100644 index 0000000..6e94b48 --- /dev/null +++ b/test/delete-pre-existing-comments.js @@ -0,0 +1,66 @@ +module.exports = async (github, context, core) => { + async function lookForExistingComments(github, context, core, prNum) { + const markupPrefix = `