name: Build and Review PR
run-name: 'Build and Review PR #${{ github.event.pull_request.number }}'
on:
# https://docs.github.com/en/actions/security-guides/automatic-token-authentication#permissions-for-the-github_token
#
# This workflow uses the pull_request trigger which prevents write permissions on the
# GH_TOKEN and secrets access from public forks. This should remain as a pull_request
# trigger to minimize the access public forks have in the repository. The reduced
# permissions are adequate but do mean that re-compiles and readme changes will have to be
# made manually by the PR author. These auto-updates could be done by this workflow
# for branches, but in order to re-trigger a PR build (which is needed for status checks),
# we would have to make the commits with a different user and their PAT. To minimize exposure
# and complication, we request that those changes be made manually by the PR author.
pull_request:
types: [opened, synchronize, reopened]
# paths:
# Do not include specific paths here. We always want this build to run and produce a
# status check which our branch protection rules can use. If this is skipped because of
# path filtering, a status check will not be created and we won't be able to merge the PR
# without disabling that requirement. If we have a status check that is always produced,
# we can also use that to require all branches be up to date before they are merged.
jobs:
build-and-review-pr:
# This reusable workflow will check to see if an action's source code has changed based on
# whether the PR includes files that match the files-with-code arg or are in one of the
# dirs-with-code directories. If there are source code changes, this reusable workflow
# will then run the action's build (if one was provided) and update the README.md with
# the latest version of the action. If those two steps result in any changes that need to
# be committed, the workflow will fail because the PR needs some updates. Instructions for
# updating the PR will be available in the build log, in the workflow summary, and as a PR
# comment if the PR came from a branch (not a fork).
# This workflow assumes:
# - The main README.md is at the root of the repo
# - The README contains a contribution guidelines section and a usage examples section
uses: im-open/.github/.github/workflows/reusable-build-and-review-pr.yml@v1
with:
action-name: ${{ github.repository }}
default-branch: main
readme-name: 'README.md'
# The id of the contribution guidelines section of the README.md
readme-contribution-id: '#contributing'
# The id of the usage examples section of the README.md
readme-examples-id: '#usage-examples'
# The files that contain source code for the action. Only files that affect the action's execution,
# like action.yml or package.json, should be included. Do not include files like README.md or .gitignore.
# Files do not need to be explicitly provided here if they fall under one of the dirs in dirs-with-code.
# ** This value must match the same files-with-code argument specified in increment-version-on-merge.yml.
files-with-code: 'action.yml,package.json,package-lock.json'
# The directories that contain source code for the action. Only dirs with files that affect the action's
# execution, like src or dist, should be included. Do not include dirs like .github or node_modules.
# ** This value must match the same dirs-with-code argument specified in increment-version-on-merge.yml.
dirs-with-code: 'src,dist'
# The npm script to run to build the action. This is typically 'npm run build' if the
# action needs to be compiled. For composite-run-steps actions this is typically empty.
build-command: 'npm run build'
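# For context, a build-command of 'npm run build' assumes the action's package.json
# defines a matching "build" script. A hedged sketch of such an entry (the bundler and
# entry point below are assumptions, not taken from this repo):
#
#   {
#     "scripts": {
#       "build": "ncc build src/main.js -o dist"
#     }
#   }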
unit-tests:
runs-on: ubuntu-latest
env:
REPORTS_INPUT_VALID: './test/input-files/valid/*.xml'
REPORTS_INPUT_BAD_DIR: './test/input-files/bad-dir/*.xml'
REPORTS_INPUT_BAD_PATTERN: './test/input-files/valid/*.opncvr.xml'
TARGETDIR_INPUT_VERBOSITY: './test-results/invalid-verbosity'
TARGETDIR_INPUT_REPORTTYPES: './test-results/invalid-reporttypes'
TARGETDIR_INPUT_DOES_NOT_EXIST: './test-results/dir-does-not-exist'
TARGETDIR_INPUT_BAD_PATTERN: './test-results/bad-pattern'
TARGETDIR_INPUT_INVALID: './test-results/invalid-xml'
TARGETDIR_INPUT_MATCHING: './test-results/matching-reports'
TARGETDIR_INPUT_ASSEMBLY_FILTERS: './test-results/assembly-filters'
TARGETDIR_INPUT_CLASS_FILTERS: './test-results/class-filters'
TARGETDIR_INPUT_FILE_FILTERS: './test-results/file-filters'
ERROR_REASON_OUTPUT_VERBOSITY: 'Invalid verbosity'
ERROR_REASON_OUTPUT_REPORTTYPES: 'Invalid report type'
ERROR_REASON_OUTPUT_NO_MATCHING_FILES: 'No matching files found'
ERROR_REASON_OUTPUT_TOOL: 'Failed to execute ReportGenerator.exe'
ERROR_REASON_OUTPUT_ACTION: 'Failed to execute the action'
ACTUAL_CONTENTS_MATCHING: './test-results/matching-reports/Summary.md' # MarkdownSummary
ACTUAL_CONTENTS_ASSEMBLY_FILTERS: './test-results/assembly-filters/Summary.xml' # XmlSummary
ACTUAL_CONTENTS_CLASS_FILTERS: './test-results/class-filters/Summary.json' # JsonSummary
ACTUAL_CONTENTS_FILE_FILTERS: './test-results/file-filters/Summary.txt' # TextSummary
EXPECTED_CONTENTS_MATCHING: './test/expected-contents/matching-reports.md'
EXPECTED_CONTENTS_ASSEMBLY_FILTERS: './test/expected-contents/assembly-filters.xml'
EXPECTED_CONTENTS_CLASS_FILTERS: './test/expected-contents/class-filters.json'
EXPECTED_CONTENTS_FILE_FILTERS: './test/expected-contents/file-filters.txt'
steps:
- name: '-------------------------------------------------------------------------------------------------------------'
run: echo ""
- name: ' SETUP '
run: echo ""
- name: Setup - Checkout the action
uses: actions/checkout@v4
- name: '-------------------------------------------------------------------------------------------------------------'
run: echo ""
- name: ' TEST 1 - INVALID VERBOSITY INPUT '
run: echo ""
- name: 1a - When code-coverage-report-generator is called with an invalid 'verbosity' input
id: invalid-verbosity
if: always()
continue-on-error: true # This is needed because we expect the step to fail but we need it to "pass" in order for the test job to succeed.
uses: ./
with:
reports: '${{ env.REPORTS_INPUT_VALID }}'
targetdir: '${{ env.TARGETDIR_INPUT_VERBOSITY }}'
reporttypes: 'MarkdownSummary'
verbosity: 'InVaLId_VeRbOsItY'
- name: 1b - Then the action outcome should be failure
if: always()
run: ./test/assert-values-match.sh --name "step outcome" --expected "failure" --actual "${{ steps.invalid-verbosity.outcome }}"
- name: 1c - And the 'error-reason' output should be '${{ env.ERROR_REASON_OUTPUT_VERBOSITY }}'
if: always()
run: ./test/assert-values-match.sh --name "error-reason output" --expected "${{ env.ERROR_REASON_OUTPUT_VERBOSITY }}" --actual "${{ steps.invalid-verbosity.outputs.error-reason }}"
- name: 1d - And the target directory should not exist
if: always()
run: ./test/assert-dir-does-not-exist.sh --dir "${{ env.TARGETDIR_INPUT_VERBOSITY }}"
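# The assert-values-match.sh helper used above is not included in this file. A minimal
# sketch of what it might look like, assuming plain flag parsing and a string comparison
# (the real script may differ):
#
#   #!/bin/bash
#   while [[ $# -gt 0 ]]; do
#     case "$1" in
#       --name) name="$2"; shift 2 ;;
#       --expected) expected="$2"; shift 2 ;;
#       --actual) actual="$2"; shift 2 ;;
#       *) shift ;;
#     esac
#   done
#   if [ "$expected" != "$actual" ]; then
#     echo "The $name was expected to be '$expected' but was '$actual'."
#     exit 1
#   fi
#   echo "The $name matches the expected value of '$expected'."
#
# assert-dir-does-not-exist.sh would parse a --dir flag the same way and fail with
# exit 1 when [ -d "$dir" ] is true.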
- name: '-------------------------------------------------------------------------------------------------------------'
run: echo ""
- name: ' TEST 2 - INVALID REPORTTYPES INPUT '
run: echo ""
- name: 2a - When code-coverage-report-generator is called with an invalid 'reporttypes' input
id: invalid-reporttypes
if: always()
continue-on-error: true # This is needed because we expect the step to fail but we need it to "pass" in order for the test job to succeed.
uses: ./
with:
reports: '${{ env.REPORTS_INPUT_VALID }}'
targetdir: '${{ env.TARGETDIR_INPUT_REPORTTYPES }}'
reporttypes: 'BarkdownSummary'
verbosity: 'Info'
- name: 2b - Then the action outcome should be failure
if: always()
run: ./test/assert-values-match.sh --name "step outcome" --expected "failure" --actual "${{ steps.invalid-reporttypes.outcome }}"
- name: 2c - And the 'error-reason' output should be '${{ env.ERROR_REASON_OUTPUT_REPORTTYPES }}'
if: always()
run: ./test/assert-values-match.sh --name "error-reason output" --expected "${{ env.ERROR_REASON_OUTPUT_REPORTTYPES }}" --actual "${{ steps.invalid-reporttypes.outputs.error-reason }}"
- name: 2d - And the target directory should not exist
if: always()
run: ./test/assert-dir-does-not-exist.sh --dir "${{ env.TARGETDIR_INPUT_REPORTTYPES }}"
- name: '-------------------------------------------------------------------------------------------------------------'
run: echo ""
- name: ' TEST 3 - BAD REPORTS INPUT (DIR DOES NOT EXIST) '
run: echo ""
- name: 3a - When code-coverage-report-generator is called with a 'reports' input dir that does not exist
id: dir-does-not-exist
if: always()
continue-on-error: true # This is needed because we expect the step to fail but we need it to "pass" in order for the test job to succeed.
uses: ./
with:
reports: '${{ env.REPORTS_INPUT_BAD_DIR }}'
targetdir: '${{ env.TARGETDIR_INPUT_DOES_NOT_EXIST }}'
reporttypes: 'MarkdownSummary'
verbosity: 'Info'
- name: 3b - Then the action outcome should be failure
if: always()
run: ./test/assert-values-match.sh --name "step outcome" --expected "failure" --actual "${{ steps.dir-does-not-exist.outcome }}"
- name: 3c - And the 'error-reason' output should be '${{ env.ERROR_REASON_OUTPUT_NO_MATCHING_FILES }}'
if: always()
run: ./test/assert-values-match.sh --name "error-reason output" --expected "${{ env.ERROR_REASON_OUTPUT_NO_MATCHING_FILES }}" --actual "${{ steps.dir-does-not-exist.outputs.error-reason }}"
- name: 3d - And the target directory should not exist
if: always()
run: ./test/assert-dir-does-not-exist.sh --dir "${{ env.TARGETDIR_INPUT_DOES_NOT_EXIST }}"
- name: '-------------------------------------------------------------------------------------------------------------'
run: echo ""
- name: ' TEST 4 - BAD REPORTS INPUT (PATTERN DOES NOT MATCH ANY FILES) '
run: echo ""
- name: 4a - When code-coverage-report-generator is called with a 'reports' input pattern that does not match any files
id: bad-pattern
if: always()
continue-on-error: true # This is needed because we expect the step to fail but we need it to "pass" in order for the test job to succeed.
uses: ./
with:
reports: '${{ env.REPORTS_INPUT_BAD_PATTERN }}'
targetdir: '${{ env.TARGETDIR_INPUT_BAD_PATTERN }}'
reporttypes: 'MarkdownSummary'
verbosity: 'Info'
- name: 4b - Then the action outcome should be failure
if: always()
run: ./test/assert-values-match.sh --name "step outcome" --expected "failure" --actual "${{ steps.bad-pattern.outcome }}"
- name: 4c - And the 'error-reason' output should be '${{ env.ERROR_REASON_OUTPUT_NO_MATCHING_FILES }}'
if: always()
run: ./test/assert-values-match.sh --name "error-reason output" --expected "${{ env.ERROR_REASON_OUTPUT_NO_MATCHING_FILES }}" --actual "${{ steps.bad-pattern.outputs.error-reason }}"
- name: 4d - And the target directory should not exist
if: always()
run: ./test/assert-dir-does-not-exist.sh --dir "${{ env.TARGETDIR_INPUT_BAD_PATTERN }}"
- name: '-------------------------------------------------------------------------------------------------------------'
run: echo ""
- name: ' TEST 5 - MATCHING REPORTS '
run: echo ""
- name: 5a - When code-coverage-report-generator is called with a 'reports' input pattern that matches files
id: matching-reports
if: always()
uses: ./
with:
reports: '${{ env.REPORTS_INPUT_VALID }}'
targetdir: '${{ env.TARGETDIR_INPUT_MATCHING }}'
reporttypes: ' MarkDoWNSummary;Html '
verbosity: ' InFo '
title: 'MyProj Code Coverage'
tag: 'matching-tag'
- name: 5b - Then the action outcome should be success
if: always()
run: ./test/assert-values-match.sh --name "step outcome" --expected "success" --actual "${{ steps.matching-reports.outcome }}"
- name: 5c - And the 'error-reason' output should be empty
if: always()
run: ./test/assert-value-is-empty.sh --name "error-reason output" --value "${{ steps.matching-reports.outputs.error-reason }}"
- name: 5d - And the target directory should exist
if: always()
run: ./test/assert-dir-exists.sh --dir "${{ env.TARGETDIR_INPUT_MATCHING }}"
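# assert-dir-exists.sh and assert-value-is-empty.sh are the success-path counterparts of
# the helpers sketched above. Hedged one-line sketches (the real scripts may differ):
#
#   [ -d "$dir" ] || { echo "Expected directory '$dir' to exist."; exit 1; }
#   [ -z "$value" ] || { echo "Expected the $name to be empty but it was '$value'."; exit 1; }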
- name: 5e - And '${{ env.ACTUAL_CONTENTS_MATCHING }}' should contain the expected contents
if: always()
run: |
# Comparing the Summary.md file will ensure that:
# - The provided tag is included
# - The title is not included (reporttype=MarkDoWNSummary does not support it)
# - None of the assemblies/classes/files have been filtered out because those inputs were not supplied
actualFile="${{ env.ACTUAL_CONTENTS_MATCHING }}"
expectedContentsFile="${{ env.EXPECTED_CONTENTS_MATCHING }}"
./test/assert-file-contains-expected-content.sh --fileName "$actualFile" --expectedContents "$expectedContentsFile"
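# assert-file-contains-expected-content.sh is also not shown here. A hedged sketch,
# assuming it does a whole-file comparison (the name could equally imply a substring
# check; the real script may normalize line endings or diff differently):
#
#   #!/bin/bash
#   while [[ $# -gt 0 ]]; do
#     case "$1" in
#       --fileName) fileName="$2"; shift 2 ;;
#       --expectedContents) expectedContents="$2"; shift 2 ;;
#       *) shift ;;
#     esac
#   done
#   if ! diff -q "$fileName" "$expectedContents" > /dev/null; then
#     echo "The contents of '$fileName' do not match '$expectedContents':"
#     diff "$fileName" "$expectedContents" || true
#     exit 1
#   fi
#   echo "'$fileName' contains the expected contents."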
- name: '-------------------------------------------------------------------------------------------------------------'
run: echo ""
- name: ' TEST 6 - ASSEMBLY FILTERS '
run: echo ""
- name: 6a - When code-coverage-report-generator is called with assembly filters
id: assembly-filters
if: always()
uses: ./
with:
reports: '${{ env.REPORTS_INPUT_VALID }}'
targetdir: '${{ env.TARGETDIR_INPUT_ASSEMBLY_FILTERS }}'
reporttypes: 'XmlSummary'
title: 'MyProj Code Coverage AssemblyFilter'
assemblyfilters: '+MyProj*;-MyProj.Tests.*'
tag: 'assembly-filter-tag'
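# Note on filter syntax: ReportGenerator filters are semicolon-delimited patterns where a
# '+' prefix includes and a '-' prefix excludes matches, with '*' as a wildcard. The value
# above keeps the MyProj* assemblies while dropping the MyProj.Tests.* test assemblies.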
- name: 6b - Then the action outcome should be success
if: always()
run: ./test/assert-values-match.sh --name "step outcome" --expected "success" --actual "${{ steps.assembly-filters.outcome }}"
- name: 6c - And the 'error-reason' output should be empty
if: always()
run: ./test/assert-value-is-empty.sh --name "error-reason output" --value "${{ steps.assembly-filters.outputs.error-reason }}"
- name: 6d - And the target directory should exist
if: always()
run: ./test/assert-dir-exists.sh --dir "${{ env.TARGETDIR_INPUT_ASSEMBLY_FILTERS }}"
- name: 6e - And '${{ env.ACTUAL_CONTENTS_ASSEMBLY_FILTERS }}' should contain the expected contents
if: always()
run: |
# Comparing the Summary.xml file will ensure that:
# - The tag is included (reporttype=XmlSummary supports it)
# - The title is included (reporttype=XmlSummary supports it)
# - The assemblies have been filtered
actualFile="${{ env.ACTUAL_CONTENTS_ASSEMBLY_FILTERS }}"
expectedContentsFile="${{ env.EXPECTED_CONTENTS_ASSEMBLY_FILTERS }}"
./test/assert-file-contains-expected-content.sh --fileName "$actualFile" --expectedContents "$expectedContentsFile"
- name: '-------------------------------------------------------------------------------------------------------------'
run: echo ""
- name: ' TEST 7 - CLASS FILTERS '
run: echo ""
- name: 7a - When code-coverage-report-generator is called with class filters
id: class-filters
if: always()
uses: ./
with:
reports: '${{ env.REPORTS_INPUT_VALID }}'
targetdir: '${{ env.TARGETDIR_INPUT_CLASS_FILTERS }}'
reporttypes: 'JsonSummary'
classfilters: '-MyProj.Tests.*;-MyProj.Core*'
tag: 'class-filter-tag'
- name: 7b - Then the action outcome should be success
if: always()
run: ./test/assert-values-match.sh --name "step outcome" --expected "success" --actual "${{ steps.class-filters.outcome }}"
- name: 7c - And the 'error-reason' output should be empty
if: always()
run: ./test/assert-value-is-empty.sh --name "error-reason output" --value "${{ steps.class-filters.outputs.error-reason }}"
- name: 7d - And the target directory should exist
if: always()
run: ./test/assert-dir-exists.sh --dir "${{ env.TARGETDIR_INPUT_CLASS_FILTERS }}"
- name: 7e - And '${{ env.ACTUAL_CONTENTS_CLASS_FILTERS }}' should contain the expected contents
if: always()
run: |
# Comparing the Summary.json file will ensure that:
# - No tag is included (reporttype=JsonSummary does not support it)
# - No title is included (the input was not supplied)
# - The classes have been filtered
actualFile="${{ env.ACTUAL_CONTENTS_CLASS_FILTERS }}"
expectedContentsFile="${{ env.EXPECTED_CONTENTS_CLASS_FILTERS }}"
./test/assert-file-contains-expected-content.sh --fileName "$actualFile" --expectedContents "$expectedContentsFile"
- name: '-------------------------------------------------------------------------------------------------------------'
run: echo ""
- name: ' TEST 8 - FILE FILTERS '
run: echo ""
- name: 8a - When code-coverage-report-generator is called with file filters
id: file-filters
if: always()
uses: ./
with:
reports: '${{ env.REPORTS_INPUT_VALID }}'
targetdir: '${{ env.TARGETDIR_INPUT_FILE_FILTERS }}'
reporttypes: 'TextSummary'
filefilters: '+*EnumExtensions.cs'
tag: 'file-filter-tag'
- name: 8b - Then the action outcome should be success
if: always()
run: ./test/assert-values-match.sh --name "step outcome" --expected "success" --actual "${{ steps.file-filters.outcome }}"
- name: 8c - And the 'error-reason' output should be empty
if: always()
run: ./test/assert-value-is-empty.sh --name "error-reason output" --value "${{ steps.file-filters.outputs.error-reason }}"
- name: 8d - And the target directory should exist
if: always()
run: ./test/assert-dir-exists.sh --dir "${{ env.TARGETDIR_INPUT_FILE_FILTERS }}"
- name: 8e - And '${{ env.ACTUAL_CONTENTS_FILE_FILTERS }}' should contain the expected contents
if: always()
run: |
# Comparing the Summary.txt file will ensure that:
# - No tag is included (reporttype=TextSummary does not support it)
# - No title is included (the input was not supplied)
# - The files have been filtered
actualFile="${{ env.ACTUAL_CONTENTS_FILE_FILTERS }}"
expectedContentsFile="${{ env.EXPECTED_CONTENTS_FILE_FILTERS }}"
./test/assert-file-contains-expected-content.sh --fileName "$actualFile" --expectedContents "$expectedContentsFile"