Workflow file for this run

name: GPU benchmark on `main`
on:
  push:
    branches:
      - main
jobs:
  benchmark:
    name: Bench and deploy
    runs-on: ubuntu-latest
    steps:
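      # The first checkout below pulls the shared lurk-lab/ci-workflows repo so its
      # local `ci-env` composite action can be used; the second checkout then checks
      # this repository out into the workspace.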
      # Install deps
      - uses: actions/checkout@v4
        with:
          repository: lurk-lab/ci-workflows
      - uses: ./.github/actions/ci-env
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
      - uses: Swatinem/rust-cache@v2
      - uses: taiki-e/install-action@v2
        with:
          tool: just
      # Run benchmarks and deploy
      - name: Get old benchmarks
        uses: actions/checkout@v4
        with:
          ref: gh-pages
          path: gh-pages
      - name: Install criterion
        run: cargo install cargo-criterion
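      # The next step restores the previously published report under ./target/criterion,
      # presumably so criterion can compare the new measurements against it.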
      - name: Copy old benchmarks locally for comparison
        run: |
          mkdir -p target gh-pages/benchmarks/criterion history
          cp -r gh-pages/benchmarks/criterion target
      - name: Set env vars
        run: |
          echo "SHORT_SHA=$(git rev-parse --short HEAD)" | tee -a $GITHUB_ENV
          echo "LURK_BENCH_OUTPUT=gh-pages" | tee -a $GITHUB_ENV
          echo "LURK_RC=100,600" | tee -a $GITHUB_ENV
      # TODO: Check env vars work
      - name: Run benchmarks
        run: |
          just bench fibonacci
          cp fibonacci-${{ env.SHORT_SHA }}.json ..
        working-directory: ${{ github.workspace }}/benches
      # TODO: Prettify labels for easier viewing
      # Compress the benchmark file and metadata for later analysis
      - name: Compress artifacts
        run: |
          echo $LABELS > labels.md
          tar -cvzf fibonacci-${{ env.SHORT_SHA }}.tar.gz Cargo.lock fibonacci-${{ env.SHORT_SHA }}.json labels.md
      # Outputs plots to `./history`
      - name: Generate historical performance plot
        run: cargo run
      - name: Copy plots and benchmark archive to history
        run: cp *.png fibonacci-${{ env.SHORT_SHA }}.tar.gz history/
      - name: Deploy latest benchmark report
        uses: peaceiris/actions-gh-pages@v3
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ./target/criterion
          destination_dir: benchmarks/criterion
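      # `keep_files: true` below leaves previously published files in the destination
      # untouched, so the history accumulates across runs.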
      - name: Deploy benchmark history
        uses: peaceiris/actions-gh-pages@v3
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: history
          destination_dir: benchmarks/history
          keep_files: true