diff --git a/.github/workflows/Benchmark.yml b/.github/workflows/Benchmark.yml
new file mode 100644
index 0000000..36e2d61
--- /dev/null
+++ b/.github/workflows/Benchmark.yml
@@ -0,0 +1,30 @@
+name: Run benchmarks
+on:
+  pull_request:
+    types: [labeled, opened, synchronize, reopened]
+# Only trigger the benchmark job when the `run benchmark` label is added to the PR
+jobs:
+  Benchmark:
+    runs-on: ubuntu-latest
+    if: contains(github.event.pull_request.labels.*.name, 'run benchmark')
+    steps:
+      - uses: actions/checkout@v2
+      - uses: julia-actions/setup-julia@latest
+      - name: Cache artifacts
+        uses: actions/cache@v1
+        env:
+          cache-name: cache-artifacts
+        with:
+          path: ~/.julia/artifacts
+          key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }}
+          restore-keys: |
+            ${{ runner.os }}-test-${{ env.cache-name }}-
+            ${{ runner.os }}-test-
+            ${{ runner.os }}-
+      - name: Install dependencies
+        run: julia -e 'using Pkg; pkg"add JSON PkgBenchmark BenchmarkCI@0.1"'
+      - name: Run benchmarks
+        run: julia benchmark/run_benchmarks.jl
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index b0c4baa..6ed2db7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,6 +4,8 @@ Manifest.toml
 /docs/build/
 /docs/src/generated/
+/.benchmarkci
+/benchmark/*.json
 .vscode
 *.DS_Store
\ No newline at end of file
diff --git a/benchmark/Project.toml b/benchmark/Project.toml
new file mode 100644
index 0000000..f61ac24
--- /dev/null
+++ b/benchmark/Project.toml
@@ -0,0 +1,6 @@
+[deps]
+BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
+ImageCore = "a09fc81d-aa75-5fe9-8630-4744c3626534"
+ImageMagick = "6218d12a-5da1-5696-b52f-db25d2ecc6d1"
+PkgBenchmark = "32113eaa-f34f-5b0d-bd6c-c81e245fc73d"
+TestImages = "5e47fb64-e119-507b-a336-dd2b206d9990"
diff --git a/benchmark/benchmarks.jl b/benchmark/benchmarks.jl
new file mode 100644
index 0000000..fdf53e1
--- /dev/null
+++ b/benchmark/benchmarks.jl
@@ -0,0 +1,48 @@
+using BenchmarkTools
+using DitherPunk
+using TestImages
+using ImageCore
+
+on_CI = haskey(ENV, "GITHUB_ACTIONS")
+
+img_gray = testimage("fabio_gray_256")
+img_color = testimage("fabio_color_256")
+
+## Define color scheme
+white = RGB{Float32}(1, 1, 1)
+yellow = RGB{Float32}(1, 1, 0)
+green = RGB{Float32}(0, 0.5, 0)
+orange = RGB{Float32}(1, 0.5, 0)
+red = RGB{Float32}(1, 0, 0)
+blue = RGB{Float32}(0, 0, 1)
+cs = [white, yellow, green, orange, red, blue]
+
+# Use one representative algorithm of each type
+algs = Dict(
+    "error diffusion" => FloydSteinberg(),
+    "ordered dithering" => Bayer(; level=3),
+    "closest color" => ClosestColor(),
+    "threshold dithering" => WhiteNoiseThreshold(),
+)
+
+# Tag what is tested for each algorithm type
+SUITE = BenchmarkGroup()
+SUITE["error diffusion"] = BenchmarkGroup(["binary", "color"])
+SUITE["ordered dithering"] = BenchmarkGroup(["binary"])
+SUITE["threshold dithering"] = BenchmarkGroup(["binary"])
+SUITE["closest color"] = BenchmarkGroup(["binary", "color"])
+
+println(SUITE)
+
+for (algname, alg) in algs
+    SUITE[algname]["binary new"] = @benchmarkable dither($(img_gray), $(alg))
+    SUITE[algname]["binary inplace"] = @benchmarkable dither!($(copy(img_gray)), $(alg))
+
+    SUITE[algname]["per-channel new"] = @benchmarkable dither($(img_color), SeparateSpace($(alg)))
+    SUITE[algname]["per-channel inplace"] = @benchmarkable dither!($(copy(img_color)), SeparateSpace($(alg)))
+
+    if "color" in SUITE[algname].tags
+        SUITE[algname]["color new"] = @benchmarkable dither($(img_color), $(alg), $(cs))
+        SUITE[algname]["color inplace"] = @benchmarkable dither!($(copy(img_color)), $(alg), $(cs))
+    end
+end
diff --git a/benchmark/run_benchmarks.jl b/benchmark/run_benchmarks.jl
new file mode 100644
index 0000000..6b65413
--- /dev/null
+++ b/benchmark/run_benchmarks.jl
@@ -0,0 +1,10 @@
+# To run benchmarks locally, BenchmarkCI should be added to the root project.
+# Then call:
+# ```bash
+# julia benchmark/run_benchmarks.jl
+# ```
+using BenchmarkCI
+on_CI = haskey(ENV, "GITHUB_ACTIONS")
+
+BenchmarkCI.judge()
+on_CI ? BenchmarkCI.postjudge() : BenchmarkCI.displayjudgement()
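Note on local usage (not part of the diff): `BenchmarkCI.judge()` compares the current checkout against a baseline ref, which is what the workflow above does on CI. As a lighter-weight way to exercise the `SUITE` defined in `benchmark/benchmarks.jl`, the suite can also be run directly through PkgBenchmark. The snippet below is a sketch, not part of this PR; it assumes PkgBenchmark is installed in the active environment, that the package name `DitherPunk` resolves there, and that `master` is the baseline branch — adjust as needed.

```julia
# Sketch: run benchmark/benchmarks.jl locally via PkgBenchmark.
# Assumptions: PkgBenchmark is installed, DitherPunk is available in the
# active environment, and "master" is the intended baseline ref.
using PkgBenchmark

results = benchmarkpkg("DitherPunk")               # executes the SUITE from benchmark/benchmarks.jl
export_markdown("benchmark_results.md", results)   # write a human-readable summary

# Optional: compare the current working copy against the baseline ref.
comparison = judge("DitherPunk", "master")
export_markdown("benchmark_judgement.md", comparison)
```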