diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml
new file mode 100644
index 0000000..0cbe638
--- /dev/null
+++ b/.github/workflows/benchmark.yml
@@ -0,0 +1,30 @@
+name: Run benchmarks
+
+on:
+  pull_request:
+    types: [labeled, opened, synchronize, reopened]
+
+jobs:
+  Benchmark:
+    runs-on: ubuntu-latest
+    # Only run when the PR carries the opt-in label, so ordinary PRs stay cheap.
+    if: contains(github.event.pull_request.labels.*.name, 'run benchmark')
+    steps:
+      - uses: actions/checkout@v2
+      - uses: julia-actions/setup-julia@latest
+      - name: Cache artifacts
+        uses: actions/cache@v1
+        env:
+          cache-name: cache-artifacts
+        with:
+          path: ~/.julia/artifacts
+          key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }}
+          restore-keys: |
+            ${{ runner.os }}-test-${{ env.cache-name }}-
+            ${{ runner.os }}-test-
+            ${{ runner.os }}-
+      - name: Install dependencies
+        run: julia -e 'using Pkg; pkg"add JSON PkgBenchmark BenchmarkCI@0.1"'
+      - name: Run benchmarks
+        run: julia benchmark/run_benchmarks.jl
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.gitignore b/.gitignore
index 8df2915..0d873d5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,3 +6,5 @@ docs/build/
 docs/site/
 docs/notebooks/.ipynb_checkpoints
 codecov.io_token
+/.benchmarkci
+/benchmark/*.json
diff --git a/benchmark/Project.toml b/benchmark/Project.toml
new file mode 100644
index 0000000..040f01a
--- /dev/null
+++ b/benchmark/Project.toml
@@ -0,0 +1,6 @@
+[deps]
+BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
+ColorVectorSpace = "c3611d14-8923-5661-9e6a-0046d554d3a4"
+ImageCore = "a09fc81d-aa75-5fe9-8630-4744c3626534"
+PkgBenchmark = "32113eaa-f34f-5b0d-bd6c-c81e245fc73d"
+Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
diff --git a/benchmark/benchmarks.jl b/benchmark/benchmarks.jl
new file mode 100644
index 0000000..bc7c222
--- /dev/null
+++ b/benchmark/benchmarks.jl
@@ -0,0 +1,33 @@
+# Usage:
+#     julia benchmark/run_benchmarks.jl
+
+using BenchmarkTools
+using Random
+using ImageCore, ColorVectorSpace
+
+# Showing a benchmark table of everything doesn't make much sense, hence we need to make
+# some general and mild rules on how benchmark groups
+#
+# Operations are separated into two basic groups:
+#   * baseline: operations on plain array data
+#   * imagecore: operations on data types introduced in Images type systems
+#
+# Trials are organized in the "what-how-property" way, for example, a trial on
+# `colorview` is placed in Bsuite["colorview"]["getindex"]["RGB"]["(256, 256)"], one can
+# read it as:
+#     benchmark the performance of `colorview` for method `getindex` on `RGB` image
+#     of size `(256, 256)`
+#
+# The goals are:
+#   * minimize the performance overhead for operations that have a trivial baseline
+#     implementation
+#   * avoid unexpected performance regression
+
+const SUITE = BenchmarkGroup(
+    "baseline" => BenchmarkGroup(),
+    "imagecore" => BenchmarkGroup()
+)
+const Bsuite = SUITE["baseline"]
+const Csuite = SUITE["imagecore"]
+
+include("views.jl")
diff --git a/benchmark/run_benchmarks.jl b/benchmark/run_benchmarks.jl
new file mode 100644
index 0000000..fa1f0bc
--- /dev/null
+++ b/benchmark/run_benchmarks.jl
@@ -0,0 +1,6 @@
+# To run it locally, BenchmarkCI should be added to root project
+using BenchmarkCI
+on_CI = haskey(ENV, "GITHUB_ACTIONS")
+
+BenchmarkCI.judge()
+on_CI ? BenchmarkCI.postjudge() : BenchmarkCI.displayjudgement()
diff --git a/benchmark/views.jl b/benchmark/views.jl
new file mode 100644
index 0000000..3b800e1
--- /dev/null
+++ b/benchmark/views.jl
@@ -0,0 +1,131 @@
+# Different access patterns (getindex)
+function mysum_elt_boundscheck(A)
+    s = zero(eltype(A))
+    for a in A
+        s += a
+    end
+    s
+end
+function mysum_index_boundscheck(A)
+    s = zero(eltype(A))
+    for I in eachindex(A)
+        s += A[I]
+    end
+    s
+end
+function mysum_elt_inbounds(A)
+    s = zero(eltype(A))
+    @inbounds for a in A
+        s += a
+    end
+    s
+end
+function mysum_index_inbounds_simd(A)
+    s = zero(eltype(A))
+    @inbounds @simd for I in eachindex(A)
+        s += A[I]
+    end
+    s
+end
+# setindex!
+function myfill1!(A, val)
+    f = convert(eltype(A), val)
+    for I in eachindex(A)
+        A[I] = f
+    end
+    A
+end
+function myfill2!(A, val)
+    f = convert(eltype(A), val)
+    @inbounds @simd for I in eachindex(A)
+        A[I] = f
+    end
+    A
+end
+
+#############################
+#   colorview/channelview   #
+#############################
+
+# image_sizes = ((128, 128), (1024, 1024))
+# image_colors = (Gray{Bool}, Gray{N0f8}, Gray{Float32}, RGB{N0f8}, RGB{Float32})
+image_sizes = ((128, 128), )
+image_colors = (Gray{N0f8}, )
+
+getindex_funcs = (("elt_boundscheck", mysum_elt_boundscheck),
+                  ("index_boundscheck", mysum_index_boundscheck),
+                  ("elt_inbounds", mysum_elt_inbounds),
+                  ("index_inbounds_simd", mysum_index_inbounds_simd))
+setindex_funcs = (("index_boundscheck", myfill1!),
+                  ("index_inbounds_simd", myfill2!))
+
+
+for s in (Bsuite, Csuite)
+    s["colorview"] = BenchmarkGroup(["views", ])
+    s["channelview"] = BenchmarkGroup(["views", ])
+    s["channelview"]["getindex"] = BenchmarkGroup(["index", ])
+    s["channelview"]["setindex"] = BenchmarkGroup(["index", ])
+    s["colorview"]["setindex"] = BenchmarkGroup(["index", ])
+    s["colorview"]["getindex"] = BenchmarkGroup(["index", ])
+end
+
+
+for (fname, f) in getindex_funcs
+    for s in (Bsuite, Csuite)
+        s["channelview"]["getindex"][fname] = BenchmarkGroup()
+        s["colorview"]["getindex"][fname] = BenchmarkGroup()
+    end
+
+    for C in image_colors
+        for s in (Bsuite, Csuite)
+            s["channelview"]["getindex"][fname][C] = BenchmarkGroup([string(base_color_type(C)), ])
+            s["colorview"]["getindex"][fname][C] = BenchmarkGroup([string(base_color_type(C)), ])
+        end
+
+        for sz in image_sizes
+            Random.seed!(0)
+            A = rand(C, sz)
+            A_raw = copy(reinterpretc(eltype(C), A))
+            A_color = colorview(base_color_type(C), A_raw)
+            A_chan = channelview(A)
+
+            # baseline
+            Bsuite["channelview"]["getindex"][fname][C][sz] = @benchmarkable $(f)($A_raw)
+            Bsuite["colorview"]["getindex"][fname][C][sz] = @benchmarkable $(f)($A)
+
+            # imagecore
+            Csuite["channelview"]["getindex"][fname][C][sz] = @benchmarkable $(f)($A_chan)
+            Csuite["colorview"]["getindex"][fname][C][sz] = @benchmarkable $(f)($A_color)
+        end
+    end
+end
+
+for (fname, f) in setindex_funcs
+    for s in (Bsuite, Csuite)
+        s["channelview"]["setindex"][fname] = BenchmarkGroup()
+        s["colorview"]["setindex"][fname] = BenchmarkGroup()
+    end
+
+    for C in image_colors
+        for s in (Bsuite, Csuite)
+            s["channelview"]["setindex"][fname][C] = BenchmarkGroup([string(base_color_type(C)), ])
+            s["colorview"]["setindex"][fname][C] = BenchmarkGroup([string(base_color_type(C)), ])
+        end
+
+        for sz in image_sizes
+            Random.seed!(0)
+            A = rand(C, sz)
+            A_raw = copy(reinterpretc(eltype(C), A))
+            A_color = colorview(base_color_type(C), A_raw)
+            A_chan = channelview(A)
+
+            # baseline
+            Bsuite["channelview"]["setindex"][fname][C][sz] = @benchmarkable $(f)($A_raw, $(zero(eltype(C))))
+            Bsuite["colorview"]["setindex"][fname][C][sz] = @benchmarkable $(f)($A, $(zero(C)))
+
+            # imagecore
+            Csuite["channelview"]["setindex"][fname][C][sz] = @benchmarkable $(f)($A_chan, $(zero(eltype(C))))
+            Csuite["colorview"]["setindex"][fname][C][sz] = @benchmarkable $(f)($A_color, $(zero(C)))
+        end
+    end
+end