From f1c20c4826498deaf9b16f9135dac30d8eb346bc Mon Sep 17 00:00:00 2001
From: Johnny Chen
Date: Mon, 9 Mar 2020 02:09:39 +0800
Subject: [PATCH 1/2] initial benchmark with BenchmarkTools

---
 benchmark/Project.toml    |  10 +++
 benchmark/runbenchmark.jl |  47 ++++++++++++++
 benchmark/views.jl        | 131 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 188 insertions(+)
 create mode 100644 benchmark/Project.toml
 create mode 100644 benchmark/runbenchmark.jl
 create mode 100644 benchmark/views.jl

diff --git a/benchmark/Project.toml b/benchmark/Project.toml
new file mode 100644
index 0000000..a3b8ad4
--- /dev/null
+++ b/benchmark/Project.toml
@@ -0,0 +1,10 @@
+[deps]
+BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
+ImageCore = "a09fc81d-aa75-5fe9-8630-4744c3626534"
+PkgBenchmark = "32113eaa-f34f-5b0d-bd6c-c81e245fc73d"
+Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
+TerminalLoggers = "5d786b92-1e48-4d6f-9151-6b4477ca9bed"
+
+[compat]
+BenchmarkTools = "0.5"
+TerminalLoggers = "0.1"
diff --git a/benchmark/runbenchmark.jl b/benchmark/runbenchmark.jl
new file mode 100644
index 0000000..11af7d6
--- /dev/null
+++ b/benchmark/runbenchmark.jl
@@ -0,0 +1,47 @@
+using BenchmarkTools
+using Random
+using Logging
+using TerminalLoggers
+using ImageCore, ColorVectorSpace
+
+# Showing a benchmark table of everything doesn't make much sense, so we follow
+# some general and mild rules on how benchmarks are grouped.
+#
+# Operations are separated into two basic groups:
+# * baseline: operations on plain array data
+# * imagecore: operations on data types introduced by the Images type system
+#
+# Trials are organized in the "what-how-property" way; for example, a trial on
+# `colorview` is placed in Bsuite["colorview"]["getindex"]["RGB"]["(256, 256)"], which
+# one can read as:
+#     benchmark the performance of `colorview` for method `getindex` on an `RGB` image
+#     of size `(256, 256)`
+#
+# The goals are:
+# * minimize the performance overhead for operations that have a trivial baseline
+#   implementation
+# * avoid unexpected performance regressions
+
+const SUITE = BenchmarkGroup(
+    "baseline" => BenchmarkGroup(),
+    "imagecore" => BenchmarkGroup()
+)
+const Bsuite = SUITE["baseline"]
+const Csuite = SUITE["imagecore"]
+
+results = nothing
+with_logger(TerminalLogger()) do
+    global results
+
+    include("views.jl")
+
+
+    tune!(SUITE; verbose=true)
+    results = run(SUITE; verbose=true)
+end
+
+
+# TODO: export benchmark results
+results
+
+judgement = median(results)
diff --git a/benchmark/views.jl b/benchmark/views.jl
new file mode 100644
index 0000000..3b800e1
--- /dev/null
+++ b/benchmark/views.jl
@@ -0,0 +1,131 @@
+# Different access patterns (getindex)
+function mysum_elt_boundscheck(A)
+    s = zero(eltype(A))
+    for a in A
+        s += a
+    end
+    s
+end
+function mysum_index_boundscheck(A)
+    s = zero(eltype(A))
+    for I in eachindex(A)
+        s += A[I]
+    end
+    s
+end
+function mysum_elt_inbounds(A)
+    s = zero(eltype(A))
+    @inbounds for a in A
+        s += a
+    end
+    s
+end
+function mysum_index_inbounds_simd(A)
+    s = zero(eltype(A))
+    @inbounds @simd for I in eachindex(A)
+        s += A[I]
+    end
+    s
+end
+# setindex!
+function myfill1!(A, val)
+    f = convert(eltype(A), val)
+    for I in eachindex(A)
+        A[I] = f
+    end
+    A
+end
+function myfill2!(A, val)
+    f = convert(eltype(A), val)
+    @inbounds @simd for I in eachindex(A)
+        A[I] = f
+    end
+    A
+end
+
+#############################
+#   colorview/channelview   #
+#############################
+
+# image_sizes = ((128, 128), (1024, 1024))
+# image_colors = (Gray{Bool}, Gray{N0f8}, Gray{Float32}, RGB{N0f8}, RGB{Float32})
+image_sizes = ((128, 128), )
+image_colors = (Gray{N0f8}, )
+
+getindex_funcs = (("elt_boundscheck", mysum_elt_boundscheck),
+                  ("index_boundscheck", mysum_index_boundscheck),
+                  ("elt_inbounds", mysum_elt_inbounds),
+                  ("index_inbounds_simd", mysum_index_inbounds_simd))
+setindex_funcs = (("index_boundscheck", myfill1!),
+                  ("index_inbounds_simd", myfill2!))
+
+
+for s in (Bsuite, Csuite)
+    s["colorview"] = BenchmarkGroup(["views", ])
+    s["channelview"] = BenchmarkGroup(["views", ])
+    s["channelview"]["getindex"] = BenchmarkGroup(["index", ])
+    s["channelview"]["setindex"] = BenchmarkGroup(["index", ])
+    s["colorview"]["setindex"] = BenchmarkGroup(["index", ])
+    s["colorview"]["getindex"] = BenchmarkGroup(["index", ])
+end
+
+
+for (fname, f) in getindex_funcs
+    for s in (Bsuite, Csuite)
+        s["channelview"]["getindex"][fname] = BenchmarkGroup()
+        s["colorview"]["getindex"][fname] = BenchmarkGroup()
+    end
+
+    for C in image_colors
+        for s in (Bsuite, Csuite)
+            s["channelview"]["getindex"][fname][C] = BenchmarkGroup([string(base_color_type(C)), ])
+            s["colorview"]["getindex"][fname][C] = BenchmarkGroup([string(base_color_type(C)), ])
+        end
+
+        for sz in image_sizes
+            Random.seed!(0)
+            A = rand(C, sz)
+            A_raw = copy(reinterpretc(eltype(C), A))
+            A_color = colorview(base_color_type(C), A_raw)
+            A_chan = channelview(A)
+
+            # baseline
+            Bsuite["channelview"]["getindex"][fname][C][sz] = @benchmarkable $(f)($A_raw)
+            Bsuite["colorview"]["getindex"][fname][C][sz] = @benchmarkable $(f)($A)
+
+            # imagecore
+            Csuite["channelview"]["getindex"][fname][C][sz] = @benchmarkable $(f)($A_chan)
+            Csuite["colorview"]["getindex"][fname][C][sz] = @benchmarkable $(f)($A_color)
+        end
+    end
+end
+
+for (fname, f) in setindex_funcs
+    for s in (Bsuite, Csuite)
+        s["channelview"]["setindex"][fname] = BenchmarkGroup()
+        s["colorview"]["setindex"][fname] = BenchmarkGroup()
+    end
+
+    for C in image_colors
+        for s in (Bsuite, Csuite)
+            s["channelview"]["setindex"][fname][C] = BenchmarkGroup([string(base_color_type(C)), ])
+            s["colorview"]["setindex"][fname][C] = BenchmarkGroup([string(base_color_type(C)), ])
+        end
+
+        for sz in image_sizes
+            Random.seed!(0)
+            A = rand(C, sz)
+            A_raw = copy(reinterpretc(eltype(C), A))
+            A_color = colorview(base_color_type(C), A_raw)
+            A_chan = channelview(A)
+
+            # baseline
+            Bsuite["channelview"]["setindex"][fname][C][sz] = @benchmarkable $(f)($A_raw, $(zero(eltype(C))))
+            Bsuite["colorview"]["setindex"][fname][C][sz] = @benchmarkable $(f)($A, $(zero(C)))
+
+            # imagecore
+            Csuite["channelview"]["setindex"][fname][C][sz] = @benchmarkable $(f)($A_chan, $(zero(eltype(C))))
+            Csuite["colorview"]["setindex"][fname][C][sz] = @benchmarkable $(f)($A_color, $(zero(C)))
+        end
+    end
+end

From 1c42d0e0e244bba2d377d9097c6550aaf2ba59c0 Mon Sep 17 00:00:00 2001
From: Johnny Chen
Date: Fri, 3 Jul 2020 19:18:08 +0800
Subject: [PATCH 2/2] setup benchmark CI

---
 .github/workflows/benchmark.yml              | 30 ++++++++++++++++++++
 .gitignore                                   |  2 ++
 benchmark/Project.toml                       |  5 ----
 benchmark/{runbenchmark.jl => benchmarks.jl} | 22 +++----------
 benchmark/run_benchmark.jl                   |  6 ++++
 5 files changed, 42 insertions(+), 23 deletions(-)
 create mode 100644 .github/workflows/benchmark.yml
 rename benchmark/{runbenchmark.jl => benchmarks.jl} (78%)
 create mode 100644 benchmark/run_benchmark.jl

diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml
new file mode 100644
index 0000000..0cbe638
--- /dev/null
+++ b/.github/workflows/benchmark.yml
@@ -0,0 +1,30 @@
+name: Run benchmarks
+
+on:
+  pull_request:
+    types: [labeled, opened, synchronize, reopened]
+
+jobs:
+  Benchmark:
+    runs-on: ubuntu-latest
+    if: contains(github.event.pull_request.labels.*.name, 'run benchmark')
+    steps:
+      - uses: actions/checkout@v2
+      - uses: julia-actions/setup-julia@latest
+      - name: Cache artifacts
+        uses: actions/cache@v1
+        env:
+          cache-name: cache-artifacts
+        with:
+          path: ~/.julia/artifacts
+          key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }}
+          restore-keys: |
+            ${{ runner.os }}-test-${{ env.cache-name }}-
+            ${{ runner.os }}-test-
+            ${{ runner.os }}-
+      - name: Install dependencies
+        run: julia -e 'using Pkg; pkg"add JSON PkgBenchmark BenchmarkCI@0.1"'
+      - name: Run benchmarks
+        run: julia benchmark/run_benchmark.jl
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.gitignore b/.gitignore
index 8df2915..0d873d5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,3 +6,5 @@ docs/build/
 docs/site/
 docs/notebooks/.ipynb_checkpoints
 codecov.io_token
+/.benchmarkci
+/benchmark/*.json
diff --git a/benchmark/Project.toml b/benchmark/Project.toml
index a3b8ad4..040f01a 100644
--- a/benchmark/Project.toml
+++ b/benchmark/Project.toml
@@ -3,8 +3,3 @@ BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
 ImageCore = "a09fc81d-aa75-5fe9-8630-4744c3626534"
 PkgBenchmark = "32113eaa-f34f-5b0d-bd6c-c81e245fc73d"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
-TerminalLoggers = "5d786b92-1e48-4d6f-9151-6b4477ca9bed"
-
-[compat]
-BenchmarkTools = "0.5"
-TerminalLoggers = "0.1"
diff --git a/benchmark/runbenchmark.jl b/benchmark/benchmarks.jl
similarity index 78%
rename from benchmark/runbenchmark.jl
rename to benchmark/benchmarks.jl
index 11af7d6..bc7c222 100644
--- a/benchmark/runbenchmark.jl
+++ b/benchmark/benchmarks.jl
@@ -1,7 +1,8 @@
+# Usage:
+# julia benchmark/run_benchmark.jl
+
 using BenchmarkTools
 using Random
-using Logging
-using TerminalLoggers
 using ImageCore, ColorVectorSpace
 
 # Showing a benchmark table of everything doesn't make much sense, so we follow
@@ -29,19 +30,4 @@ const SUITE = BenchmarkGroup(
     "baseline" => BenchmarkGroup(),
     "imagecore" => BenchmarkGroup()
 )
 const Bsuite = SUITE["baseline"]
 const Csuite = SUITE["imagecore"]
 
-results = nothing
-with_logger(TerminalLogger()) do
-    global results
-
-    include("views.jl")
-
-
-    tune!(SUITE; verbose=true)
-    results = run(SUITE; verbose=true)
-end
-
-
-# TODO: export benchmark results
-results
-
-judgement = median(results)
+include("views.jl")
diff --git a/benchmark/run_benchmark.jl b/benchmark/run_benchmark.jl
new file mode 100644
index 0000000..fa1f0bc
--- /dev/null
+++ b/benchmark/run_benchmark.jl
@@ -0,0 +1,6 @@
+# To run it locally, BenchmarkCI should be added to the root project
+using BenchmarkCI
+on_CI = haskey(ENV, "GITHUB_ACTIONS")
+
+BenchmarkCI.judge()
+on_CI ? BenchmarkCI.postjudge() : BenchmarkCI.displayjudgement()
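
For local spot checks, a single trial can be pulled out of SUITE by following the
"what-how-property" path described in benchmarks.jl above. A minimal sketch, assuming the
benchmark project's dependencies are installed and using the default `Gray{N0f8}` color
and `(128, 128)` size from views.jl (run from the repository root):

    using BenchmarkTools, ImageCore
    include("benchmark/benchmarks.jl")   # defines SUITE, Bsuite, Csuite

    # imagecore-side `channelview` getindex, summed with the @inbounds access pattern
    b = Csuite["channelview"]["getindex"]["elt_inbounds"][Gray{N0f8}][(128, 128)]
    tune!(b)          # pick evaluation/sample parameters for this one benchmark
    display(run(b))   # run it and show the Trial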
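
Besides the BenchmarkCI flow wired up in run_benchmark.jl, the suite can also be run and
exported by hand with PkgBenchmark, which is already listed in benchmark/Project.toml.
This is only a sketch: the output file names are placeholders, and `master` is assumed to
be the baseline branch.

    using PkgBenchmark

    # Run benchmark/benchmarks.jl for the current checkout and save a report.
    results = benchmarkpkg("ImageCore")
    export_markdown("results.md", results)      # hypothetical output path

    # Compare the current state of the package against a baseline ref.
    judgement = judge("ImageCore", "master")    # assumed baseline branch
    export_markdown("judgement.md", judgement)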