
fix: rework testing framework #69

Merged · 3 commits · Feb 15, 2024
1 change: 0 additions & 1 deletion .github/workflows/CI.yml
@@ -12,7 +12,6 @@ jobs:
      matrix:
        version:
          - '*' # Latest Release
-         - '~1.6' # Current LTS
        os:
          - ubuntu-latest
          - windows-latest
13 changes: 12 additions & 1 deletion Project.toml
@@ -20,8 +20,19 @@ BSON = "0.3"
BenchmarkTools = "1"
CodecZlib = "0.7"
JSON = "0.21"
Pkg = "1.9"
Revise = "3"
julia = "1.6"
Statistics = "1.9"
julia = "1.9"

[extras]
Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
ReTestItems = "817f1d60-ba6b-4fd5-9520-3cf149f6a823"
Revise = "295af30f-e4ad-537b-8983-00126c2a3abe"
TOML = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
UUIDs = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"

[targets]
test = ["Test", "ReTestItems", "Revise", "UUIDs", "TOML", "Random", "Pkg"]
3 changes: 3 additions & 0 deletions docs/Project.toml
@@ -3,3 +3,6 @@ BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
Example = "4b09cd0b-9172-4840-a79f-b48550c7f881"
PkgJogger = "10150987-6cc1-4b76-abee-b1c1cbd91c01"
+
+[compat]
+Documenter = "1.2"
2 changes: 1 addition & 1 deletion docs/make.jl
@@ -45,7 +45,7 @@ makedocs(;
        "Continuous Benchmarking" => "ci.md",
        "Reference" => "reference.md",
    ],
-   strict=true,
+   checkdocs=:all,
)

deploydocs(;
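
Note: strict=true was removed in Documenter 1.x, which always fails the build on documentation errors; checkdocs=:all additionally errors if any docstring is missing from the manual. A hedged sketch of the relevant makedocs options (using warnonly here is an assumption for illustration, not part of this PR):

using Documenter
makedocs(;
    sitename = "PkgJogger",
    checkdocs = :all,               # error if any docstring is not included in the docs
    warnonly = [:cross_references], # assumption: downgrade selected checks to warnings
)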
12 changes: 0 additions & 12 deletions test/Project.toml

This file was deleted.

48 changes: 0 additions & 48 deletions test/backward_compat.jl

This file was deleted.

49 changes: 49 additions & 0 deletions test/backward_compat_test.jl
@@ -0,0 +1,49 @@
@testitem "compat" setup=[ExamplePkg] begin
using Test
using JSON
using PkgJogger
using CodecZlib
using BenchmarkTools
using Dates

# Get Benchmarking results
jogger, cleanup = ExamplePkg.create_jogger()
b = jogger.benchmark()

# Save using JSON
function save_benchmarks(filename, results::BenchmarkTools.BenchmarkGroup)
# Collect system information to save
mkpath(dirname(filename))
out = Dict(
"julia" => PkgJogger.julia_info(),
"system" => PkgJogger.system_info(),
"datetime" => string(Dates.now()),
"benchmarks" => results,
"git" => PkgJogger.git_info(filename),
)

# Write benchmark to disk
open(GzipCompressorStream, filename, "w") do io
JSON.print(io, out)
end
end

@testset "Compat *.json.gz" begin
f = tempname(; cleanup=false) * ".json.gz"
finalizer(rm, f)
save_benchmarks(f, b)

# Check that the deprecated warming is logged
local b2
b2 = @test_logs (:warn, r"Legacy `\*\.json\.gz` format is deprecated.*") begin
jogger.load_benchmarks(f)
end

# Check that benchmarks are still there
@test b2 isa Dict
@test haskey(b2, "benchmarks")
@test b2["benchmarks"] isa BenchmarkTools.BenchmarkGroup
end

cleanup()
end
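
Note: the rename from backward_compat.jl to backward_compat_test.jl follows ReTestItems' discovery convention: runtests only picks up files ending in _test.jl or _tests.jl, and each @testitem is evaluated in its own module with Test and the package already in scope. A minimal sketch of the implied entry point (file name assumed):

# test/runtests.jl, assumed entry point
using ReTestItems, PkgJogger
runtests(PkgJogger)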
85 changes: 0 additions & 85 deletions test/ci.jl

This file was deleted.

11 changes: 11 additions & 0 deletions test/ci_test.jl
@@ -0,0 +1,11 @@
@testitem "ci" setup=[ExamplePkg] begin
pkgjogger_env = Base.active_project()
jogger, cleanup = ExamplePkg.create_jogger()
dir = abspath(joinpath(jogger.BENCHMARK_DIR, ".."))
env = copy(ENV)
env["JULIA_LOAD_PATH"] = join(["@", "@stdlib", pkgjogger_env], Sys.iswindows() ? ";" : ":")
cmd = Cmd(`$(Base.julia_cmd()) --startup-file=no -e 'using PkgJogger; PkgJogger.ci()'`; env, dir)
p = run(cmd; wait=true)
@test success(p)
cleanup()
end
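
Note: the spawned process needs PkgJogger's environment on its load path; entries are joined with the platform's path-list separator, which the Sys.iswindows branch handles. For illustration (the path below is a placeholder):

sep = Sys.iswindows() ? ";" : ":"   # path-list separator
# "@" is the active project, "@stdlib" the standard library
join(["@", "@stdlib", "/path/to/Project.toml"], sep)
# Unix result: "@:@stdlib:/path/to/Project.toml"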
26 changes: 13 additions & 13 deletions test/judging.jl → test/judge_tests.jl
@@ -1,14 +1,13 @@
@testitem "judging" setup=[ExamplePkg, BenchmarkTests] begin
using PkgJogger
using BenchmarkTools
using Example
@jog Example

include("utils.jl")
jogger, cleanup = ExamplePkg.create_jogger()

function gen_example()
results = JogExample.benchmark()
filename = JogExample.save_benchmarks(results)
dict = JogExample.load_benchmarks(filename)
results = jogger.benchmark()
filename = jogger.save_benchmarks(results)
dict = jogger.load_benchmarks(filename)
uuid = get_uuid(filename)
return results, filename, dict, uuid
end
Expand All @@ -25,7 +24,7 @@ new = gen_example()
old = gen_example()

@testset "JogPkgName.judge($(typeof(n)), $(typeof(o)))" for (n, o) in Iterators.product(new, old)
test_judge(JogExample.judge, n, o)
test_judge(jogger.judge, n, o)
end

@testset "PkgJogger.judge($(typeof(n)), $(typeof(o)))" for (n, o) in Iterators.product(new[1:3], old[1:3])
Expand All @@ -35,24 +34,25 @@ end
@testset "Missing Results - $(typeof(n))" for n in new
@testset "Empty Suite" begin
# Expect an empty judgement
judgement = test_judge(JogExample.judge, n, BenchmarkGroup())
judgement = test_judge(jogger.judge, n, BenchmarkGroup())
isempty(judgement)
end
@testset "Missing Benchmark Judgement" begin
# Get a suite of results to modify
ref = deepcopy(first(new))
ref_leaves = first.(leaves(ref))
ref_leaves = first.(BenchmarkTools.leaves(ref))

# Add a new Trial results
name, trial = first(leaves(ref))
name, trial = first(BenchmarkTools.leaves(ref))
name[end] = rand()
ref[name] = deepcopy(trial)

# Expect the extra benchmark to be skipped
judgement = test_judge(JogExample.judge, n, ref)
judgement_leaves = first.(leaves(judgement))
judgement = test_judge(jogger.judge, n, ref)
judgement_leaves = first.(BenchmarkTools.leaves(judgement))
@test Set(judgement_leaves) == Set(ref_leaves)
end
end
cleanup()

cleanup_example()
end
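
Note: the global @jog Example jogger is replaced by a per-test fixture from the ExamplePkg setup, created and torn down inside each @testitem. The wiring is ReTestItems' @testsetup mechanism; an illustrative, self-contained sketch with invented names (DemoSetup, make_fixture):

using ReTestItems

# A @testsetup module is evaluated once and shared by every @testitem that lists it
@testsetup module DemoSetup
    make_fixture() = (value = 42, cleanup = () -> nothing)
end

# Test items reference the setup by module name
@testitem "uses the fixture" setup=[DemoSetup] begin
    fixture = DemoSetup.make_fixture()
    @test fixture.value == 42
    fixture.cleanup()
end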
76 changes: 0 additions & 76 deletions test/locate_benchmarks.jl

This file was deleted.
