diff --git a/docs/benchmarks.md b/docs/benchmarks.md
index 71a9dca..a8b4ac8 100644
--- a/docs/benchmarks.md
+++ b/docs/benchmarks.md
@@ -260,7 +260,8 @@ using output_types = nvbench::type_list;
 NVBENCH_BENCH_TYPES(benchmark, NVBENCH_TYPE_AXES(input_types, output_types))
   .set_type_axes_names({"InputType", "OutputType"})
   .add_int64_axis("NumInputs", {1000, 10000, 100000, 200000, 200000, 200000})
-  .add_float64_axis("Quality", {0.05, 0.1, 0.25, 0.5, 0.75, 1.});
+  .add_float64_axis("Quality", {0.05, 0.1, 0.25, 0.5, 0.75, 1.})
+  .zip_axes({"NumInputs", "Quality"});
 ```
 
 This tying reduces the total combinations from 24 to 6, reducing the
diff --git a/examples/custom_iteration_spaces.cu b/examples/custom_iteration_spaces.cu
index 92323fd..c733890 100644
--- a/examples/custom_iteration_spaces.cu
+++ b/examples/custom_iteration_spaces.cu
@@ -74,7 +74,7 @@ NVBENCH_BENCH(tied_copy_sweep_grid_shape)
   // Every power of two from 64->1024:
   .add_int64_axis("BlockSize", {32,64,128,256})
   .add_int64_axis("NumBlocks", {1024,512,256,128})
-  .tie_axes({"BlockSize", "NumBlocks"});
+  .zip_axes({"BlockSize", "NumBlocks"});
 
 //==============================================================================
 // under_diag:
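For orientation, and not part of the patch: a minimal standalone C++ sketch of what zipping the two value axes in the docs example above means. The zipped axes advance in lockstep, so each position yields one configuration instead of every pairing being benchmarked; the values are copied from the hunk above.

```cpp
#include <cstddef>
#include <cstdio>
#include <vector>

int main()
{
  // Values copied from the docs/benchmarks.md example above.
  const std::vector<long long> num_inputs{1000, 10000, 100000, 200000, 200000, 200000};
  const std::vector<double> quality{0.05, 0.1, 0.25, 0.5, 0.75, 1.};

  // Zipped axes advance in lockstep: one configuration per position...
  for (std::size_t i = 0; i < num_inputs.size(); ++i)
  {
    std::printf("NumInputs=%lld Quality=%g\n", num_inputs[i], quality[i]);
  }

  // ...instead of one per element of the cartesian product.
  std::printf("zipped configs: %zu, cartesian product: %zu\n",
              num_inputs.size(),
              num_inputs.size() * quality.size());
  return 0;
}
```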
diff --git a/nvbench/axes_metadata.cuh b/nvbench/axes_metadata.cuh
index cb14e97..053ebe6 100644
--- a/nvbench/axes_metadata.cuh
+++ b/nvbench/axes_metadata.cuh
@@ -62,7 +62,7 @@ struct axes_metadata
 
   void add_string_axis(std::string name, std::vector<std::string> data);
 
-  void tie_axes(std::vector<std::string> names);
+  void zip_axes(std::vector<std::string> names);
 
   void user_iteration_axes(std::vector<std::string> names,
diff --git a/nvbench/axes_metadata.cxx b/nvbench/axes_metadata.cxx
index 9dd679d..c39fed9 100644
--- a/nvbench/axes_metadata.cxx
+++ b/nvbench/axes_metadata.cxx
@@ -238,12 +238,12 @@ void reset_iteration_space(
 }
 } // namespace
 
-void axes_metadata::tie_axes(std::vector<std::string> names)
+void axes_metadata::zip_axes(std::vector<std::string> names)
 {
   NVBENCH_THROW_IF((names.size() < 2),
                    std::runtime_error,
                    "At least two axis names ( {} provided ) need to be provided "
-                   "when using tie_axes.",
+                   "when using zip_axes.",
                    names.size());
 
   // compute the numeric indices for each name we have
@@ -269,7 +269,7 @@ void axes_metadata::tie_axes(std::vector<std::string> names)
   reset_iteration_space(m_value_space, input_indices);
 
   // add the new tied iteration space
-  auto tied = std::make_unique<tie_axis_space>(std::move(input_indices),
+  auto tied = std::make_unique<zip_axis_space>(std::move(input_indices),
                                                std::move(output_indices));
   m_value_space.push_back(std::move(tied));
 }
diff --git a/nvbench/axis_iteration_space.cuh b/nvbench/axis_iteration_space.cuh
index 7c045be..a17634e 100644
--- a/nvbench/axis_iteration_space.cuh
+++ b/nvbench/axis_iteration_space.cuh
@@ -67,11 +67,11 @@ struct linear_axis_space final : axis_space_base
   std::size_t do_valid_count(const axes_info &info) const override;
 };
 
-struct tie_axis_space final : axis_space_base
+struct zip_axis_space final : axis_space_base
 {
-  tie_axis_space(std::vector<std::size_t> input_indices,
+  zip_axis_space(std::vector<std::size_t> input_indices,
                  std::vector<std::size_t> output_indices);
-  ~tie_axis_space();
+  ~zip_axis_space();
 
   std::unique_ptr<axis_space_base> do_clone() const override;
   detail::axis_space_iterator do_iter(axes_info info) const override;
diff --git a/nvbench/axis_iteration_space.cxx b/nvbench/axis_iteration_space.cxx
index 2f93139..885a1ea 100644
--- a/nvbench/axis_iteration_space.cxx
+++ b/nvbench/axis_iteration_space.cxx
@@ -127,14 +127,14 @@ std::unique_ptr<axis_space_base> linear_axis_space::do_clone() const
   return std::make_unique<linear_axis_space>(*this);
 }
 
-tie_axis_space::tie_axis_space(std::vector<std::size_t> input_indices,
+zip_axis_space::zip_axis_space(std::vector<std::size_t> input_indices,
                                std::vector<std::size_t> output_indices)
     : axis_space_base(std::move(input_indices), std::move(output_indices))
 {}
 
-tie_axis_space::~tie_axis_space() = default;
+zip_axis_space::~zip_axis_space() = default;
 
-detail::axis_space_iterator tie_axis_space::do_iter(axes_info info) const
+detail::axis_space_iterator zip_axis_space::do_iter(axes_info info) const
 {
   std::vector<std::size_t> locs = m_output_indices;
   auto update_func = [=](std::size_t inc_index,
@@ -150,19 +150,19 @@ detail::axis_space_iterator tie_axis_space::do_iter(axes_info info) const
   return detail::make_space_iterator(locs.size(), info[0].size, update_func);
 }
 
-std::size_t tie_axis_space::do_size(const axes_info &info) const
+std::size_t zip_axis_space::do_size(const axes_info &info) const
 {
   return info[0].size;
 }
 
-std::size_t tie_axis_space::do_valid_count(const axes_info &info) const
+std::size_t zip_axis_space::do_valid_count(const axes_info &info) const
 {
   return info[0].active_size;
 }
 
-std::unique_ptr<axis_space_base> tie_axis_space::do_clone() const
+std::unique_ptr<axis_space_base> zip_axis_space::do_clone() const
 {
-  return std::make_unique<tie_axis_space>(*this);
+  return std::make_unique<zip_axis_space>(*this);
 }
 
 user_axis_space::user_axis_space(std::vector<std::size_t> input_indices,
diff --git a/nvbench/benchmark_base.cuh b/nvbench/benchmark_base.cuh
index 94908d1..61269e1 100644
--- a/nvbench/benchmark_base.cuh
+++ b/nvbench/benchmark_base.cuh
@@ -111,9 +111,9 @@ struct benchmark_base
     return *this;
   }
 
-  benchmark_base &tie_axes(std::vector<std::string> names)
+  benchmark_base &zip_axes(std::vector<std::string> names)
   {
-    m_axes.tie_axes(std::move(names));
+    m_axes.zip_axes(std::move(names));
    return *this;
   }
 
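To show the renamed public entry point from benchmark_base.cuh in context, a minimal usage sketch follows. The benchmark name `zip_axes_demo` and its axis values are hypothetical, the body is stubbed out with `state.skip`, and the usual `NVBENCH_MAIN` entry point is assumed to exist elsewhere in the translation unit.

```cpp
#include <nvbench/nvbench.cuh>

// Hypothetical benchmark used only to show the call shape; a real
// benchmark body sized by the zipped values would go here.
void zip_axes_demo(nvbench::state &state)
{
  state.skip("illustration only");
}

NVBENCH_BENCH(zip_axes_demo)
  .add_int64_axis("NumInputs", {1000, 10000, 100000})
  .add_float64_axis("Quality", {0.05, 0.5, 1.0})
  // With the renamed API: lockstep iteration over the two axes,
  // i.e. 3 configurations instead of 3 x 3.
  .zip_axes({"NumInputs", "Quality"});
```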
diff --git a/testing/axes_iteration_space.cu b/testing/axes_iteration_space.cu
index eb3862e..fca5757 100644
--- a/testing/axes_iteration_space.cu
+++ b/testing/axes_iteration_space.cu
@@ -77,13 +77,13 @@ void template_no_op_generator(nvbench::state &state,
 NVBENCH_DEFINE_CALLABLE_TEMPLATE(template_no_op_generator,
                                  template_no_op_callable);
 
-void test_tie_axes()
+void test_zip_axes()
 {
   using benchmark_type = nvbench::benchmark;
   benchmark_type bench;
   bench.add_float64_axis("F64 Axis", {0., .1, .25, .5, 1.});
   bench.add_int64_axis("I64 Axis", {1, 3, 2, 4, 5});
-  bench.tie_axes({"F64 Axis", "I64 Axis"});
+  bench.zip_axes({"F64 Axis", "I64 Axis"});
 
   ASSERT_MSG(bench.get_config_count() == 5 * bench.get_devices().size(),
              "Got {}",
@@ -97,10 +97,10 @@ void test_tie_invalid_names()
   bench.add_float64_axis("F64 Axis", {0., .1, .25, .5, 1.});
   bench.add_int64_axis("I64 Axis", {1, 3, 2});
 
-  ASSERT_THROWS_ANY(bench.tie_axes({"F32 Axis", "I64 Axis"}));
-  ASSERT_THROWS_ANY(bench.tie_axes({"F32 Axis"}));
-  ASSERT_THROWS_ANY(bench.tie_axes({""}));
-  ASSERT_THROWS_ANY(bench.tie_axes(std::vector<std::string>()));
+  ASSERT_THROWS_ANY(bench.zip_axes({"F32 Axis", "I64 Axis"}));
+  ASSERT_THROWS_ANY(bench.zip_axes({"F32 Axis"}));
+  ASSERT_THROWS_ANY(bench.zip_axes({""}));
+  ASSERT_THROWS_ANY(bench.zip_axes(std::vector<std::string>()));
 }
 
 void test_tie_unequal_length()
@@ -110,8 +110,8 @@ void test_tie_unequal_length()
   bench.add_float64_axis("F64 Axis", {0., .1, .25, .5, 1.});
   bench.add_int64_axis("I64 Axis", {1, 3, 2});
 
-  bench.tie_axes({"I64 Axis", "F64 Axis"});
-  ASSERT_THROWS_ANY(bench.tie_axes({"F64 Axis", "I64 Axis"}));
+  bench.zip_axes({"I64 Axis", "F64 Axis"});
+  ASSERT_THROWS_ANY(bench.zip_axes({"F64 Axis", "I64 Axis"}));
 }
 
 void test_tie_type_axi()
@@ -126,10 +126,10 @@ void test_tie_type_axi()
   bench.add_float64_axis("F64 Axis", {0., .1, .25, .5, 1.});
   bench.add_int64_axis("I64 Axis", {1, 3, 2});
 
-  ASSERT_THROWS_ANY(bench.tie_axes({"F64 Axis", "Float"}));
+  ASSERT_THROWS_ANY(bench.zip_axes({"F64 Axis", "Float"}));
 }
 
-void test_retie_axes()
+void test_rezip_axes()
 {
   using benchmark_type = nvbench::benchmark;
   benchmark_type bench;
@@ -142,20 +142,20 @@
                              .1,
                            });
 
-  bench.tie_axes({"FAxis_5", "IAxis_A"});
-  bench.tie_axes({"IAxis_B", "FAxis_5", "IAxis_A"}); // re-tie
+  bench.zip_axes({"FAxis_5", "IAxis_A"});
+  bench.zip_axes({"IAxis_B", "FAxis_5", "IAxis_A"}); // re-zip
   ASSERT_MSG(bench.get_config_count() == 10 * bench.get_devices().size(),
              "Got {}",
              bench.get_config_count());
 
-  bench.tie_axes({"FAxis_5", "IAxis_A"});
+  bench.zip_axes({"FAxis_5", "IAxis_A"});
 
   ASSERT_MSG(bench.get_config_count() == 50 * bench.get_devices().size(),
              "Got {}",
              bench.get_config_count());
 }
 
-void test_retie_axes2()
+void test_rezip_axes2()
 {
   using benchmark_type = nvbench::benchmark;
   benchmark_type bench;
@@ -170,17 +170,17 @@
                              .1,
                            });
 
-  bench.tie_axes({"IAxis_A", "IAxis_B", "IAxis_C"});
-  bench.tie_axes({"FAxis_1", "FAxis_2"});
-  bench.tie_axes(
+  bench.zip_axes({"IAxis_A", "IAxis_B", "IAxis_C"});
+  bench.zip_axes({"FAxis_1", "FAxis_2"});
+  bench.zip_axes(
     {"IAxis_A", "IAxis_B", "IAxis_C", "FAxis_1", "FAxis_2"}); // re-tie
   ASSERT_MSG(bench.get_config_count() == 10 * bench.get_devices().size(),
              "Got {}",
              bench.get_config_count());
 
-  bench.tie_axes({"IAxis_A", "IAxis_B", "IAxis_C"});
-  bench.tie_axes({"FAxis_1", "FAxis_2"});
+  bench.zip_axes({"IAxis_A", "IAxis_B", "IAxis_C"});
+  bench.zip_axes({"FAxis_1", "FAxis_2"});
 
   ASSERT_MSG(bench.get_config_count() == 50 * bench.get_devices().size(),
              "Got {}",
             bench.get_config_count());
@@ -195,7 +195,7 @@
   bench.add_int64_power_of_two_axis("I64 POT Axis", {10, 20});
   bench.add_int64_axis("I64 Axis", {10, 20});
   bench.add_float64_axis("F64 Axis", {0., .1, .25});
-  bench.tie_axes({"F64 Axis", "Strings"});
+  bench.zip_axes({"F64 Axis", "Strings"});
 
   const auto expected_count = bench.get_config_count();
 
@@ -316,11 +316,11 @@ void test_user_axes()
 
 int main()
 {
-  test_tie_axes();
+  test_zip_axes();
   test_tie_invalid_names();
   test_tie_unequal_length();
   test_tie_type_axi();
-  test_retie_axes();
-  test_retie_axes2();
+  test_rezip_axes();
+  test_rezip_axes2();
   test_tie_clone();
 }
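As a plain C++ model of the preconditions the tests above exercise: zip_axes rejects fewer than two names, names that do not resolve to an existing value axis, and axes of unequal length; type axes cannot be zipped either, which this sketch does not model. The helper name `check_zip` and the axis lengths are illustrative only, not the library's implementation.

```cpp
#include <cstddef>
#include <cstdio>
#include <map>
#include <stdexcept>
#include <string>
#include <vector>

// Throws, like zip_axes, when the request cannot form a valid zipped group.
void check_zip(const std::map<std::string, std::size_t> &value_axis_lengths,
               const std::vector<std::string> &names)
{
  if (names.size() < 2)
  {
    throw std::runtime_error("at least two axis names are required");
  }
  const auto first = value_axis_lengths.find(names.front());
  for (const auto &name : names)
  {
    const auto it = value_axis_lengths.find(name);
    if (it == value_axis_lengths.end())
    {
      throw std::runtime_error("unknown value axis: " + name);
    }
    if (it->second != first->second)
    {
      throw std::runtime_error("zipped axes must have equal lengths");
    }
  }
}

int main()
{
  // Mirrors the fixtures above: a 5-value float axis and a 3-value int axis.
  const std::map<std::string, std::size_t> axes{{"F64 Axis", 5}, {"I64 Axis", 3}};

  try
  {
    check_zip(axes, {"F64 Axis", "I64 Axis"}); // rejected: lengths 5 vs 3
  }
  catch (const std::exception &e)
  {
    std::printf("rejected: %s\n", e.what());
  }
  return 0;
}
```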