[WIP] Adds specializations for handling sparse matrix unary operations #2599

Draft: wants to merge 19 commits into base: develop

Changes from 17 commits

Commits (19)
3c0b5ea
Adds specializations for handling sparse matrix unary operations and …
SteveBronder Oct 5, 2021
c647ad7
have checks for the correct return type for sparse matrix unary
SteveBronder Oct 5, 2021
7589e59
update tests
SteveBronder Oct 5, 2021
d141664
[Jenkins] auto-formatting by clang-format version 6.0.0-1ubuntu2~16.0…
stan-buildbot Oct 5, 2021
27d2a7b
All sparse unary functions return sparse matrices
SteveBronder Oct 7, 2021
a7971f2
[Jenkins] auto-formatting by clang-format version 6.0.0-1ubuntu2~16.0…
stan-buildbot Oct 7, 2021
83eb6fd
Small cleanup
SteveBronder Oct 7, 2021
e53238e
[Jenkins] auto-formatting by clang-format version 6.0.0-1ubuntu2~16.0…
stan-buildbot Oct 7, 2021
01f5ea7
fix headers
SteveBronder Oct 7, 2021
6ca05af
add cos
SteveBronder Oct 7, 2021
b6144b2
apply zero or nonzeroing to unary funcs
SteveBronder Oct 7, 2021
03d446d
[Jenkins] auto-formatting by clang-format version 6.0.0-1ubuntu2~16.0…
stan-buildbot Oct 7, 2021
af21d26
fix headers
SteveBronder Oct 8, 2021
787f3c2
Merge branch 'develop' into feature/unary-sparsematrix-ops
SteveBronder Mar 30, 2022
30190e3
fixup log1m_inv_logit return type so it just uses auto
SteveBronder Mar 31, 2022
3723203
Merge commit 'b285b51a494ebf5aeb1434b0e34acbf275cabbd7' into HEAD
yashikno Mar 31, 2022
d7adc5e
[Jenkins] auto-formatting by clang-format version 10.0.0-4ubuntu1
stan-buildbot Mar 31, 2022
dc4c178
Merge remote-tracking branch 'origin/develop' into feature/unary-spar…
SteveBronder May 17, 2022
729456d
move opencl headers below fwd in mix.hpp
SteveBronder May 17, 2022
2 changes: 1 addition & 1 deletion stan/math/prim/fun/acos.hpp
@@ -47,7 +47,7 @@ template <typename Container,
require_all_not_nonscalar_prim_or_rev_kernel_expression_t<
Container>* = nullptr>
inline auto acos(const Container& x) {
return apply_scalar_unary<acos_fun, Container>::apply(x);
return apply_scalar_unary<acos_fun, Container, true>::apply(x);
}

/**
2 changes: 1 addition & 1 deletion stan/math/prim/fun/cos.hpp
@@ -42,7 +42,7 @@ template <typename Container,
require_all_not_nonscalar_prim_or_rev_kernel_expression_t<
Container>* = nullptr>
inline auto cos(const Container& x) {
return apply_scalar_unary<cos_fun, Container>::apply(x);
return apply_scalar_unary<cos_fun, Container, true>::apply(x);
}

/**
2 changes: 1 addition & 1 deletion stan/math/prim/fun/digamma.hpp
@@ -75,7 +75,7 @@ template <typename T,
require_not_nonscalar_prim_or_rev_kernel_expression_t<T>* = nullptr,
require_not_var_matrix_t<T>* = nullptr>
inline auto digamma(const T& x) {
return apply_scalar_unary<digamma_fun, T>::apply(x);
return apply_scalar_unary<digamma_fun, T, true>::apply(x);
}

} // namespace math
2 changes: 1 addition & 1 deletion stan/math/prim/fun/erfc.hpp
@@ -37,7 +37,7 @@ template <
require_all_not_nonscalar_prim_or_rev_kernel_expression_t<T>* = nullptr,
require_not_var_matrix_t<T>* = nullptr>
inline auto erfc(const T& x) {
return apply_scalar_unary<erfc_fun, T>::apply(x);
return apply_scalar_unary<erfc_fun, T, true>::apply(x);
}

} // namespace math
2 changes: 1 addition & 1 deletion stan/math/prim/fun/exp.hpp
@@ -45,7 +45,7 @@ template <
require_not_nonscalar_prim_or_rev_kernel_expression_t<Container>* = nullptr,
require_not_var_matrix_t<Container>* = nullptr>
inline auto exp(const Container& x) {
return apply_scalar_unary<exp_fun, Container>::apply(x);
return apply_scalar_unary<exp_fun, Container, true>::apply(x);
}

/**
2 changes: 1 addition & 1 deletion stan/math/prim/fun/exp2.hpp
@@ -40,7 +40,7 @@ template <
require_all_not_nonscalar_prim_or_rev_kernel_expression_t<T>* = nullptr,
require_not_var_matrix_t<T>* = nullptr>
inline auto exp2(const T& x) {
return apply_scalar_unary<exp2_fun, T>::apply(x);
return apply_scalar_unary<exp2_fun, T, true>::apply(x);
}

} // namespace math
2 changes: 1 addition & 1 deletion stan/math/prim/fun/inv.hpp
@@ -35,7 +35,7 @@ template <
typename T, require_not_container_st<std::is_arithmetic, T>* = nullptr,
require_all_not_nonscalar_prim_or_rev_kernel_expression_t<T>* = nullptr>
inline auto inv(const T& x) {
return apply_scalar_unary<inv_fun, T>::apply(x);
return apply_scalar_unary<inv_fun, T, true>::apply(x);
}

/**
2 changes: 1 addition & 1 deletion stan/math/prim/fun/inv_Phi.hpp
@@ -178,7 +178,7 @@ template <
require_all_not_nonscalar_prim_or_rev_kernel_expression_t<T>* = nullptr,
require_not_var_matrix_t<T>* = nullptr>
inline auto inv_Phi(const T& x) {
return apply_scalar_unary<inv_Phi_fun, T>::apply(x);
return apply_scalar_unary<inv_Phi_fun, T, true>::apply(x);
}

} // namespace math
2 changes: 1 addition & 1 deletion stan/math/prim/fun/inv_cloglog.hpp
@@ -77,7 +77,7 @@ template <typename Container,
require_all_not_nonscalar_prim_or_rev_kernel_expression_t<
Container>* = nullptr>
inline auto inv_cloglog(const Container& x) {
return apply_scalar_unary<inv_cloglog_fun, Container>::apply(x);
return apply_scalar_unary<inv_cloglog_fun, Container, true>::apply(x);
}

/**
2 changes: 1 addition & 1 deletion stan/math/prim/fun/inv_logit.hpp
@@ -85,7 +85,7 @@ template <
typename T, require_not_var_matrix_t<T>* = nullptr,
require_all_not_nonscalar_prim_or_rev_kernel_expression_t<T>* = nullptr>
inline auto inv_logit(const T& x) {
return apply_scalar_unary<inv_logit_fun, T>::apply(x);
return apply_scalar_unary<inv_logit_fun, T, true>::apply(x);
}

// TODO(Tadej): Eigen is introducing their implementation logistic() of this
2 changes: 1 addition & 1 deletion stan/math/prim/fun/inv_sqrt.hpp
@@ -46,7 +46,7 @@ template <typename Container,
require_all_not_nonscalar_prim_or_rev_kernel_expression_t<
Container>* = nullptr>
inline auto inv_sqrt(const Container& x) {
return apply_scalar_unary<inv_sqrt_fun, Container>::apply(x);
return apply_scalar_unary<inv_sqrt_fun, Container, true>::apply(x);
}

/**
2 changes: 1 addition & 1 deletion stan/math/prim/fun/lgamma.hpp
@@ -117,7 +117,7 @@ struct lgamma_fun {
template <typename T, require_not_var_matrix_t<T>* = nullptr,
require_not_nonscalar_prim_or_rev_kernel_expression_t<T>* = nullptr>
inline auto lgamma(const T& x) {
return apply_scalar_unary<lgamma_fun, T>::apply(x);
return apply_scalar_unary<lgamma_fun, T, true>::apply(x);
}

} // namespace math
2 changes: 1 addition & 1 deletion stan/math/prim/fun/log.hpp
@@ -48,7 +48,7 @@ template <
require_not_var_matrix_t<Container>* = nullptr,
require_not_nonscalar_prim_or_rev_kernel_expression_t<Container>* = nullptr>
inline auto log(const Container& x) {
return apply_scalar_unary<log_fun, Container>::apply(x);
return apply_scalar_unary<log_fun, Container, true>::apply(x);
}

/**
2 changes: 1 addition & 1 deletion stan/math/prim/fun/log10.hpp
@@ -39,7 +39,7 @@ template <
require_not_container_st<std::is_arithmetic, Container>* = nullptr,
require_not_nonscalar_prim_or_rev_kernel_expression_t<Container>* = nullptr>
inline auto log10(const Container& x) {
return apply_scalar_unary<log10_fun, Container>::apply(x);
return apply_scalar_unary<log10_fun, Container, true>::apply(x);
}

/**
2 changes: 1 addition & 1 deletion stan/math/prim/fun/log1m.hpp
@@ -71,7 +71,7 @@ template <
typename T, require_not_var_matrix_t<T>* = nullptr,
require_all_not_nonscalar_prim_or_rev_kernel_expression_t<T>* = nullptr>
inline auto log1m(const T& x) {
return apply_scalar_unary<log1m_fun, T>::apply(x);
return apply_scalar_unary<log1m_fun, T, true>::apply(x);
}

} // namespace math
2 changes: 1 addition & 1 deletion stan/math/prim/fun/log1m_exp.hpp
@@ -81,7 +81,7 @@ template <
typename T, require_not_var_matrix_t<T>* = nullptr,
require_all_not_nonscalar_prim_or_rev_kernel_expression_t<T>* = nullptr>
inline auto log1m_exp(const T& x) {
return apply_scalar_unary<log1m_exp_fun, T>::apply(x);
return apply_scalar_unary<log1m_exp_fun, T, true>::apply(x);
}

} // namespace math
5 changes: 2 additions & 3 deletions stan/math/prim/fun/log1m_inv_logit.hpp
@@ -82,9 +82,8 @@ struct log1m_inv_logit_fun {
*/
template <typename T, require_not_var_matrix_t<T>* = nullptr,
require_not_nonscalar_prim_or_rev_kernel_expression_t<T>* = nullptr>
inline typename apply_scalar_unary<log1m_inv_logit_fun, T>::return_t
log1m_inv_logit(const T& x) {
return apply_scalar_unary<log1m_inv_logit_fun, T>::apply(x);
inline auto log1m_inv_logit(const T& x) {
return apply_scalar_unary<log1m_inv_logit_fun, T, true>::apply(x);
}

} // namespace math
2 changes: 1 addition & 1 deletion stan/math/prim/fun/log1p.hpp
@@ -80,7 +80,7 @@ template <typename T,
require_not_nonscalar_prim_or_rev_kernel_expression_t<T>* = nullptr,
require_not_var_matrix_t<T>* = nullptr>
inline auto log1p(const T& x) {
return apply_scalar_unary<log1p_fun, T>::apply(x);
return apply_scalar_unary<log1p_fun, T, true>::apply(x);
}

} // namespace math
2 changes: 1 addition & 1 deletion stan/math/prim/fun/log1p_exp.hpp
@@ -76,7 +76,7 @@ template <typename T,
require_not_nonscalar_prim_or_rev_kernel_expression_t<T>* = nullptr,
require_not_var_matrix_t<T>* = nullptr>
inline auto log1p_exp(const T& x) {
return apply_scalar_unary<log1p_exp_fun, T>::apply(x);
return apply_scalar_unary<log1p_exp_fun, T, true>::apply(x);
}

} // namespace math
2 changes: 1 addition & 1 deletion stan/math/prim/fun/log2.hpp
@@ -47,7 +47,7 @@ struct log2_fun {
template <typename T, require_not_var_matrix_t<T>* = nullptr,
require_not_nonscalar_prim_or_rev_kernel_expression_t<T>* = nullptr>
inline auto log2(const T& x) {
return apply_scalar_unary<log2_fun, T>::apply(x);
return apply_scalar_unary<log2_fun, T, true>::apply(x);
}

} // namespace math
2 changes: 1 addition & 1 deletion stan/math/prim/fun/log_inv_logit.hpp
@@ -81,7 +81,7 @@ struct log_inv_logit_fun {
template <typename T,
require_not_nonscalar_prim_or_rev_kernel_expression_t<T>* = nullptr>
inline auto log_inv_logit(const T& x) {
return apply_scalar_unary<log_inv_logit_fun, T>::apply(x);
return apply_scalar_unary<log_inv_logit_fun, T, true>::apply(x);
}

} // namespace math
2 changes: 1 addition & 1 deletion stan/math/prim/fun/logit.hpp
@@ -89,7 +89,7 @@ template <
require_not_var_matrix_t<Container>* = nullptr,
require_not_nonscalar_prim_or_rev_kernel_expression_t<Container>* = nullptr>
inline auto logit(const Container& x) {
return apply_scalar_unary<logit_fun, Container>::apply(x);
return apply_scalar_unary<logit_fun, Container, true>::apply(x);
}

/**
2 changes: 1 addition & 1 deletion stan/math/prim/fun/tgamma.hpp
@@ -51,7 +51,7 @@ template <
require_all_not_nonscalar_prim_or_rev_kernel_expression_t<T>* = nullptr,
require_not_var_matrix_t<T>* = nullptr>
inline auto tgamma(const T& x) {
return apply_scalar_unary<tgamma_fun, T>::apply(x);
return apply_scalar_unary<tgamma_fun, T, true>::apply(x);
}

} // namespace math
102 changes: 84 additions & 18 deletions stan/math/prim/functor/apply_scalar_unary.hpp
@@ -2,12 +2,7 @@
#define STAN_MATH_PRIM_FUNCTOR_APPLY_SCALAR_UNARY_HPP

#include <stan/math/prim/fun/Eigen.hpp>
#include <stan/math/prim/meta/is_eigen.hpp>
#include <stan/math/prim/meta/is_complex.hpp>
#include <stan/math/prim/meta/require_generics.hpp>
#include <stan/math/prim/meta/is_vector.hpp>
#include <stan/math/prim/meta/is_vector_like.hpp>
#include <stan/math/prim/meta/plain_type.hpp>
#include <stan/math/prim/meta.hpp>
#include <utility>
#include <vector>

@@ -36,8 +31,12 @@ namespace math {
*
* @tparam F Type of function to apply.
* @tparam T Type of argument to which function is applied.
 * @tparam ApplyZero If true, the function is also applied to the zero entries
 * of a sparse matrix input, so the returned sparse matrix has every
 * coefficient stored. If false, the function is applied only to the stored
 * non-zero entries and the sparsity pattern of the input is preserved.
*/
template <typename F, typename T, typename Enable = void>
template <typename F, typename T, bool ApplyZero = false,
Collaborator:
I think this might be a bit clearer if the naming was ReturnSparse or ReturnDense (or something similar), so that it's explicit that the flag will affect the return type

Collaborator (Author):
Sorry, I need to update the docs and this PR's summary. After discussion with @bob-carpenter, @nhuurre, and the Eigen dev team (mostly in #2597), I think it makes more sense to return a sparse matrix with all entries filled in. I do kind of wonder if we should have something like exp_nonzero(sparse_matrix) available at the Stan language level, where it would only run over the nonzero entries. But we can sort that out at a later time.
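For readers following the thread, here is a standalone sketch (plain Eigen only, not code from this PR; the values and the use of exp are made up for illustration) of the two behaviors the flag selects for a sparse input:

```cpp
#include <Eigen/SparseCore>
#include <cmath>
#include <iostream>
#include <vector>

int main() {
  Eigen::SparseMatrix<double> x(2, 2);
  std::vector<Eigen::Triplet<double>> trips{{0, 0, 1.0}, {1, 1, 2.0}};
  x.setFromTriplets(trips.begin(), trips.end());

  // ApplyZero = true: the function is applied to the implicit zeros as well,
  // so the result stores every coefficient (exp(0) == 1 cannot be dropped).
  Eigen::MatrixXd all_entries = Eigen::MatrixXd(x).array().exp().matrix();

  // ApplyZero = false: only the stored non-zeros are transformed and the
  // sparsity pattern of x is preserved.
  Eigen::SparseMatrix<double> nonzeros_only = x;
  for (int k = 0; k < nonzeros_only.outerSize(); ++k) {
    for (Eigen::SparseMatrix<double>::InnerIterator it(nonzeros_only, k); it;
         ++it) {
      it.valueRef() = std::exp(it.value());
    }
  }

  std::cout << "all entries:\n" << all_entries << "\n"
            << "non-zeros only:\n" << Eigen::MatrixXd(nonzeros_only) << "\n";
}
```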

typename Enable = void>
struct apply_scalar_unary;

/**
@@ -48,8 +47,8 @@
* @tparam F Type of function to apply.
* @tparam T Type of argument to which function is applied.
*/
template <typename F, typename T>
struct apply_scalar_unary<F, T, require_eigen_t<T>> {
template <typename F, typename T, bool ApplyZero>
struct apply_scalar_unary<F, T, ApplyZero, require_eigen_t<T>> {
/**
* Type of underlying scalar for the matrix type T.
*/
@@ -59,15 +58,82 @@ struct apply_scalar_unary<F, T, require_eigen_t<T>> {
* Return the result of applying the function defined by the
* template parameter F to the specified matrix argument.
*
* @tparam DenseMat A type derived from `Eigen::DenseBase`.
* @param x Matrix to which operation is applied.
* @return Componentwise application of the function specified
* by F to the specified matrix.
*/
static inline auto apply(const T& x) {
template <typename DenseMat, require_eigen_dense_base_t<DenseMat>* = nullptr>
static inline auto apply(const DenseMat& x) {
return x.unaryExpr(
[](scalar_t x) { return apply_scalar_unary<F, scalar_t>::apply(x); });
}

/**
* Special case for `ApplyZero` set to true, returning a full sparse matrix.
* Return the result of applying the function defined by the template
* parameter F to the specified matrix argument.
Collaborator:
The docs for both overloads state that a sparse matrix is returned, so it might need to be clarified which is which.

*
 * @tparam SparseMat A type derived from `Eigen::SparseMatrixBase`
* @tparam NonZeroZero Shortcut trick for using class template for deduction,
* should not be set manually.
* @param x Matrix to which operation is applied.
* @return Componentwise application of the function specified
* by F to the specified matrix.
*/
template <typename SparseMat, bool NonZeroZero = ApplyZero,
require_t<bool_constant<NonZeroZero>>* = nullptr,
require_eigen_sparse_base_t<SparseMat>* = nullptr>
static inline auto apply(const SparseMat& x) {
using val_t = value_type_t<SparseMat>;
using triplet_t = Eigen::Triplet<val_t>;
auto zeroed_val = apply_scalar_unary<F, scalar_t>::apply(val_t(0.0));
const auto x_size = x.size();
std::vector<triplet_t> triplet_list(x_size, triplet_t(0, 0, zeroed_val));
for (Eigen::Index i = 0; i < x.rows(); ++i) {
for (Eigen::Index j = 0; j < x.cols(); ++j) {
// Column major order
triplet_list[i * x.cols() + j] = triplet_t(i, j, zeroed_val);
}
}
for (Eigen::Index k = 0; k < x.outerSize(); ++k) {
for (typename SparseMat::InnerIterator it(x, k); it; ++it) {
triplet_list[it.row() * x.cols() + it.col()]
Collaborator:
Rather than iterating over each element, would it be more performant to Map the values as a vector and call the vectorised implementation? For example:

Map<Eigen::VectorXd> mapSparse(x.valuePtr(), x.nonZeros());

apply_scalar_unary<F, Eigen::VectorXd>::apply(mapSparse);

Collaborator (Author):
It would definitely be better, though I avoided it because I was a bit confused about the compressed sparse matrix scheme Eigen uses. If you look at their docs under 'The SparseMatrix class', they sometimes allow empty values so it's easier to write to the sparse matrix, like

Values:	22	7	_	3	5	14	_	_	1	_	17	8
InnerIndices:	1	2	_	0	2	4	_	_	2	_	1	4

I was worried that directly grabbing the values could lead to some bad things, since there may be places in the value pointer that are not initialized. Another issue is that our arena_matrix scheme assumes that if you are passing it an Eigen map, the memory has already been allocated elsewhere for use in the reverse mode pass, which may not explicitly be true for sparse matrices.

So I agree that what you're saying here is definitely faster, but for this first pass I'd rather take the safer path for now. Once we have everything working at the language level and know more about the constraints and assumptions we can make, we can come back and do these kinds of optimizations.

Collaborator (Author):
Hmm, okay, now that I'm thinking about this more, I think that at the Stan language level we can make it so that you can only assign to elements that are already non-zero. So I wonder if what we can do here is just assume that we are always working with compressed matrices, since then we don't have to worry about the uninitialized values and can do the fast thing safely.
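A rough sketch of the Map-based variant discussed above, under the assumption that the input is (or is first made) compressed; `apply_to_nonzeros` and the lambda are hypothetical illustrations, not code from this PR:

```cpp
#include <Eigen/SparseCore>
#include <cmath>

// Once the matrix is compressed, valuePtr() holds exactly nonZeros()
// initialized coefficients, so they can be wrapped in a Map and transformed
// in a single vectorised pass.
template <typename F>
Eigen::SparseMatrix<double> apply_to_nonzeros(Eigen::SparseMatrix<double> x,
                                              F&& f) {
  x.makeCompressed();  // drop any empty slots in the value/index buffers
  Eigen::Map<Eigen::VectorXd> vals(x.valuePtr(), x.nonZeros());
  vals = vals.unaryExpr(f);  // apply f elementwise to the stored values only
  return x;
}

// Usage: auto y = apply_to_nonzeros(x, [](double v) { return std::exp(v); });
```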

= triplet_t(it.row(), it.col(),
apply_scalar_unary<F, scalar_t>::apply(it.value()));
}
}
plain_type_t<SparseMat> ret(x.rows(), x.cols());
ret.setFromTriplets(triplet_list.begin(), triplet_list.end());
return ret;
}

/**
* Special case for `ApplyZero` set to false, returning a sparse matrix.
* Return the result of applying the function defined by the template
* parameter F to the specified matrix argument.
*
* @tparam SparseMat A type derived from `Eigen::SparseMatrixBase`
* @tparam NonZeroZero Shortcut trick for using class template for deduction,
Collaborator:
Doc type name doesn't match function declaration.

Also, neat trick!

* should not be set manually.
* @param x Matrix to which operation is applied.
* @return Componentwise application of the function specified
* by F to the specified matrix.
*/
template <typename SparseMat, bool ReturnZeros = ApplyZero,
require_t<bool_constant<!ReturnZeros>>* = nullptr,
require_eigen_sparse_base_t<SparseMat>* = nullptr>
static inline auto apply(const SparseMat& x) {
auto ret = x.eval();
for (Eigen::Index k = 0; k < x.outerSize(); ++k) {
for (typename SparseMat::InnerIterator it(x, k), ret_it(ret, k); it;
++it, ++ret_it) {
ret_it.valueRef() = apply_scalar_unary<F, scalar_t>::apply(it.value());
}
}
return ret;
}

/**
* Return type for applying the function elementwise to a matrix
* expression template of type T.
@@ -82,8 +148,8 @@ struct apply_scalar_unary<F, T, require_eigen_t<T>> {
*
* @tparam F Type of function defining static apply function.
*/
template <typename F, typename T>
struct apply_scalar_unary<F, T, require_floating_point_t<T>> {
template <typename F, typename T, bool ApplyZero>
struct apply_scalar_unary<F, T, ApplyZero, require_floating_point_t<T>> {
/**
* The return type, double.
*/
@@ -107,8 +173,8 @@ struct apply_scalar_unary<F, T, require_floating_point_t<T>> {
*
* @tparam F Type of function defining static apply function.
*/
template <typename F, typename T>
struct apply_scalar_unary<F, T, require_complex_t<T>> {
template <typename F, typename T, bool ApplyZero>
struct apply_scalar_unary<F, T, ApplyZero, require_complex_t<T>> {
/**
* The return type, double.
*/
@@ -134,8 +200,8 @@ struct apply_scalar_unary<F, T, require_complex_t<T>> {
*
* @tparam F Type of function defining static apply function.
*/
template <typename F, typename T>
struct apply_scalar_unary<F, T, require_integral_t<T>> {
template <typename F, typename T, bool ApplyZero>
struct apply_scalar_unary<F, T, ApplyZero, require_integral_t<T>> {
/**
* The return type, double.
*/
@@ -162,8 +228,8 @@ struct apply_scalar_unary<F, T, require_integral_t<T>> {
* @tparam F Class defining a static apply function.
* @tparam T Type of element contained in standard vector.
*/
template <typename F, typename T>
struct apply_scalar_unary<F, std::vector<T>> {
template <typename F, typename T, bool ApplyZero>
struct apply_scalar_unary<F, std::vector<T>, ApplyZero, void> {
/**
* Return type, which is calculated recursively as a standard
* vector of the return type of the contained type T.