Skip to content

Commit

Permalink
Remove out tuple arguments from mrattg-streaming
Browse files Browse the repository at this point in the history
Signed-off-by: Joseph Schuchart <[email protected]>
  • Loading branch information
devreal committed Jun 12, 2024
1 parent fdf5cc0 commit cdffced
Show file tree
Hide file tree
Showing 3 changed files with 70 additions and 73 deletions.
35 changes: 16 additions & 19 deletions examples/madness/mrattg_streaming.cc
Original file line number Diff line number Diff line change
Expand Up @@ -294,7 +294,7 @@ template <Dimension NDIM> using ctlOut = ttg::Out<Key<NDIM>, void>;
std::mutex printer_guard;
template <typename keyT, typename valueT>
auto make_printer(const ttg::Edge<keyT, valueT>& in, const char* str = "", const bool doprint=true) {
auto func = [str,doprint](const keyT& key, auto& value, auto& out) {
auto func = [str,doprint](const keyT& key, auto& value) {
if (doprint) {
std::lock_guard<std::mutex> obolus(printer_guard);
std::cout << str << " (" << key << "," << value << ")" << std::endl;
Expand All @@ -305,7 +305,7 @@ auto make_printer(const ttg::Edge<keyT, valueT>& in, const char* str = "", const

/// Builds the TT that seeds the tree traversal.
///
/// The diff scrape had left both the pre- and post-commit lambdas in place
/// (a duplicate definition of `func`); only the out-tuple-free version is
/// kept here, matching the updated TTG API used throughout this commit.
///
/// @param ctl control edge the start task fires its key down
/// @return a TT with no inputs and one control output terminal ("control")
template <Dimension NDIM>
auto make_start(const ctlEdge<NDIM>& ctl) {
  // Pure control task: forward the incoming key on output terminal 0.
  auto func = [](const Key<NDIM>& key) { ttg::sendk<0>(key); };
  return ttg::make_tt<Key<NDIM>>(func, ttg::edges(), edges(ctl), "start", {}, {"control"});
}

Expand All @@ -320,7 +320,7 @@ auto make_project(functorT& f,
rnodeEdge<T,K,NDIM>& result,
const std::string& name = "project") {

auto F = [f, thresh](const Key<NDIM>& key, std::tuple<ctlOut<NDIM>, rnodeOut<T,K,NDIM>>& out) {
auto F = [f, thresh](const Key<NDIM>& key) {
FunctionReconstructedNodeWrap<T,K,NDIM> node(key); // Our eventual result
auto& coeffs = node.get().coeffs; // Need to clean up OO design

Expand All @@ -329,7 +329,7 @@ auto make_project(functorT& f,
/* TODO: children() returns an iteratable object but broadcast() expects a contiguous memory range.
We need to fix broadcast to support any ranges */
for (auto child : children(key)) bcast_keys.push_back(child);
ttg::broadcastk<0>(bcast_keys, out);
ttg::broadcastk<0>(bcast_keys);
coeffs = T(1e7); // set to obviously bad value to detect incorrect use
node.get().is_leaf = false;
}
Expand All @@ -342,10 +342,10 @@ auto make_project(functorT& f,
if (!node.get().is_leaf) {
std::vector<Key<NDIM>> bcast_keys;
for (auto child : children(key)) bcast_keys.push_back(child);
ttg::broadcastk<0>(bcast_keys, out);
ttg::broadcastk<0>(bcast_keys);
}
}
ttg::send<1>(key, std::move(node), out); // always produce a result
ttg::send<1>(key, std::move(node)); // always produce a result
};
ctlEdge<NDIM> refine("refine");
return ttg::make_tt(F, edges(fuse(refine, ctl)), ttg::edges(refine, result), name, {"control"}, {"refine", "result"});
Expand Down Expand Up @@ -386,8 +386,7 @@ namespace detail {
// Stream leaf nodes up the tree as a prelude to compressing
template <typename T, size_t K, Dimension NDIM>
void send_leaves_up(const Key<NDIM>& key,
const FunctionReconstructedNodeWrap<T,K,NDIM>& node,
std::tuple<rnodeOut<T,K,NDIM>, cnodeOut<T,K,NDIM>>& out) {
const FunctionReconstructedNodeWrap<T,K,NDIM>& node) {
//typename ::detail::tree_types<T,K,NDIM>::compress_out_type& out) {
//Removed const from here!!
node.get().sum = 0.0; //
Expand All @@ -403,22 +402,21 @@ void send_leaves_up(const Key<NDIM>& key,
} else {
//auto outs = ::mra::subtuple_to_array_of_ptrs<0,Key<NDIM>::num_children>(out);
//outs[key.childindex()]->send(key.parent(),node);
ttg::send<0>(key.parent(), node, out);
ttg::send<0>(key.parent(), node);
}
}
}

template <typename T, size_t K, Dimension NDIM>
void reduce_leaves(const Key<NDIM>& key, const FunctionReconstructedNodeWrap<T,K,NDIM>& node, std::tuple<rnodeOut<T,K,NDIM>>& out) {
void reduce_leaves(const Key<NDIM>& key, const FunctionReconstructedNodeWrap<T,K,NDIM>& node) {
//std::cout << "Reduce_leaves " << node.key.childindex() << " " << node.neighbor_sum[node.key.childindex()] << std::endl;
std::get<0>(out).send(key, node);
ttg::send<0>(key, node);
}

// With data streaming up the tree run compression
template <typename T, size_t K, Dimension NDIM>
void do_compress(const Key<NDIM>& key,
const FunctionReconstructedNodeWrap<T,K,NDIM> &in,
std::tuple<rnodeOut<T,K,NDIM>, cnodeOut<T,K,NDIM>> &out) {
const FunctionReconstructedNodeWrap<T,K,NDIM> &in) {
//const typename ::detail::tree_types<T,K,NDIM>::compress_in_type& in,
//typename ::detail::tree_types<T,K,NDIM>::compress_out_type& out) {
auto& child_slices = FunctionData<T,K,NDIM>::get_child_slices();
Expand Down Expand Up @@ -448,15 +446,15 @@ void do_compress(const Key<NDIM>& key,
p.get().sum = d.sumabssq() + sumsq; // Accumulate sumsq of difference coeffs from this node and children
//auto outs = ::mra::subtuple_to_array_of_ptrs<0,Key<NDIM>::num_children>(out);
//outs[key.childindex()]->send(key.parent(), p);
ttg::send<0>(key.parent(), std::move(p), out);
ttg::send<0>(key.parent(), std::move(p));
}
else {
std::cout << "At root of compressed tree: total normsq is " << sumsq + d.sumabssq() << std::endl;
}

// Send result to output tree
//send<Key<NDIM>::num_children>(key,result,out);
ttg::send<1>(key, std::move(result), out);
ttg::send<1>(key, std::move(result));
}


Expand All @@ -482,8 +480,7 @@ auto make_compress(rnodeEdge<T,K,NDIM>& in, cnodeEdge<T,K,NDIM>& out, const std:

template <typename T, size_t K, Dimension NDIM>
void do_reconstruct(const Key<NDIM>& key,
const std::tuple<FunctionCompressedNodeWrap<T,K,NDIM>&,FixedTensor<T,K,NDIM>&>& t,
std::tuple<ttg::Out<Key<NDIM>,FixedTensor<T,K,NDIM>>,rnodeOut<T,K,NDIM>>& out) {
const std::tuple<FunctionCompressedNodeWrap<T,K,NDIM>&,FixedTensor<T,K,NDIM>&>& t) {
const auto& child_slices = FunctionData<T,K,NDIM>::get_child_slices();
auto& node = std::get<0>(t);
const auto& from_parent = std::get<1>(t);
Expand Down Expand Up @@ -515,8 +512,8 @@ void do_reconstruct(const Key<NDIM>& key,
bcast_keys[0].push_back(child);
}
}
ttg::broadcast<0>(bcast_keys[0], r.get().coeffs, out);
ttg::broadcast<1>(bcast_keys[1], std::move(r), out);
ttg::broadcast<0>(bcast_keys[0], r.get().coeffs);
ttg::broadcast<1>(bcast_keys[1], std::move(r));
}

template <typename T, size_t K, Dimension NDIM>
Expand Down
28 changes: 14 additions & 14 deletions examples/mrafunctionnode.h
Original file line number Diff line number Diff line change
Expand Up @@ -21,21 +21,21 @@ namespace mra {
transform<T,T,T,2*K,NDIM>(in,hgT,out);

}

/// Applies the inverse two-scale filter: the parent's sum+difference
/// coefficients are transformed into the sum coefficients of its children.
///
/// @param in  parent coefficients (sum + difference), rank-NDIM, extent 2K
/// @param out child sum coefficients, same shape as @p in
template <typename T, size_t K, Dimension NDIM>
void unfilter(const FixedTensor<T,2*K,NDIM>& in, FixedTensor<T,2*K,NDIM>& out) {
  // The (non-transposed) two-scale coefficient matrix performs the inverse
  // of the filter() transform above.
  auto& two_scale_hg = FunctionData<T,K,NDIM>::get_hg();
  transform<T,T,T,2*K,NDIM>(in, two_scale_hg, out);
}

/// Returns the truncation tolerance to use inside the box identified by
/// @p key, given the user-requested threshold @p thresh.
///
/// Currently level-independent: the global threshold is returned unchanged
/// for every box ("nothing clever for now").
template <typename T, Dimension NDIM>
T truncate_tol(const Key<NDIM>& key, const T thresh) {
  // Placeholder policy — a smarter scheme could scale the tolerance with
  // the refinement level encoded in key.
  const T tol = thresh;
  return tol;
}

/// Computes square of distance between two coordinates
template <typename T>
T distancesq(const Coordinate<T,1>& p, const Coordinate<T,1>& q) {
Expand Down Expand Up @@ -63,7 +63,7 @@ namespace mra {
rsq[i] = xx*xx;
}
}

template <typename T, size_t N>
void distancesq(const Coordinate<T,3>& p, const SimpleTensor<T,2,N>& q, std::array<T,N>& rsq) {
const T x = p(0);
Expand All @@ -74,7 +74,7 @@ namespace mra {
rsq[i] = xx*xx + yy*yy;
}
}

template <typename T, size_t N>
void distancesq(const Coordinate<T,3>& p, const SimpleTensor<T,3,N>& q, std::array<T,N>& rsq) {
const T x = p(0);
Expand Down Expand Up @@ -104,15 +104,15 @@ namespace mra {
constexpr bool call_2d = (NDIM==2) && std::is_invocable_r<T, decltype(f), T, T>(); // f(x,y)
constexpr bool call_3d = (NDIM==3) && std::is_invocable_r<T, decltype(f), T, T, T>(); // f(x,y,z)
constexpr bool call_vec = std::is_invocable_r<void, decltype(f), SimpleTensor<T,NDIM,K2NDIM>, std::array<T,K2NDIM>&>(); // vector API

static_assert(call_coord || call_1d || call_2d || call_3d || call_vec, "no working call");

if constexpr (call_1d || call_2d || call_3d || call_vec) {
SimpleTensor<T,NDIM,K2NDIM> xvec;
SimpleTensor<T,NDIM,K2NDIM> xvec;
make_xvec(x,xvec);
if constexpr (call_vec) {
f(xvec,values.data());
}
}
else if constexpr (call_1d || call_2d || call_3d) {
eval_cube_vec(f, xvec, values);
}
Expand All @@ -125,20 +125,20 @@ namespace mra {
}
}
}

/// Project the scaling coefficients using screening and test norm of difference coeffs. Return true if difference coeffs negligible.
template <typename functorT, typename T, size_t K, Dimension NDIM>
bool fcoeffs(const functorT& f, const Key<NDIM>& key, const T thresh, FixedTensor<T,K,NDIM>& s) {
bool status;

if (is_negligible(f,Domain<NDIM>:: template bounding_box<T>(key),truncate_tol(key,thresh))) {
s = 0.0;
status = true;
}
else {
auto& child_slices = FunctionData<T,K,NDIM>::get_child_slices();
auto& phibar = FunctionData<T,K,NDIM>::get_phibar();

FixedTensor<T,2*K,NDIM> values;
{
FixedTensor<T,K,NDIM> child_values;
Expand Down Expand Up @@ -179,7 +179,7 @@ namespace mra {
std::array<bool, 1 << NDIM> is_neighbor_leaf;
std::array<T, 1 << NDIM> neighbor_sum;
};

template <typename T, size_t K, Dimension NDIM>
class FunctionCompressedNode {
public: // temporarily make everything public while we figure out what we are doing
Expand All @@ -192,7 +192,7 @@ namespace mra {
T normf() const {return coeffs.normf();}
bool has_children(size_t childindex) const {assert(childindex<Key<NDIM>::num_children); return !is_leaf[childindex];}
};

template <typename T, size_t K, Dimension NDIM, typename ostream>
ostream& operator<<(ostream& s, const FunctionReconstructedNode<T,K,NDIM>& node) {
s << "FunctionReconstructedNode(" << node.key << "," << node.is_leaf << "," << node.normf() << ")";
Expand All @@ -206,5 +206,5 @@ namespace mra {
}

}

#endif
Loading

0 comments on commit cdffced

Please sign in to comment.