diff --git a/include/oneapi/tbb/collaborative_call_once.h b/include/oneapi/tbb/collaborative_call_once.h index e3742347f1..18e3bbb245 100644 --- a/include/oneapi/tbb/collaborative_call_once.h +++ b/include/oneapi/tbb/collaborative_call_once.h @@ -172,7 +172,7 @@ class collaborative_once_flag : no_copy { spin_wait_until_eq(m_state, expected); } while (!m_state.compare_exchange_strong(expected, desired)); } - + template void do_collaborative_call_once(Fn&& f) { std::uintptr_t expected = m_state.load(std::memory_order_acquire); diff --git a/include/oneapi/tbb/detail/_flow_graph_body_impl.h b/include/oneapi/tbb/detail/_flow_graph_body_impl.h index 8ac11211f6..7a6a1cf43c 100644 --- a/include/oneapi/tbb/detail/_flow_graph_body_impl.h +++ b/include/oneapi/tbb/detail/_flow_graph_body_impl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2023 Intel Corporation + Copyright (c) 2005-2024 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,7 +21,7 @@ #error Do not #include this internal file directly; use public TBB headers instead. #endif -// included in namespace tbb::detail::d1 (in flow_graph.h) +// included in namespace tbb::detail::d2 (in flow_graph.h) typedef std::uint64_t tag_value; @@ -53,7 +53,7 @@ namespace graph_policy_namespace { // K == type of field used for key-matching. Each tag-matching port will be provided // functor that, given an object accepted by the port, will return the /// field of type K being used for matching. 
- template::type > > + template::type > > __TBB_requires(tbb::detail::hash_compare) struct key_matching { typedef K key_type; @@ -77,7 +77,7 @@ template< typename Output > class input_body : no_assign { public: virtual ~input_body() {} - virtual Output operator()(flow_control& fc) = 0; + virtual Output operator()(d1::flow_control& fc) = 0; virtual input_body* clone() = 0; }; @@ -86,7 +86,7 @@ template< typename Output, typename Body> class input_body_leaf : public input_body { public: input_body_leaf( const Body &_body ) : body(_body) { } - Output operator()(flow_control& fc) override { return body(fc); } + Output operator()(d1::flow_control& fc) override { return body(fc); } input_body_leaf* clone() override { return new input_body_leaf< Output, Body >(body); } @@ -249,12 +249,12 @@ template< typename NodeType > class forward_task_bypass : public graph_task { NodeType &my_node; public: - forward_task_bypass( graph& g, small_object_allocator& allocator, NodeType &n + forward_task_bypass( graph& g, d1::small_object_allocator& allocator, NodeType &n , node_priority_t node_priority = no_priority ) : graph_task(g, allocator, node_priority), my_node(n) {} - task* execute(execution_data& ed) override { + d1::task* execute(d1::execution_data& ed) override { graph_task* next_task = my_node.forward_task(); if (SUCCESSFULLY_ENQUEUED == next_task) next_task = nullptr; @@ -264,7 +264,7 @@ class forward_task_bypass : public graph_task { return next_task; } - task* cancel(execution_data& ed) override { + d1::task* cancel(d1::execution_data& ed) override { finalize(ed); return nullptr; } @@ -278,12 +278,12 @@ class apply_body_task_bypass : public graph_task { Input my_input; public: - apply_body_task_bypass( graph& g, small_object_allocator& allocator, NodeType &n, const Input &i + apply_body_task_bypass( graph& g, d1::small_object_allocator& allocator, NodeType &n, const Input &i , node_priority_t node_priority = no_priority ) : graph_task(g, allocator, node_priority), 
my_node(n), my_input(i) {} - task* execute(execution_data& ed) override { + d1::task* execute(d1::execution_data& ed) override { graph_task* next_task = my_node.apply_body_bypass( my_input ); if (SUCCESSFULLY_ENQUEUED == next_task) next_task = nullptr; @@ -293,7 +293,7 @@ class apply_body_task_bypass : public graph_task { return next_task; } - task* cancel(execution_data& ed) override { + d1::task* cancel(d1::execution_data& ed) override { finalize(ed); return nullptr; } @@ -304,10 +304,10 @@ template< typename NodeType > class input_node_task_bypass : public graph_task { NodeType &my_node; public: - input_node_task_bypass( graph& g, small_object_allocator& allocator, NodeType &n ) + input_node_task_bypass( graph& g, d1::small_object_allocator& allocator, NodeType &n ) : graph_task(g, allocator), my_node(n) {} - task* execute(execution_data& ed) override { + d1::task* execute(d1::execution_data& ed) override { graph_task* next_task = my_node.apply_body_bypass( ); if (SUCCESSFULLY_ENQUEUED == next_task) next_task = nullptr; @@ -317,7 +317,7 @@ class input_node_task_bypass : public graph_task { return next_task; } - task* cancel(execution_data& ed) override { + d1::task* cancel(d1::execution_data& ed) override { finalize(ed); return nullptr; } diff --git a/include/oneapi/tbb/detail/_flow_graph_cache_impl.h b/include/oneapi/tbb/detail/_flow_graph_cache_impl.h index 059f198055..69625408f6 100644 --- a/include/oneapi/tbb/detail/_flow_graph_cache_impl.h +++ b/include/oneapi/tbb/detail/_flow_graph_cache_impl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2022 Intel Corporation + Copyright (c) 2005-2024 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,7 +21,7 @@ #error Do not #include this internal file directly; use public TBB headers instead. #endif -// included in namespace tbb::detail::d1 (in flow_graph.h) +// included in namespace tbb::detail::d2 (in flow_graph.h) //! 
A node_cache maintains a std::queue of elements of type T. Each operation is protected by a lock. template< typename T, typename M=spin_mutex > diff --git a/include/oneapi/tbb/detail/_flow_graph_impl.h b/include/oneapi/tbb/detail/_flow_graph_impl.h index 8207667f37..5d79a5bf08 100644 --- a/include/oneapi/tbb/detail/_flow_graph_impl.h +++ b/include/oneapi/tbb/detail/_flow_graph_impl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2022 Intel Corporation + Copyright (c) 2005-2024 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -30,7 +30,7 @@ namespace tbb { namespace detail { -namespace d1 { +namespace d2 { class graph_task; static graph_task* const SUCCESSFULLY_ENQUEUED = (graph_task*)-1; @@ -123,27 +123,24 @@ void enqueue_in_graph_arena(graph &g, graph_task& arena_task); class graph; //! Base class for tasks generated by graph nodes. -class graph_task : public task { +class graph_task : public d1::task { public: - graph_task(graph& g, small_object_allocator& allocator - , node_priority_t node_priority = no_priority - ) - : my_graph(g) - , priority(node_priority) - , my_allocator(allocator) - {} + graph_task(graph& g, d1::small_object_allocator& allocator, + node_priority_t node_priority = no_priority); + graph& my_graph; // graph instance the task belongs to // TODO revamp: rename to my_priority node_priority_t priority; template - void destruct_and_deallocate(const execution_data& ed); + void destruct_and_deallocate(const d1::execution_data& ed); protected: template - void finalize(const execution_data& ed); + void finalize(const d1::execution_data& ed); private: // To organize task_list graph_task* my_next{ nullptr }; - small_object_allocator my_allocator; + d1::small_object_allocator my_allocator; + d1::wait_tree_vertex_interface* my_reference_vertex; // TODO revamp: elaborate internal interfaces to avoid friends declarations friend class graph_task_list; friend 
graph_task* prioritize_task(graph& g, graph_task& gt); @@ -157,18 +154,18 @@ struct graph_task_comparator { typedef tbb::concurrent_priority_queue graph_task_priority_queue_t; -class priority_task_selector : public task { +class priority_task_selector : public d1::task { public: - priority_task_selector(graph_task_priority_queue_t& priority_queue, small_object_allocator& allocator) + priority_task_selector(graph_task_priority_queue_t& priority_queue, d1::small_object_allocator& allocator) : my_priority_queue(priority_queue), my_allocator(allocator), my_task() {} - task* execute(execution_data& ed) override { + task* execute(d1::execution_data& ed) override { next_task(); __TBB_ASSERT(my_task, nullptr); task* t_next = my_task->execute(ed); my_allocator.delete_object(this, ed); return t_next; } - task* cancel(execution_data& ed) override { + task* cancel(d1::execution_data& ed) override { if (!my_task) { next_task(); } @@ -190,7 +187,7 @@ class priority_task_selector : public task { } graph_task_priority_queue_t& my_priority_queue; - small_object_allocator my_allocator; + d1::small_object_allocator my_allocator; graph_task* my_task; }; @@ -281,7 +278,7 @@ class graph : no_copy, public graph_proxy { caught_exception = false; try_call([this] { my_task_arena->execute([this] { - wait(my_wait_context, *my_context); + wait(my_wait_context_vertex.get_context(), *my_context); }); cancelled = my_context->is_group_execution_cancelled(); }).on_exception([this] { @@ -332,7 +329,7 @@ class graph : no_copy, public graph_proxy { bool exception_thrown() { return caught_exception; } private: - wait_context my_wait_context; + d1::wait_context_vertex my_wait_context_vertex; task_group_context *my_context; bool own_context; bool cancelled; @@ -349,19 +346,22 @@ class graph : no_copy, public graph_proxy { graph_task_priority_queue_t my_priority_queue; + d1::wait_context_vertex& get_wait_context_vertex() { return my_wait_context_vertex; } + friend void activate_graph(graph& g); friend 
void deactivate_graph(graph& g); friend bool is_graph_active(graph& g); + friend bool is_this_thread_in_graph_arena(graph& g); friend graph_task* prioritize_task(graph& g, graph_task& arena_task); friend void spawn_in_graph_arena(graph& g, graph_task& arena_task); friend void enqueue_in_graph_arena(graph &g, graph_task& arena_task); friend class task_arena_base; - + friend class graph_task; }; // class graph template -inline void graph_task::destruct_and_deallocate(const execution_data& ed) { +inline void graph_task::destruct_and_deallocate(const d1::execution_data& ed) { auto allocator = my_allocator; // TODO: investigate if direct call of derived destructor gives any benefits. this->~graph_task(); @@ -369,10 +369,27 @@ inline void graph_task::destruct_and_deallocate(const execution_data& ed) { } template -inline void graph_task::finalize(const execution_data& ed) { - graph& g = my_graph; +inline void graph_task::finalize(const d1::execution_data& ed) { + d1::wait_tree_vertex_interface* reference_vertex = my_reference_vertex; destruct_and_deallocate(ed); - g.release_wait(); + reference_vertex->release(); +} + +inline graph_task::graph_task(graph& g, d1::small_object_allocator& allocator, + node_priority_t node_priority) + : my_graph(g) + , priority(node_priority) + , my_allocator(allocator) +{ + // If the task is created by the thread outside the graph arena, the lifetime of the thread reference vertex + may be shorter than the lifetime of the task, so thread reference vertex approach cannot be used + and the task should be associated with the graph wait context itself + // TODO: consider how reference counting can be improved for such a use case. Most common example is the async_node + d1::wait_context_vertex* graph_wait_context_vertex = &my_graph.get_wait_context_vertex(); + my_reference_vertex = is_this_thread_in_graph_arena(g) ?
r1::get_thread_reference_vertex(graph_wait_context_vertex) + : graph_wait_context_vertex; + __TBB_ASSERT(my_reference_vertex, nullptr); + my_reference_vertex->reserve(); } //******************************************************************************** @@ -424,15 +441,20 @@ inline bool is_graph_active(graph& g) { return g.my_is_active; } +inline bool is_this_thread_in_graph_arena(graph& g) { + __TBB_ASSERT(g.my_task_arena && g.my_task_arena->is_active(), nullptr); + return r1::execution_slot(*g.my_task_arena) != d1::slot_id(-1); +} + inline graph_task* prioritize_task(graph& g, graph_task& gt) { if( no_priority == gt.priority ) return > //! Non-preemptive priority pattern. The original task is submitted as a work item to the //! priority queue, and a new critical task is created to take and execute a work item with - //! the highest known priority. The reference counting responsibility is transferred (via - //! allocate_continuation) to the new task. - task* critical_task = gt.my_allocator.new_object(g.my_priority_queue, gt.my_allocator); + //! the highest known priority. The reference counting responsibility is transferred to + //! the new task. + d1::task* critical_task = gt.my_allocator.new_object(g.my_priority_queue, gt.my_allocator); __TBB_ASSERT( critical_task, "bad_alloc?" ); g.my_priority_queue.push(>); using tbb::detail::d1::submit; @@ -443,7 +465,7 @@ inline graph_task* prioritize_task(graph& g, graph_task& gt) { //! Spawns a task inside graph arena inline void spawn_in_graph_arena(graph& g, graph_task& arena_task) { if (is_graph_active(g)) { - task* gt = prioritize_task(g, arena_task); + d1::task* gt = prioritize_task(g, arena_task); if( !gt ) return; @@ -464,12 +486,12 @@ inline void enqueue_in_graph_arena(graph &g, graph_task& arena_task) { __TBB_ASSERT( g.my_task_arena && g.my_task_arena->is_active(), "Is graph's arena initialized and active?" 
); // TODO revamp: decide on the approach that does not postpone critical task - if( task* gt = prioritize_task(g, arena_task) ) + if( d1::task* gt = prioritize_task(g, arena_task) ) submit( *gt, *g.my_task_arena, *g.my_context, /*as_critical=*/false); } } -} // namespace d1 +} // namespace d2 } // namespace detail } // namespace tbb diff --git a/include/oneapi/tbb/detail/_flow_graph_indexer_impl.h b/include/oneapi/tbb/detail/_flow_graph_indexer_impl.h index f4f55a6c7a..df083bd443 100644 --- a/include/oneapi/tbb/detail/_flow_graph_indexer_impl.h +++ b/include/oneapi/tbb/detail/_flow_graph_indexer_impl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2021 Intel Corporation + Copyright (c) 2005-2024 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,7 +21,7 @@ #error Do not #include this internal file directly; use public TBB headers instead. #endif -// included in namespace tbb::detail::d1 +// included in namespace tbb::detail::d2 #include "_flow_graph_types_impl.h" @@ -118,7 +118,7 @@ }; typedef indexer_node_base class_type; - class indexer_node_base_operation : public aggregated_operation { + class indexer_node_base_operation : public d1::aggregated_operation { public: char type; union { @@ -132,9 +132,9 @@ my_succ(const_cast(&s)) {} }; - typedef aggregating_functor handler_type; - friend class aggregating_functor; - aggregator my_aggregator; + typedef d1::aggregating_functor handler_type; + friend class d1::aggregating_functor; + d1::aggregator my_aggregator; void handle_operations(indexer_node_base_operation* op_list) { indexer_node_base_operation *current; diff --git a/include/oneapi/tbb/detail/_flow_graph_join_impl.h b/include/oneapi/tbb/detail/_flow_graph_join_impl.h index 5515421ede..fd401f31a1 100644 --- a/include/oneapi/tbb/detail/_flow_graph_join_impl.h +++ b/include/oneapi/tbb/detail/_flow_graph_join_impl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2022 Intel 
Corporation + Copyright (c) 2005-2024 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,7 +21,7 @@ #error Do not #include this internal file directly; use public TBB headers instead. #endif -// included into namespace tbb::detail::d1 +// included into namespace tbb::detail::d2 struct forwarding_base : no_assign { forwarding_base(graph &g) : graph_ref(g) {} @@ -216,7 +216,7 @@ }; typedef reserving_port class_type; - class reserving_port_operation : public aggregated_operation { + class reserving_port_operation : public d1::aggregated_operation { public: char type; union { @@ -230,9 +230,9 @@ reserving_port_operation(op_type t) : type(char(t)) {} }; - typedef aggregating_functor handler_type; - friend class aggregating_functor; - aggregator my_aggregator; + typedef d1::aggregating_functor handler_type; + friend class d1::aggregating_functor; + d1::aggregator my_aggregator; void handle_operations(reserving_port_operation* op_list) { reserving_port_operation *current; @@ -376,7 +376,7 @@ enum op_type { get__item, res_port, try__put_task }; - class queueing_port_operation : public aggregated_operation { + class queueing_port_operation : public d1::aggregated_operation { public: char type; T my_val; @@ -398,9 +398,9 @@ {} }; - typedef aggregating_functor handler_type; - friend class aggregating_functor; - aggregator my_aggregator; + typedef d1::aggregating_functor handler_type; + friend class d1::aggregating_functor; + d1::aggregator my_aggregator; void handle_operations(queueing_port_operation* op_list) { queueing_port_operation *current; @@ -541,7 +541,7 @@ enum op_type { try__put, get__item, res_port }; - class key_matching_port_operation : public aggregated_operation { + class key_matching_port_operation : public d1::aggregated_operation { public: char type; input_type my_val; @@ -556,9 +556,9 @@ key_matching_port_operation(op_type t) : type(char(t)), 
my_arg(nullptr) {} }; - typedef aggregating_functor handler_type; - friend class aggregating_functor; - aggregator my_aggregator; + typedef d1::aggregating_functor handler_type; + friend class d1::aggregating_functor; + d1::aggregator my_aggregator; void handle_operations(key_matching_port_operation* op_list) { key_matching_port_operation *current; @@ -695,10 +695,9 @@ graph_task* decrement_port_count() override { if(ports_with_no_inputs.fetch_sub(1) == 1) { if(is_graph_active(this->graph_ref)) { - small_object_allocator allocator{}; + d1::small_object_allocator allocator{}; typedef forward_task_bypass task_type; graph_task* t = allocator.new_object(graph_ref, allocator, *my_node); - graph_ref.reserve_wait(); spawn_in_graph_arena(this->graph_ref, *t); } } @@ -768,10 +767,9 @@ { if(ports_with_no_items.fetch_sub(1) == 1) { if(is_graph_active(this->graph_ref)) { - small_object_allocator allocator{}; + d1::small_object_allocator allocator{}; typedef forward_task_bypass task_type; graph_task* t = allocator.new_object(graph_ref, allocator, *my_node); - graph_ref.reserve_wait(); if( !handle_task ) return t; spawn_in_graph_arena(this->graph_ref, *t); @@ -854,7 +852,7 @@ enum op_type { res_count, inc_count, may_succeed, try_make }; typedef join_node_FE, InputTuple, OutputTuple> class_type; - class key_matching_FE_operation : public aggregated_operation { + class key_matching_FE_operation : public d1::aggregated_operation { public: char type; unref_key_type my_val; @@ -868,9 +866,9 @@ key_matching_FE_operation(op_type t) : type(char(t)), my_output(nullptr), bypass_t(nullptr) {} }; - typedef aggregating_functor handler_type; - friend class aggregating_functor; - aggregator my_aggregator; + typedef d1::aggregating_functor handler_type; + friend class d1::aggregating_functor; + d1::aggregator my_aggregator; // called from aggregator, so serialized // returns a task pointer if the a task would have been enqueued but we asked that @@ -884,10 +882,9 @@ 
if(join_helper::get_items(my_inputs, l_out)) { // <== call back this->push_back(l_out); if(do_fwd) { // we enqueue if receiving an item from predecessor, not if successor asks for item - small_object_allocator allocator{}; + d1::small_object_allocator allocator{}; typedef forward_task_bypass task_type; rtask = allocator.new_object(this->graph_ref, allocator, *my_node); - this->graph_ref.reserve_wait(); do_fwd = false; } // retire the input values @@ -1044,7 +1041,7 @@ }; typedef join_node_base class_type; - class join_node_base_operation : public aggregated_operation { + class join_node_base_operation : public d1::aggregated_operation { public: char type; union { @@ -1059,10 +1056,10 @@ join_node_base_operation(op_type t) : type(char(t)), bypass_t(nullptr) {} }; - typedef aggregating_functor handler_type; - friend class aggregating_functor; + typedef d1::aggregating_functor handler_type; + friend class d1::aggregating_functor; bool forwarder_busy; - aggregator my_aggregator; + d1::aggregator my_aggregator; void handle_operations(join_node_base_operation* op_list) { join_node_base_operation *current; @@ -1073,10 +1070,9 @@ case reg_succ: { my_successors.register_successor(*(current->my_succ)); if(tuple_build_may_succeed() && !forwarder_busy && is_graph_active(my_graph)) { - small_object_allocator allocator{}; + d1::small_object_allocator allocator{}; typedef forward_task_bypass< join_node_base > task_type; graph_task* t = allocator.new_object(my_graph, allocator, *this); - my_graph.reserve_wait(); spawn_in_graph_arena(my_graph, *t); forwarder_busy = true; } diff --git a/include/oneapi/tbb/detail/_flow_graph_node_impl.h b/include/oneapi/tbb/detail/_flow_graph_node_impl.h index b79c53ddbf..3bb9b7d788 100644 --- a/include/oneapi/tbb/detail/_flow_graph_node_impl.h +++ b/include/oneapi/tbb/detail/_flow_graph_node_impl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2023 Intel Corporation + Copyright (c) 2005-2024 Intel Corporation Licensed under the Apache License, Version 2.0 
(the "License"); you may not use this file except in compliance with the License. @@ -150,7 +150,7 @@ class function_input_base : public receiver, no_assign { friend class apply_body_task_bypass< class_type, input_type >; friend class forward_task_bypass< class_type >; - class operation_type : public aggregated_operation< operation_type > { + class operation_type : public d1::aggregated_operation< operation_type > { public: char type; union { @@ -164,9 +164,9 @@ class function_input_base : public receiver, no_assign { }; bool forwarder_busy; - typedef aggregating_functor handler_type; - friend class aggregating_functor; - aggregator< handler_type, operation_type > my_aggregator; + typedef d1::aggregating_functor handler_type; + friend class d1::aggregating_functor; + d1::aggregator< handler_type, operation_type > my_aggregator; graph_task* perform_queued_requests() { graph_task* new_task = nullptr; @@ -300,10 +300,9 @@ class function_input_base : public receiver, no_assign { return nullptr; } // TODO revamp: extract helper for common graph task allocation part - small_object_allocator allocator{}; + d1::small_object_allocator allocator{}; typedef apply_body_task_bypass task_type; graph_task* t = allocator.new_object( my_graph_ref, allocator, *this, input, my_priority ); - graph_reference().reserve_wait(); return t; } @@ -327,10 +326,9 @@ class function_input_base : public receiver, no_assign { if (!is_graph_active(my_graph_ref)) { return nullptr; } - small_object_allocator allocator{}; + d1::small_object_allocator allocator{}; typedef forward_task_bypass task_type; graph_task* t = allocator.new_object( graph_reference(), allocator, *this, my_priority ); - graph_reference().reserve_wait(); return t; } @@ -680,10 +678,9 @@ class continue_input : public continue_receiver { return apply_body_bypass( continue_msg() ); } else { - small_object_allocator allocator{}; + d1::small_object_allocator allocator{}; typedef apply_body_task_bypass task_type; graph_task* t = 
allocator.new_object( graph_reference(), allocator, *this, continue_msg(), my_priority ); - graph_reference().reserve_wait(); return t; } } diff --git a/include/oneapi/tbb/detail/_flow_graph_node_set_impl.h b/include/oneapi/tbb/detail/_flow_graph_node_set_impl.h index ce867121f9..8440bd7008 100644 --- a/include/oneapi/tbb/detail/_flow_graph_node_set_impl.h +++ b/include/oneapi/tbb/detail/_flow_graph_node_set_impl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2020-2021 Intel Corporation + Copyright (c) 2020-2024 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,7 +21,7 @@ #error Do not #include this internal file directly; use public TBB headers instead. #endif -// Included in namespace tbb::detail::d1 (in flow_graph.h) +// Included in namespace tbb::detail::d2 (in flow_graph.h) #if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET // Visual Studio 2019 reports an error while calling predecessor_selector::get and successor_selector::get diff --git a/include/oneapi/tbb/detail/_flow_graph_nodes_deduction.h b/include/oneapi/tbb/detail/_flow_graph_nodes_deduction.h index 8c20993795..47ecfb2a84 100644 --- a/include/oneapi/tbb/detail/_flow_graph_nodes_deduction.h +++ b/include/oneapi/tbb/detail/_flow_graph_nodes_deduction.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2021 Intel Corporation + Copyright (c) 2005-2024 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -21,7 +21,7 @@ namespace tbb { namespace detail { -namespace d1 { +namespace d2 { template struct declare_body_types { @@ -51,10 +51,10 @@ template struct body_types : declare_body_types {}; template -struct body_types : declare_body_types {}; +struct body_types : declare_body_types {}; template -struct body_types : declare_body_types {}; +struct body_types : declare_body_types {}; template struct body_types : declare_body_types {}; @@ -63,7 +63,7 @@ template struct body_types : declare_body_types {}; template -struct body_types : declare_body_types {}; +struct body_types : declare_body_types {}; template using input_t = typename body_types::input_type; @@ -100,7 +100,7 @@ decltype(decide_on_operator_overload(std::declval())) decide_on_callable_t template input_node(GraphOrSet&&, Body) ->input_node(0))>>; - + #if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET template @@ -268,7 +268,7 @@ template write_once_node(const NodeSet&) ->write_once_node>; #endif // __TBB_PREVIEW_FLOW_GRAPH_NODE_SET -} // namespace d1 +} // namespace d2 } // namespace detail } // namespace tbb diff --git a/include/oneapi/tbb/detail/_flow_graph_trace_impl.h b/include/oneapi/tbb/detail/_flow_graph_trace_impl.h index a161dd0362..74ebf08456 100644 --- a/include/oneapi/tbb/detail/_flow_graph_trace_impl.h +++ b/include/oneapi/tbb/detail/_flow_graph_trace_impl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2022 Intel Corporation + Copyright (c) 2005-2024 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -24,7 +24,7 @@ namespace tbb { namespace detail { -namespace d1 { +namespace d2 { template< typename T > class sender; template< typename T > class receiver; @@ -44,29 +44,29 @@ template< typename T > class receiver; static inline void fgt_alias_port(void *node, void *p, bool visible) { if(visible) - itt_relation_add( ITT_DOMAIN_FLOW, node, FLOW_NODE, __itt_relation_is_parent_of, p, FLOW_NODE ); + itt_relation_add( d1::ITT_DOMAIN_FLOW, node, FLOW_NODE, __itt_relation_is_parent_of, p, FLOW_NODE ); else - itt_relation_add( ITT_DOMAIN_FLOW, p, FLOW_NODE, __itt_relation_is_child_of, node, FLOW_NODE ); + itt_relation_add( d1::ITT_DOMAIN_FLOW, p, FLOW_NODE, __itt_relation_is_child_of, node, FLOW_NODE ); } static inline void fgt_composite ( void* codeptr, void *node, void *graph ) { - itt_make_task_group( ITT_DOMAIN_FLOW, node, FLOW_NODE, graph, FLOW_GRAPH, FLOW_COMPOSITE_NODE ); + itt_make_task_group( d1::ITT_DOMAIN_FLOW, node, FLOW_NODE, graph, FLOW_GRAPH, FLOW_COMPOSITE_NODE ); suppress_unused_warning( codeptr ); #if __TBB_FLOW_TRACE_CODEPTR if (codeptr != nullptr) { - register_node_addr(ITT_DOMAIN_FLOW, node, FLOW_NODE, CODE_ADDRESS, &codeptr); + register_node_addr(d1::ITT_DOMAIN_FLOW, node, FLOW_NODE, CODE_ADDRESS, &codeptr); } #endif } static inline void fgt_internal_alias_input_port( void *node, void *p, string_resource_index name_index ) { - itt_make_task_group( ITT_DOMAIN_FLOW, p, FLOW_INPUT_PORT, node, FLOW_NODE, name_index ); - itt_relation_add( ITT_DOMAIN_FLOW, node, FLOW_NODE, __itt_relation_is_parent_of, p, FLOW_INPUT_PORT ); + itt_make_task_group( d1::ITT_DOMAIN_FLOW, p, FLOW_INPUT_PORT, node, FLOW_NODE, name_index ); + itt_relation_add( d1::ITT_DOMAIN_FLOW, node, FLOW_NODE, __itt_relation_is_parent_of, p, FLOW_INPUT_PORT ); } static inline void fgt_internal_alias_output_port( void *node, void *p, string_resource_index name_index ) { - itt_make_task_group( ITT_DOMAIN_FLOW, p, FLOW_OUTPUT_PORT, node, FLOW_NODE, name_index ); - itt_relation_add( 
ITT_DOMAIN_FLOW, node, FLOW_NODE, __itt_relation_is_parent_of, p, FLOW_OUTPUT_PORT ); + itt_make_task_group( d1::ITT_DOMAIN_FLOW, p, FLOW_OUTPUT_PORT, node, FLOW_NODE, name_index ); + itt_relation_add( d1::ITT_DOMAIN_FLOW, node, FLOW_NODE, __itt_relation_is_parent_of, p, FLOW_OUTPUT_PORT ); } template @@ -109,15 +109,15 @@ struct fgt_internal_output_alias_helper { }; static inline void fgt_internal_create_input_port( void *node, void *p, string_resource_index name_index ) { - itt_make_task_group( ITT_DOMAIN_FLOW, p, FLOW_INPUT_PORT, node, FLOW_NODE, name_index ); + itt_make_task_group( d1::ITT_DOMAIN_FLOW, p, FLOW_INPUT_PORT, node, FLOW_NODE, name_index ); } static inline void fgt_internal_create_output_port( void* codeptr, void *node, void *p, string_resource_index name_index ) { - itt_make_task_group(ITT_DOMAIN_FLOW, p, FLOW_OUTPUT_PORT, node, FLOW_NODE, name_index); + itt_make_task_group(d1::ITT_DOMAIN_FLOW, p, FLOW_OUTPUT_PORT, node, FLOW_NODE, name_index); suppress_unused_warning( codeptr ); #if __TBB_FLOW_TRACE_CODEPTR if (codeptr != nullptr) { - register_node_addr(ITT_DOMAIN_FLOW, node, FLOW_NODE, CODE_ADDRESS, &codeptr); + register_node_addr(d1::ITT_DOMAIN_FLOW, node, FLOW_NODE, CODE_ADDRESS, &codeptr); } #endif } @@ -167,40 +167,40 @@ struct fgt_internal_output_helper { template< typename NodeType > void fgt_multioutput_node_desc( const NodeType *node, const char *desc ) { void *addr = (void *)( static_cast< receiver< typename NodeType::input_type > * >(const_cast< NodeType *>(node)) ); - itt_metadata_str_add( ITT_DOMAIN_FLOW, addr, FLOW_NODE, FLOW_OBJECT_NAME, desc ); + itt_metadata_str_add( d1::ITT_DOMAIN_FLOW, addr, FLOW_NODE, FLOW_OBJECT_NAME, desc ); } template< typename NodeType > void fgt_multiinput_multioutput_node_desc( const NodeType *node, const char *desc ) { void *addr = const_cast(node); - itt_metadata_str_add( ITT_DOMAIN_FLOW, addr, FLOW_NODE, FLOW_OBJECT_NAME, desc ); + itt_metadata_str_add( d1::ITT_DOMAIN_FLOW, addr, FLOW_NODE, 
FLOW_OBJECT_NAME, desc ); } template< typename NodeType > static inline void fgt_node_desc( const NodeType *node, const char *desc ) { void *addr = (void *)( static_cast< sender< typename NodeType::output_type > * >(const_cast< NodeType *>(node)) ); - itt_metadata_str_add( ITT_DOMAIN_FLOW, addr, FLOW_NODE, FLOW_OBJECT_NAME, desc ); + itt_metadata_str_add( d1::ITT_DOMAIN_FLOW, addr, FLOW_NODE, FLOW_OBJECT_NAME, desc ); } static inline void fgt_graph_desc( const void *g, const char *desc ) { void *addr = const_cast< void *>(g); - itt_metadata_str_add( ITT_DOMAIN_FLOW, addr, FLOW_GRAPH, FLOW_OBJECT_NAME, desc ); + itt_metadata_str_add( d1::ITT_DOMAIN_FLOW, addr, FLOW_GRAPH, FLOW_OBJECT_NAME, desc ); } static inline void fgt_body( void *node, void *body ) { - itt_relation_add( ITT_DOMAIN_FLOW, body, FLOW_BODY, __itt_relation_is_child_of, node, FLOW_NODE ); + itt_relation_add( d1::ITT_DOMAIN_FLOW, body, FLOW_BODY, __itt_relation_is_child_of, node, FLOW_NODE ); } template< int N, typename PortsTuple > static inline void fgt_multioutput_node(void* codeptr, string_resource_index t, void *g, void *input_port, PortsTuple &ports ) { - itt_make_task_group( ITT_DOMAIN_FLOW, input_port, FLOW_NODE, g, FLOW_GRAPH, t ); + itt_make_task_group( d1::ITT_DOMAIN_FLOW, input_port, FLOW_NODE, g, FLOW_GRAPH, t ); fgt_internal_create_input_port( input_port, input_port, FLOW_INPUT_PORT_0 ); fgt_internal_output_helper::register_port(codeptr, input_port, ports ); } template< int N, typename PortsTuple > static inline void fgt_multioutput_node_with_body( void* codeptr, string_resource_index t, void *g, void *input_port, PortsTuple &ports, void *body ) { - itt_make_task_group( ITT_DOMAIN_FLOW, input_port, FLOW_NODE, g, FLOW_GRAPH, t ); + itt_make_task_group( d1::ITT_DOMAIN_FLOW, input_port, FLOW_NODE, g, FLOW_GRAPH, t ); fgt_internal_create_input_port( input_port, input_port, FLOW_INPUT_PORT_0 ); fgt_internal_output_helper::register_port( codeptr, input_port, ports ); fgt_body( input_port, body 
); @@ -208,28 +208,28 @@ static inline void fgt_multioutput_node_with_body( void* codeptr, string_resourc template< int N, typename PortsTuple > static inline void fgt_multiinput_node( void* codeptr, string_resource_index t, void *g, PortsTuple &ports, void *output_port) { - itt_make_task_group( ITT_DOMAIN_FLOW, output_port, FLOW_NODE, g, FLOW_GRAPH, t ); + itt_make_task_group( d1::ITT_DOMAIN_FLOW, output_port, FLOW_NODE, g, FLOW_GRAPH, t ); fgt_internal_create_output_port( codeptr, output_port, output_port, FLOW_OUTPUT_PORT_0 ); fgt_internal_input_helper::register_port( output_port, ports ); } static inline void fgt_multiinput_multioutput_node( void* codeptr, string_resource_index t, void *n, void *g ) { - itt_make_task_group( ITT_DOMAIN_FLOW, n, FLOW_NODE, g, FLOW_GRAPH, t ); + itt_make_task_group( d1::ITT_DOMAIN_FLOW, n, FLOW_NODE, g, FLOW_GRAPH, t ); suppress_unused_warning( codeptr ); #if __TBB_FLOW_TRACE_CODEPTR if (codeptr != nullptr) { - register_node_addr(ITT_DOMAIN_FLOW, n, FLOW_NODE, CODE_ADDRESS, &codeptr); + register_node_addr(d1::ITT_DOMAIN_FLOW, n, FLOW_NODE, CODE_ADDRESS, &codeptr); } #endif } static inline void fgt_node( void* codeptr, string_resource_index t, void *g, void *output_port ) { - itt_make_task_group( ITT_DOMAIN_FLOW, output_port, FLOW_NODE, g, FLOW_GRAPH, t ); + itt_make_task_group( d1::ITT_DOMAIN_FLOW, output_port, FLOW_NODE, g, FLOW_GRAPH, t ); fgt_internal_create_output_port( codeptr, output_port, output_port, FLOW_OUTPUT_PORT_0 ); } static void fgt_node_with_body( void* codeptr, string_resource_index t, void *g, void *output_port, void *body ) { - itt_make_task_group( ITT_DOMAIN_FLOW, output_port, FLOW_NODE, g, FLOW_GRAPH, t ); + itt_make_task_group( d1::ITT_DOMAIN_FLOW, output_port, FLOW_NODE, g, FLOW_GRAPH, t ); fgt_internal_create_output_port(codeptr, output_port, output_port, FLOW_OUTPUT_PORT_0 ); fgt_body( output_port, body ); } @@ -251,47 +251,47 @@ static inline void fgt_node( void* codeptr, string_resource_index t, void *g, 
v } static inline void fgt_make_edge( void *output_port, void *input_port ) { - itt_relation_add( ITT_DOMAIN_FLOW, output_port, FLOW_OUTPUT_PORT, __itt_relation_is_predecessor_to, input_port, FLOW_INPUT_PORT); + itt_relation_add( d1::ITT_DOMAIN_FLOW, output_port, FLOW_OUTPUT_PORT, __itt_relation_is_predecessor_to, input_port, FLOW_INPUT_PORT); } static inline void fgt_remove_edge( void *output_port, void *input_port ) { - itt_relation_add( ITT_DOMAIN_FLOW, output_port, FLOW_OUTPUT_PORT, __itt_relation_is_sibling_of, input_port, FLOW_INPUT_PORT); + itt_relation_add( d1::ITT_DOMAIN_FLOW, output_port, FLOW_OUTPUT_PORT, __itt_relation_is_sibling_of, input_port, FLOW_INPUT_PORT); } static inline void fgt_graph( void *g ) { - itt_make_task_group( ITT_DOMAIN_FLOW, g, FLOW_GRAPH, nullptr, FLOW_NULL, FLOW_GRAPH ); + itt_make_task_group( d1::ITT_DOMAIN_FLOW, g, FLOW_GRAPH, nullptr, FLOW_NULL, FLOW_GRAPH ); } static inline void fgt_begin_body( void *body ) { - itt_task_begin( ITT_DOMAIN_FLOW, body, FLOW_BODY, nullptr, FLOW_NULL, FLOW_BODY ); + itt_task_begin( d1::ITT_DOMAIN_FLOW, body, FLOW_BODY, nullptr, FLOW_NULL, FLOW_BODY ); } static inline void fgt_end_body( void * ) { - itt_task_end( ITT_DOMAIN_FLOW ); + itt_task_end( d1::ITT_DOMAIN_FLOW ); } static inline void fgt_async_try_put_begin( void *node, void *port ) { - itt_task_begin( ITT_DOMAIN_FLOW, port, FLOW_OUTPUT_PORT, node, FLOW_NODE, FLOW_OUTPUT_PORT ); + itt_task_begin( d1::ITT_DOMAIN_FLOW, port, FLOW_OUTPUT_PORT, node, FLOW_NODE, FLOW_OUTPUT_PORT ); } static inline void fgt_async_try_put_end( void *, void * ) { - itt_task_end( ITT_DOMAIN_FLOW ); + itt_task_end( d1::ITT_DOMAIN_FLOW ); } static inline void fgt_async_reserve( void *node, void *graph ) { - itt_region_begin( ITT_DOMAIN_FLOW, node, FLOW_NODE, graph, FLOW_GRAPH, FLOW_NULL ); + itt_region_begin( d1::ITT_DOMAIN_FLOW, node, FLOW_NODE, graph, FLOW_GRAPH, FLOW_NULL ); } static inline void fgt_async_commit( void *node, void * /*graph*/) { - itt_region_end( 
ITT_DOMAIN_FLOW, node, FLOW_NODE ); + itt_region_end( d1::ITT_DOMAIN_FLOW, node, FLOW_NODE ); } static inline void fgt_reserve_wait( void *graph ) { - itt_region_begin( ITT_DOMAIN_FLOW, graph, FLOW_GRAPH, nullptr, FLOW_NULL, FLOW_NULL ); + itt_region_begin( d1::ITT_DOMAIN_FLOW, graph, FLOW_GRAPH, nullptr, FLOW_NULL, FLOW_NULL ); } static inline void fgt_release_wait( void *graph ) { - itt_region_end( ITT_DOMAIN_FLOW, graph, FLOW_GRAPH ); + itt_region_end( d1::ITT_DOMAIN_FLOW, graph, FLOW_GRAPH ); } #else // TBB_USE_PROFILING_TOOLS @@ -357,7 +357,7 @@ struct fgt_internal_output_alias_helper { #endif // TBB_USE_PROFILING_TOOLS -} // d1 +} // d2 } // namespace detail } // namespace tbb diff --git a/include/oneapi/tbb/detail/_flow_graph_types_impl.h b/include/oneapi/tbb/detail/_flow_graph_types_impl.h index 4827551d85..002912aa0b 100644 --- a/include/oneapi/tbb/detail/_flow_graph_types_impl.h +++ b/include/oneapi/tbb/detail/_flow_graph_types_impl.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2022 Intel Corporation + Copyright (c) 2005-2024 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,7 +21,7 @@ #error Do not #include this internal file directly; use public TBB headers instead. #endif -// included in namespace tbb::detail::d1 +// included in namespace tbb::detail::d2 // the change to key_matching (adding a K and KHash template parameter, making it a class) // means we have to pass this data to the key_matching_port. 
All the ports have only one diff --git a/include/oneapi/tbb/detail/_pipeline_filters.h b/include/oneapi/tbb/detail/_pipeline_filters.h index 46e7b95d6c..8121946729 100644 --- a/include/oneapi/tbb/detail/_pipeline_filters.h +++ b/include/oneapi/tbb/detail/_pipeline_filters.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2023 Intel Corporation + Copyright (c) 2005-2024 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -32,6 +32,12 @@ namespace d1 { class base_filter; } +namespace d2 { +template +__TBB_requires(std::copyable) +class input_node; +} + namespace r1 { TBB_EXPORT void __TBB_EXPORTED_FUNC set_end_of_input(d1::base_filter&); class pipeline; @@ -131,7 +137,7 @@ class flow_control { template friend class concrete_filter; template __TBB_requires(std::copyable) - friend class input_node; + friend class d2::input_node; public: void stop() { is_pipeline_stopped = true; } }; diff --git a/include/oneapi/tbb/detail/_task.h b/include/oneapi/tbb/detail/_task.h index 1fa75281c8..410facc4a4 100644 --- a/include/oneapi/tbb/detail/_task.h +++ b/include/oneapi/tbb/detail/_task.h @@ -44,6 +44,7 @@ class wait_context; class task_group_context; struct execution_data; class wait_tree_vertex_interface; +class task_arena_base; } namespace d2 { @@ -58,6 +59,7 @@ TBB_EXPORT void __TBB_EXPORTED_FUNC spawn(d1::task& t, d1::task_group_context& c TBB_EXPORT void __TBB_EXPORTED_FUNC execute_and_wait(d1::task& t, d1::task_group_context& t_ctx, d1::wait_context&, d1::task_group_context& w_ctx); TBB_EXPORT void __TBB_EXPORTED_FUNC wait(d1::wait_context&, d1::task_group_context& ctx); TBB_EXPORT d1::slot_id __TBB_EXPORTED_FUNC execution_slot(const d1::execution_data*); +TBB_EXPORT d1::slot_id __TBB_EXPORTED_FUNC execution_slot(const d1::task_arena_base&); TBB_EXPORT d1::task_group_context* __TBB_EXPORTED_FUNC current_context(); TBB_EXPORT d1::wait_tree_vertex_interface* 
get_thread_reference_vertex(d1::wait_tree_vertex_interface* wc); diff --git a/include/oneapi/tbb/flow_graph.h b/include/oneapi/tbb/flow_graph.h index 2df4b14050..2857a41254 100644 --- a/include/oneapi/tbb/flow_graph.h +++ b/include/oneapi/tbb/flow_graph.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2023 Intel Corporation + Copyright (c) 2005-2024 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -70,7 +70,7 @@ namespace tbb { namespace detail { -namespace d1 { +namespace d2 { //! An enumeration the provides the two most common concurrency levels: unlimited and serial enum concurrency { unlimited = 0, serial = 1 }; @@ -81,19 +81,19 @@ struct null_type {}; //! An empty class used for messages that mean "I'm done" class continue_msg {}; -} // namespace d1 +} // namespace d2 #if __TBB_CPP20_CONCEPTS_PRESENT namespace d0 { template -concept node_body_return_type = std::same_as || +concept node_body_return_type = std::same_as || std::convertible_to; // TODO: consider using std::invocable here template concept continue_node_body = std::copy_constructible && - requires( Body& body, const tbb::detail::d1::continue_msg& v ) { + requires( Body& body, const tbb::detail::d2::continue_msg& v ) { { body(v) } -> node_body_return_type; }; @@ -129,7 +129,7 @@ concept async_node_body = std::copy_constructible && } // namespace d0 #endif // __TBB_CPP20_CONCEPTS_PRESENT -namespace d1 { +namespace d2 { //! Forward declaration section template< typename T > class sender; @@ -153,7 +153,7 @@ template struct node_set; #endif -} // namespace d1 +} // namespace d2 } // namespace detail } // namespace tbb @@ -162,7 +162,7 @@ template struct node_set; namespace tbb { namespace detail { -namespace d1 { +namespace d2 { static inline std::pair order_tasks(graph_task* first, graph_task* second) { if (second->priority > first->priority) @@ -244,7 +244,8 @@ class receiver { //! 
Put an item to the receiver bool try_put( const T& t ) { - graph_task *res = try_put_task(t); + graph_task* res = try_put_task(t); + if (!res) return false; if (res != SUCCESSFULLY_ENQUEUED) spawn_in_graph_arena(graph_reference(), *res); return true; @@ -392,7 +393,7 @@ class continue_receiver : public receiver< continue_msg > { namespace tbb { namespace detail { -namespace d1 { +namespace d2 { #include "detail/_flow_graph_body_impl.h" #include "detail/_flow_graph_cache_impl.h" @@ -424,7 +425,7 @@ void graph_iterator::internal_forward() { } //! Constructs a graph with isolated task_group_context -inline graph::graph() : my_wait_context(0), my_nodes(nullptr), my_nodes_last(nullptr), my_task_arena(nullptr) { +inline graph::graph() : my_wait_context_vertex(0), my_nodes(nullptr), my_nodes_last(nullptr), my_task_arena(nullptr) { prepare_task_arena(); own_context = true; cancelled = false; @@ -435,7 +436,7 @@ inline graph::graph() : my_wait_context(0), my_nodes(nullptr), my_nodes_last(nul } inline graph::graph(task_group_context& use_this_context) : - my_wait_context(0), my_context(&use_this_context), my_nodes(nullptr), my_nodes_last(nullptr), my_task_arena(nullptr) { + my_wait_context_vertex(0), my_context(&use_this_context), my_nodes(nullptr), my_nodes_last(nullptr), my_task_arena(nullptr) { prepare_task_arena(); own_context = false; cancelled = false; @@ -454,13 +455,13 @@ inline graph::~graph() { } inline void graph::reserve_wait() { - my_wait_context.reserve(); + my_wait_context_vertex.reserve(); fgt_reserve_wait(this); } inline void graph::release_wait() { fgt_release_wait(this); - my_wait_context.release(); + my_wait_context_vertex.release(); } inline void graph::register_node(graph_node *n) { @@ -703,7 +704,7 @@ class input_node : public graph_node, public sender< Output > { return false; } if ( !my_has_cached_item ) { - flow_control control; + d1::flow_control control; fgt_begin_body( my_body ); @@ -722,10 +723,9 @@ class input_node : public graph_node, public 
sender< Output > { } graph_task* create_put_task() { - small_object_allocator allocator{}; + d1::small_object_allocator allocator{}; typedef input_node_task_bypass< input_node > task_type; graph_task* t = allocator.new_object(my_graph, allocator, *this); - my_graph.reserve_wait(); return t; } @@ -1168,7 +1168,7 @@ class buffer_node }; // implements the aggregator_operation concept - class buffer_operation : public aggregated_operation< buffer_operation > { + class buffer_operation : public d1::aggregated_operation< buffer_operation > { public: char type; T* elem; @@ -1183,9 +1183,9 @@ class buffer_node }; bool forwarder_busy; - typedef aggregating_functor handler_type; - friend class aggregating_functor; - aggregator< handler_type, buffer_operation> my_aggregator; + typedef d1::aggregating_functor handler_type; + friend class d1::aggregating_functor; + d1::aggregator< handler_type, buffer_operation> my_aggregator; virtual void handle_operations(buffer_operation *op_list) { handle_operations_impl(op_list, this); @@ -1218,9 +1218,8 @@ class buffer_node if(is_graph_active(this->my_graph)) { forwarder_busy = true; typedef forward_task_bypass task_type; - small_object_allocator allocator{}; + d1::small_object_allocator allocator{}; graph_task* new_task = allocator.new_object(graph_reference(), allocator, *this); - my_graph.reserve_wait(); // tmp should point to the last item handled by the aggregator. This is the operation // the handling thread enqueued. So modifying that record will be okay. // TODO revamp: check that the issue is still present @@ -1403,7 +1402,7 @@ class buffer_node It also calls r.remove_predecessor(*this) to remove this node as a predecessor. 
*/ bool remove_successor( successor_type &r ) override { // TODO revamp: investigate why full qualification is necessary here - tbb::detail::d1::remove_predecessor(r, *this); + tbb::detail::d2::remove_predecessor(r, *this); buffer_operation op_data(rem_succ); op_data.r = &r; my_aggregator.execute(&op_data); @@ -1965,9 +1964,8 @@ class limiter_node : public graph_node, public receiver< T >, public sender< T > if ( check_conditions() ) { if ( is_graph_active(this->my_graph) ) { typedef forward_task_bypass> task_type; - small_object_allocator allocator{}; + d1::small_object_allocator allocator{}; graph_task* rtask = allocator.new_object( my_graph, allocator, *this ); - my_graph.reserve_wait(); spawn_in_graph_arena(graph_reference(), *rtask); } } @@ -1984,10 +1982,9 @@ class limiter_node : public graph_node, public receiver< T >, public sender< T > if (reserved) my_predecessors.try_release(); if ( check_conditions() ) { if ( is_graph_active(this->my_graph) ) { - small_object_allocator allocator{}; + d1::small_object_allocator allocator{}; typedef forward_task_bypass> task_type; graph_task* t = allocator.new_object(my_graph, allocator, *this); - my_graph.reserve_wait(); __TBB_ASSERT(!rval, "Have two tasks to handle"); return t; } @@ -2035,10 +2032,9 @@ class limiter_node : public graph_node, public receiver< T >, public sender< T > //spawn a forward task if this is the only successor if ( was_empty && !my_predecessors.empty() && my_count + my_tries < my_threshold ) { if ( is_graph_active(this->my_graph) ) { - small_object_allocator allocator{}; + d1::small_object_allocator allocator{}; typedef forward_task_bypass> task_type; graph_task* t = allocator.new_object(my_graph, allocator, *this); - my_graph.reserve_wait(); spawn_in_graph_arena(graph_reference(), *t); } } @@ -2049,7 +2045,7 @@ class limiter_node : public graph_node, public receiver< T >, public sender< T > /** r.remove_predecessor(*this) is also called. 
*/ bool remove_successor( successor_type &r ) override { // TODO revamp: investigate why qualification is needed for remove_predecessor() call - tbb::detail::d1::remove_predecessor(r, *this); + tbb::detail::d2::remove_predecessor(r, *this); my_successors.remove_successor(r); return true; } @@ -2059,10 +2055,9 @@ class limiter_node : public graph_node, public receiver< T >, public sender< T > spin_mutex::scoped_lock lock(my_mutex); my_predecessors.add( src ); if ( my_count + my_tries < my_threshold && !my_successors.empty() && is_graph_active(this->my_graph) ) { - small_object_allocator allocator{}; + d1::small_object_allocator allocator{}; typedef forward_task_bypass> task_type; graph_task* t = allocator.new_object(my_graph, allocator, *this); - my_graph.reserve_wait(); spawn_in_graph_arena(graph_reference(), *t); } return true; @@ -2094,10 +2089,9 @@ class limiter_node : public graph_node, public receiver< T >, public sender< T > spin_mutex::scoped_lock lock(my_mutex); --my_tries; if (check_conditions() && is_graph_active(this->my_graph)) { - small_object_allocator allocator{}; + d1::small_object_allocator allocator{}; typedef forward_task_bypass> task_type; rtask = allocator.new_object(my_graph, allocator, *this); - my_graph.reserve_wait(); } } else { @@ -3054,10 +3048,9 @@ class overwrite_node : public graph_node, public receiver, public sender { // because failed reserve does not mean that register_successor is not ready to put a message immediately. // We have some sort of infinite loop: reserving node tries to set pull state for the edge, // but overwrite_node tries to return push state back. That is why we have to break this loop with task creation. 
- small_object_allocator allocator{}; + d1::small_object_allocator allocator{}; typedef register_predecessor_task task_type; graph_task* t = allocator.new_object(graph_reference(), allocator, *this, s); - graph_reference().reserve_wait(); spawn_in_graph_arena( my_graph, *t ); } } else { @@ -3128,13 +3121,13 @@ class overwrite_node : public graph_node, public receiver, public sender { //! Breaks an infinite loop between the node reservation and register_successor call struct register_predecessor_task : public graph_task { register_predecessor_task( - graph& g, small_object_allocator& allocator, predecessor_type& owner, successor_type& succ) + graph& g, d1::small_object_allocator& allocator, predecessor_type& owner, successor_type& succ) : graph_task(g, allocator), o(owner), s(succ) {}; - task* execute(execution_data& ed) override { + d1::task* execute(d1::execution_data& ed) override { // TODO revamp: investigate why qualification is needed for register_successor() call - using tbb::detail::d1::register_predecessor; - using tbb::detail::d1::register_successor; + using tbb::detail::d2::register_predecessor; + using tbb::detail::d2::register_successor; if ( !register_predecessor(s, o) ) { register_successor(o, s); } @@ -3142,7 +3135,7 @@ class overwrite_node : public graph_node, public receiver, public sender { return nullptr; } - task* cancel(execution_data& ed) override { + d1::task* cancel(d1::execution_data& ed) override { finalize(ed); return nullptr; } @@ -3293,7 +3286,7 @@ inline void set_name(const async_node& node, const char * { fgt_multioutput_node_desc(&node, name); } -} // d1 +} // d2 } // detail } // tbb @@ -3304,56 +3297,56 @@ inline void set_name(const async_node& node, const char * namespace tbb { namespace flow { inline namespace v1 { - using detail::d1::receiver; - using detail::d1::sender; - - using detail::d1::serial; - using detail::d1::unlimited; - - using detail::d1::reset_flags; - using detail::d1::rf_reset_protocol; - using 
detail::d1::rf_reset_bodies; - using detail::d1::rf_clear_edges; - - using detail::d1::graph; - using detail::d1::graph_node; - using detail::d1::continue_msg; - - using detail::d1::input_node; - using detail::d1::function_node; - using detail::d1::multifunction_node; - using detail::d1::split_node; - using detail::d1::output_port; - using detail::d1::indexer_node; - using detail::d1::tagged_msg; - using detail::d1::cast_to; - using detail::d1::is_a; - using detail::d1::continue_node; - using detail::d1::overwrite_node; - using detail::d1::write_once_node; - using detail::d1::broadcast_node; - using detail::d1::buffer_node; - using detail::d1::queue_node; - using detail::d1::sequencer_node; - using detail::d1::priority_queue_node; - using detail::d1::limiter_node; - using namespace detail::d1::graph_policy_namespace; - using detail::d1::join_node; - using detail::d1::input_port; - using detail::d1::copy_body; - using detail::d1::make_edge; - using detail::d1::remove_edge; - using detail::d1::tag_value; - using detail::d1::composite_node; - using detail::d1::async_node; - using detail::d1::node_priority_t; - using detail::d1::no_priority; + using detail::d2::receiver; + using detail::d2::sender; + + using detail::d2::serial; + using detail::d2::unlimited; + + using detail::d2::reset_flags; + using detail::d2::rf_reset_protocol; + using detail::d2::rf_reset_bodies; + using detail::d2::rf_clear_edges; + + using detail::d2::graph; + using detail::d2::graph_node; + using detail::d2::continue_msg; + + using detail::d2::input_node; + using detail::d2::function_node; + using detail::d2::multifunction_node; + using detail::d2::split_node; + using detail::d2::output_port; + using detail::d2::indexer_node; + using detail::d2::tagged_msg; + using detail::d2::cast_to; + using detail::d2::is_a; + using detail::d2::continue_node; + using detail::d2::overwrite_node; + using detail::d2::write_once_node; + using detail::d2::broadcast_node; + using detail::d2::buffer_node; + using 
detail::d2::queue_node; + using detail::d2::sequencer_node; + using detail::d2::priority_queue_node; + using detail::d2::limiter_node; + using namespace detail::d2::graph_policy_namespace; + using detail::d2::join_node; + using detail::d2::input_port; + using detail::d2::copy_body; + using detail::d2::make_edge; + using detail::d2::remove_edge; + using detail::d2::tag_value; + using detail::d2::composite_node; + using detail::d2::async_node; + using detail::d2::node_priority_t; + using detail::d2::no_priority; #if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET - using detail::d1::follows; - using detail::d1::precedes; - using detail::d1::make_node_set; - using detail::d1::make_edges; + using detail::d2::follows; + using detail::d2::precedes; + using detail::d2::make_node_set; + using detail::d2::make_edges; #endif } // v1 @@ -3362,7 +3355,7 @@ inline namespace v1 { using detail::d1::flow_control; namespace profiling { - using detail::d1::set_name; + using detail::d2::set_name; } // profiling } // tbb diff --git a/include/oneapi/tbb/flow_graph_abstractions.h b/include/oneapi/tbb/flow_graph_abstractions.h index 121f167c4d..329e75c43e 100644 --- a/include/oneapi/tbb/flow_graph_abstractions.h +++ b/include/oneapi/tbb/flow_graph_abstractions.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2021 Intel Corporation + Copyright (c) 2005-2024 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,7 +19,7 @@ namespace tbb { namespace detail { -namespace d1 { +namespace d2 { //! 
Pure virtual template classes that define interfaces for async communication class graph_proxy { @@ -43,7 +43,7 @@ class receiver_gateway : public graph_proxy { virtual bool try_put(const input_type&) = 0; }; -} // d1 +} // d2 } // detail diff --git a/src/tbb/arena.cpp b/src/tbb/arena.cpp index 0e7cf43c3b..a7985d826a 100644 --- a/src/tbb/arena.cpp +++ b/src/tbb/arena.cpp @@ -503,6 +503,7 @@ struct task_arena_impl { static void wait(d1::task_arena_base&); static int max_concurrency(const d1::task_arena_base*); static void enqueue(d1::task&, d1::task_group_context*, d1::task_arena_base*); + static d1::slot_id execution_slot(const d1::task_arena_base&); }; void __TBB_EXPORTED_FUNC initialize(d1::task_arena_base& ta) { @@ -533,6 +534,10 @@ void __TBB_EXPORTED_FUNC enqueue(d1::task& t, d1::task_group_context& ctx, d1::t task_arena_impl::enqueue(t, &ctx, ta); } +d1::slot_id __TBB_EXPORTED_FUNC execution_slot(const d1::task_arena_base& arena) { + return task_arena_impl::execution_slot(arena); +} + void task_arena_impl::initialize(d1::task_arena_base& ta) { // Enforce global market initialization to properly initialize soft limit (void)governor::get_thread_data(); @@ -559,7 +564,7 @@ void task_arena_impl::initialize(d1::task_arena_base& ta) { ta.my_numa_id, ta.core_type(), ta.max_threads_per_core()); if (observer) { // TODO: Consider lazy initialization for internal arena so - // the direct calls to observer might be omitted until actual initialization. + // the direct calls to observer might be omitted until actual initialization. 
observer->on_scheduler_entry(true); } #endif /*__TBB_CPUBIND_PRESENT*/ @@ -624,6 +629,14 @@ void task_arena_impl::enqueue(d1::task& t, d1::task_group_context* c, d1::task_a a->enqueue_task(t, *ctx, *td); } +d1::slot_id task_arena_impl::execution_slot(const d1::task_arena_base& ta) { + thread_data* td = governor::get_thread_data_if_initialized(); + if (td && (td->is_attached_to(ta.my_arena.load(std::memory_order_relaxed)))) { + return td->my_arena_index; + } + return d1::slot_id(-1); +} + class nested_arena_context : no_copy { public: nested_arena_context(thread_data& td, arena& nested_arena, std::size_t slot_index) diff --git a/src/tbb/def/lin32-tbb.def b/src/tbb/def/lin32-tbb.def index c9582a73d2..737e8ec2af 100644 --- a/src/tbb/def/lin32-tbb.def +++ b/src/tbb/def/lin32-tbb.def @@ -106,6 +106,7 @@ _ZN3tbb6detail2r120isolate_within_arenaERNS0_2d113delegate_baseEi; _ZN3tbb6detail2r17enqueueERNS0_2d14taskEPNS2_15task_arena_baseE; _ZN3tbb6detail2r17enqueueERNS0_2d14taskERNS2_18task_group_contextEPNS2_15task_arena_baseE; _ZN3tbb6detail2r14waitERNS0_2d115task_arena_baseE; +_ZN3tbb6detail2r114execution_slotERKNS0_2d115task_arena_baseE; /* System topology parsing and threads pinning (governor.cpp) */ _ZN3tbb6detail2r115numa_node_countEv; @@ -160,4 +161,3 @@ local: /* TODO: fill more precisely */ *; }; - diff --git a/src/tbb/def/lin64-tbb.def b/src/tbb/def/lin64-tbb.def index 003350b1b7..41aca2e932 100644 --- a/src/tbb/def/lin64-tbb.def +++ b/src/tbb/def/lin64-tbb.def @@ -106,6 +106,7 @@ _ZN3tbb6detail2r120isolate_within_arenaERNS0_2d113delegate_baseEl; _ZN3tbb6detail2r17enqueueERNS0_2d14taskEPNS2_15task_arena_baseE; _ZN3tbb6detail2r17enqueueERNS0_2d14taskERNS2_18task_group_contextEPNS2_15task_arena_baseE; _ZN3tbb6detail2r14waitERNS0_2d115task_arena_baseE; +_ZN3tbb6detail2r114execution_slotERKNS0_2d115task_arena_baseE; /* System topology parsing and threads pinning (governor.cpp) */ _ZN3tbb6detail2r115numa_node_countEv; diff --git a/src/tbb/def/mac64-tbb.def 
b/src/tbb/def/mac64-tbb.def index f8d7ed6bb6..38bc48d30e 100644 --- a/src/tbb/def/mac64-tbb.def +++ b/src/tbb/def/mac64-tbb.def @@ -108,6 +108,7 @@ __ZN3tbb6detail2r120isolate_within_arenaERNS0_2d113delegate_baseEl __ZN3tbb6detail2r17enqueueERNS0_2d14taskEPNS2_15task_arena_baseE __ZN3tbb6detail2r17enqueueERNS0_2d14taskERNS2_18task_group_contextEPNS2_15task_arena_baseE __ZN3tbb6detail2r14waitERNS0_2d115task_arena_baseE +__ZN3tbb6detail2r114execution_slotERKNS0_2d115task_arena_baseE # System topology parsing and threads pinning (governor.cpp) __ZN3tbb6detail2r115numa_node_countEv @@ -157,4 +158,3 @@ __ZN3tbb6detail2r121notify_by_address_allEPv # Versioning (version.cpp) _TBB_runtime_interface_version _TBB_runtime_version - diff --git a/src/tbb/def/win32-tbb.def b/src/tbb/def/win32-tbb.def index c7c09e62f4..94b5441701 100644 --- a/src/tbb/def/win32-tbb.def +++ b/src/tbb/def/win32-tbb.def @@ -100,6 +100,7 @@ EXPORTS ?terminate@r1@detail@tbb@@YAXAAVtask_arena_base@d1@23@@Z ?wait@r1@detail@tbb@@YAXAAVtask_arena_base@d1@23@@Z ?enqueue@r1@detail@tbb@@YAXAAVtask@d1@23@AAVtask_group_context@523@PAVtask_arena_base@523@@Z +?execution_slot@r1@detail@tbb@@YAGABVtask_arena_base@d1@23@@Z ; System topology parsing and threads pinning (governor.cpp) ?numa_node_count@r1@detail@tbb@@YAIXZ diff --git a/src/tbb/def/win64-tbb.def b/src/tbb/def/win64-tbb.def index 0fb46c2933..96bafc0163 100644 --- a/src/tbb/def/win64-tbb.def +++ b/src/tbb/def/win64-tbb.def @@ -100,6 +100,7 @@ EXPORTS ?isolate_within_arena@r1@detail@tbb@@YAXAEAVdelegate_base@d1@23@_J@Z ?enqueue@r1@detail@tbb@@YAXAEAVtask@d1@23@PEAVtask_arena_base@523@@Z ?enqueue@r1@detail@tbb@@YAXAEAVtask@d1@23@AEAVtask_group_context@523@PEAVtask_arena_base@523@@Z +?execution_slot@r1@detail@tbb@@YAGAEBVtask_arena_base@d1@23@@Z ; System topology parsing and threads pinning (governor.cpp) ?numa_node_count@r1@detail@tbb@@YAIXZ diff --git a/src/tbb/scheduler_common.h b/src/tbb/scheduler_common.h index 06ec543e6b..e4686e1673 100644 --- 
a/src/tbb/scheduler_common.h +++ b/src/tbb/scheduler_common.h @@ -397,7 +397,7 @@ struct suspend_point_type { void finilize_resume() { m_stack_state.store(stack_state::active, std::memory_order_relaxed); - // Set the suspended state for the stack that we left. If the state is already notified, it means that + // Set the suspended state for the stack that we left. If the state is already notified, it means that // someone already tried to resume our previous stack but failed. So, we need to resume it. // m_prev_suspend_point might be nullptr when destroying co_context based on threads if (m_prev_suspend_point && m_prev_suspend_point->m_stack_state.exchange(stack_state::suspended) == stack_state::notified) { diff --git a/src/tbb/task.cpp b/src/tbb/task.cpp index fde41980a0..84b4278f0a 100644 --- a/src/tbb/task.cpp +++ b/src/tbb/task.cpp @@ -255,4 +255,3 @@ d1::wait_tree_vertex_interface* get_thread_reference_vertex(d1::wait_tree_vertex } // namespace r1 } // namespace detail } // namespace tbb - diff --git a/test/common/graph_utils.h b/test/common/graph_utils.h index 24814d5fd3..2ab6db854f 100644 --- a/test/common/graph_utils.h +++ b/test/common/graph_utils.h @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2022 Intel Corporation + Copyright (c) 2005-2024 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -35,7 +35,7 @@ #include "common/spin_barrier.h" -using tbb::detail::d1::SUCCESSFULLY_ENQUEUED; +using tbb::detail::d2::SUCCESSFULLY_ENQUEUED; // Needed conversion to and from continue_msg, but didn't want to add // conversion operators to the class, since we don't want it in general, @@ -277,9 +277,9 @@ struct harness_counting_receiver : public tbb::flow::receiver { return my_graph; } - tbb::detail::d1::graph_task *try_put_task( const T & ) override { + tbb::detail::d2::graph_task *try_put_task( const T & ) override { ++my_count; - return const_cast(SUCCESSFULLY_ENQUEUED); + return const_cast(SUCCESSFULLY_ENQUEUED); } void validate() { @@ -323,13 +323,13 @@ struct harness_mapped_receiver : public tbb::flow::receiver { my_multiset = new multiset_type; } - tbb::detail::d1::graph_task* try_put_task( const T &t ) override { + tbb::detail::d2::graph_task* try_put_task( const T &t ) override { if ( my_multiset ) { (*my_multiset).emplace( t ); } else { ++my_count; } - return const_cast(SUCCESSFULLY_ENQUEUED); + return const_cast(SUCCESSFULLY_ENQUEUED); } tbb::flow::graph& graph_reference() const override { @@ -842,7 +842,7 @@ struct throwing_body{ if(my_counter == Threshold) throw Threshold; } - + template output_tuple_type operator()(const input_type&) { ++my_counter; diff --git a/test/tbb/test_broadcast_node.cpp b/test/tbb/test_broadcast_node.cpp index b3905e6d60..fe0eea0f13 100644 --- a/test/tbb/test_broadcast_node.cpp +++ b/test/tbb/test_broadcast_node.cpp @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2023 Intel Corporation + Copyright (c) 2005-2024 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -29,7 +29,7 @@ //! 
\brief Test for [flow_graph.broadcast_node] specification -#define TBB_INTERNAL_NAMESPACE detail::d1 +#define TBB_INTERNAL_NAMESPACE detail::d2 namespace tbb { using task = TBB_INTERNAL_NAMESPACE::graph_task; } @@ -281,4 +281,3 @@ TEST_CASE("Deduction guides"){ test_deduction_guides(); } #endif - diff --git a/test/tbb/test_buffer_node.cpp b/test/tbb/test_buffer_node.cpp index 89f4485b3d..c1c4582b8e 100644 --- a/test/tbb/test_buffer_node.cpp +++ b/test/tbb/test_buffer_node.cpp @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2023 Intel Corporation + Copyright (c) 2005-2024 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -307,7 +307,7 @@ int test_parallel(int num_threads) { // Chained buffers ( 2 & 3 ), single sender, items at last buffer in arbitrary order // -#define TBB_INTERNAL_NAMESPACE detail::d1 +#define TBB_INTERNAL_NAMESPACE detail::d2 using tbb::TBB_INTERNAL_NAMESPACE::register_predecessor; using tbb::TBB_INTERNAL_NAMESPACE::remove_predecessor; diff --git a/test/tbb/test_continue_node.cpp b/test/tbb/test_continue_node.cpp index 8c2c5c5bb9..4b81c8ee94 100644 --- a/test/tbb/test_continue_node.cpp +++ b/test/tbb/test_continue_node.cpp @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2023 Intel Corporation + Copyright (c) 2005-2024 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -63,7 +63,7 @@ template< typename OutputType > void run_continue_nodes( int p, tbb::flow::graph& g, tbb::flow::continue_node< OutputType >& n ) { fake_continue_sender fake_sender; for (size_t i = 0; i < N; ++i) { - tbb::detail::d1::register_predecessor(n, fake_sender); + tbb::detail::d2::register_predecessor(n, fake_sender); } for (size_t num_receivers = 1; num_receivers <= MAX_NODES; ++num_receivers ) { @@ -138,7 +138,7 @@ void continue_nodes_with_copy( ) { tbb::flow::continue_node< OutputType > exe_node( g, cf ); fake_continue_sender fake_sender; for (size_t i = 0; i < N; ++i) { - tbb::detail::d1::register_predecessor(exe_node, fake_sender); + tbb::detail::d2::register_predecessor(exe_node, fake_sender); } for (size_t num_receivers = 1; num_receivers <= MAX_NODES; ++num_receivers ) { diff --git a/test/tbb/test_flow_graph_whitebox.cpp b/test/tbb/test_flow_graph_whitebox.cpp index a3ed03b252..88365d892d 100644 --- a/test/tbb/test_flow_graph_whitebox.cpp +++ b/test/tbb/test_flow_graph_whitebox.cpp @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2021 Intel Corporation + Copyright (c) 2005-2024 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -459,7 +459,7 @@ template <> struct DecrementerHelper { template static void check(Decrementer& decrementer) { - auto& d = static_cast(decrementer); + auto& d = static_cast(decrementer); CHECK_MESSAGE(d.my_predecessor_count == 0, "error in pred count"); CHECK_MESSAGE(d.my_initial_predecessor_count == 0, "error in initial pred count"); CHECK_MESSAGE(d.my_current_count == 0, "error in current count"); diff --git a/test/tbb/test_function_node.cpp b/test/tbb/test_function_node.cpp index aa7e41ca59..bf1e664988 100644 --- a/test/tbb/test_function_node.cpp +++ b/test/tbb/test_function_node.cpp @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2021 Intel Corporation + Copyright (c) 2005-2024 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/test/tbb/test_input_node.cpp b/test/tbb/test_input_node.cpp index f27bf71482..73082ae075 100644 --- a/test/tbb/test_input_node.cpp +++ b/test/tbb/test_input_node.cpp @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2022 Intel Corporation + Copyright (c) 2005-2024 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -30,8 +30,8 @@ //! 
\brief Test for [flow_graph.input_node] specification -using tbb::detail::d1::graph_task; -using tbb::detail::d1::SUCCESSFULLY_ENQUEUED; +using tbb::detail::d2::graph_task; +using tbb::detail::d2::SUCCESSFULLY_ENQUEUED; const int N = 1000; diff --git a/test/tbb/test_join_node.h b/test/tbb/test_join_node.h index 8bb12bad51..2216310c1a 100644 --- a/test/tbb/test_join_node.h +++ b/test/tbb/test_join_node.h @@ -245,10 +245,10 @@ struct my_struct_key { } }; -using tbb::detail::d1::type_to_key_function_body; -using tbb::detail::d1::hash_buffer; +using tbb::detail::d2::type_to_key_function_body; +using tbb::detail::d2::type_to_key_function_body_leaf; +using tbb::detail::d2::hash_buffer; using tbb::detail::d1::tbb_hash_compare; -using tbb::detail::d1::type_to_key_function_body_leaf; template struct VtoKFB { typedef type_to_key_function_body type; diff --git a/test/tbb/test_limiter_node.cpp b/test/tbb/test_limiter_node.cpp index 897f840d36..43cc6750eb 100644 --- a/test/tbb/test_limiter_node.cpp +++ b/test/tbb/test_limiter_node.cpp @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2023 Intel Corporation + Copyright (c) 2005-2024 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -38,8 +38,8 @@ const int L = 10; const int N = 1000; -using tbb::detail::d1::SUCCESSFULLY_ENQUEUED; -using tbb::detail::d1::graph_task; +using tbb::detail::d2::SUCCESSFULLY_ENQUEUED; +using tbb::detail::d2::graph_task; template< typename T > struct serial_receiver : public tbb::flow::receiver, utils::NoAssign { diff --git a/test/tbb/test_tagged_msg.cpp b/test/tbb/test_tagged_msg.cpp index 656f0d3e89..520ecda9c2 100644 --- a/test/tbb/test_tagged_msg.cpp +++ b/test/tbb/test_tagged_msg.cpp @@ -1,5 +1,5 @@ /* - Copyright (c) 2005-2022 Intel Corporation + Copyright (c) 2005-2024 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -54,7 +54,7 @@ typedef tbb::flow::tagged_msg wi(42); Wrapper wic(23);