From c9c7a848fe365de9618f80dcf07aebe9187327fc Mon Sep 17 00:00:00 2001 From: Veelz Date: Fri, 25 Jan 2019 12:17:01 +0300 Subject: [PATCH 1/6] Implement segmented stack --- cds/container/segmented_stack.h | 293 ++++++++ cds/intrusive/segmented_stack.h | 623 ++++++++++++++++++ projects/Win/vc141/gtest-stack.vcxproj | 6 + .../Win/vc141/gtest-stack.vcxproj.filters | 18 + projects/Win/vc141/stress-stack.vcxproj | 2 +- test/stress/stack/intrusive_push_pop.cpp | 26 + test/stress/stack/intrusive_stack_push_pop.h | 1 + test/stress/stack/intrusive_stack_type.h | 76 +++ test/stress/stack/push.cpp | 3 + test/stress/stack/push_pop.cpp | 3 + test/stress/stack/stack_type.h | 85 +++ test/unit/stack/CMakeLists.txt | 4 + .../stack/intrusive_segmented_stack_dhp.cpp | 122 ++++ .../stack/intrusive_segmented_stack_hp.cpp | 122 ++++ test/unit/stack/segmented_stack_dhp.cpp | 86 +++ test/unit/stack/segmented_stack_hp.cpp | 73 ++ .../stack/test_intrusive_segmented_stack.h | 153 +++++ test/unit/stack/test_segmented_stack.h | 64 ++ 18 files changed, 1759 insertions(+), 1 deletion(-) create mode 100644 cds/container/segmented_stack.h create mode 100644 cds/intrusive/segmented_stack.h create mode 100644 test/unit/stack/intrusive_segmented_stack_dhp.cpp create mode 100644 test/unit/stack/intrusive_segmented_stack_hp.cpp create mode 100644 test/unit/stack/segmented_stack_dhp.cpp create mode 100644 test/unit/stack/segmented_stack_hp.cpp create mode 100644 test/unit/stack/test_intrusive_segmented_stack.h create mode 100644 test/unit/stack/test_segmented_stack.h diff --git a/cds/container/segmented_stack.h b/cds/container/segmented_stack.h new file mode 100644 index 000000000..61c494714 --- /dev/null +++ b/cds/container/segmented_stack.h @@ -0,0 +1,293 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: 
http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_SEGMENTED_STACK_H +#define CDSLIB_CONTAINER_SEGMENTED_STACK_H + +#include +#include // ref +#include +// #include + +namespace cds { namespace container { + + /// SegmentedStack -related declarations + namespace segmented_stack { + +# ifdef CDS_DOXYGEN_INVOKED + /// SegmentedStack internal statistics + typedef cds::intrusive::segmented_stack::stat stat; +# else + using cds::intrusive::segmented_stack::stat; +# endif + + /// SegmentedStack empty internal statistics (no overhead) + typedef cds::intrusive::segmented_stack::empty_stat empty_stat; + + /// SegmentedStack default type traits + struct traits { + + /// Item allocator. 
Default is \ref CDS_DEFAULT_ALLOCATOR + typedef CDS_DEFAULT_ALLOCATOR node_allocator; + + typedef atomicity::item_counter item_counter; + + /// Internal statistics, possible predefined types are \ref stat, \ref empty_stat (the default) + typedef segmented_stack::empty_stat stat; + + /// Memory model, default is opt::v::relaxed_ordering. See cds::opt::memory_model for the full list of possible types + typedef opt::v::relaxed_ordering memory_model; + + /// Alignment of critical data, default is cache line alignment. See cds::opt::alignment option specification + enum { alignment = opt::cache_line_alignment }; + + enum { padding = cds::intrusive::segmented_stack::traits::padding }; + + /// Segment allocator. Default is \ref CDS_DEFAULT_ALLOCATOR + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// Lock type used to maintain an internal list of allocated segments + typedef cds::sync::spin lock_type; + }; + + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... 
+ >::type type; +# endif + }; + + } // namespace segmented_stack + + //@cond + namespace details { + + template + struct make_segmented_stack + { + typedef GC gc; + typedef T value_type; + typedef Traits original_type_traits; + + typedef cds::details::Allocator< T, typename original_type_traits::node_allocator > cxx_node_allocator; + struct node_disposer { + void operator()( T * p ) + { + cxx_node_allocator().Delete( p ); + } + }; + + struct intrusive_type_traits: public original_type_traits + { + typedef node_disposer disposer; + }; + + typedef cds::intrusive::SegmentedStack< gc, value_type, intrusive_type_traits > type; + }; + + } // namespace details + //@endcond + + /// Segmented stack + /** @ingroup cds_nonintrusive_stack + + The stack is based on work + - [2014] Henzinger, Kirsch, Payer, Sezgin, Sokolova Quantitative Relaxation of Concurrent Data Structures + + Template parameters: + - \p GC - a garbage collector, possible types are cds::gc::HP, cds::gc::DHP + - \p T - the type of values stored in the stack + - \p Traits - stack type traits, default is \p segmented_stack::traits. + \p segmented_stack::make_traits metafunction can be used to construct your + type traits. + */ + template + class SegmentedStack: +#ifdef CDS_DOXYGEN_INVOKED + public cds::intrusive::SegmentedStack< GC, T, Traits > +#else + public details::make_segmented_stack< GC, T, Traits >::type +#endif + { + //@cond + typedef details::make_segmented_stack< GC, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + public: + typedef GC gc; ///< Garbage collector + typedef T value_type; ///< type of the value stored in the stack + typedef Traits traits; ///< Stack traits + + typedef typename traits::node_allocator node_allocator; ///< Node allocator + typedef typename base_class::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename base_class::item_counter item_counter; ///< Item counting policy, see cds::opt::item_counter option setter + typedef typename base_class::stat stat ; ///< Internal statistics policy + typedef typename base_class::lock_type lock_type ; ///< Type of mutex for maintaining an internal list of allocated segments. + + static const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount ; ///< Count of hazard pointer required for the algorithm + + protected: + //@cond + typedef typename maker::cxx_node_allocator cxx_node_allocator; + typedef std::unique_ptr< value_type, typename maker::node_disposer > scoped_node_ptr; + + static value_type * alloc_node( value_type const& v ) + { + return cxx_node_allocator().New( v ); + } + + static value_type * alloc_node() + { + return cxx_node_allocator().New(); + } + + template + static value_type * alloc_node_move( Args&&... args ) + { + return cxx_node_allocator().MoveNew( std::forward( args )... ); + } + //@endcond + + public: + /// Initializes the empty stack + SegmentedStack( + size_t nQuasiFactor ///< Quasi factor. If it is not a power of 2 it is rounded up to nearest power of 2. Minimum is 2. + ) + : base_class( nQuasiFactor ) + {} + + /// Clears the stack and deletes all internal data + ~SegmentedStack() + {} + + bool push( value_type const& val ) + { + scoped_node_ptr p( alloc_node(val)); + if ( base_class::push( *p )) { + p.release(); + return true; + } + return false; + } + + /// Inserts a new element at last segment of the stack, move semantics + bool push( value_type&& val ) + { + scoped_node_ptr p( alloc_node_move( std::move( val ))); + if ( base_class::push( *p )) { + p.release(); + return true; + } + return false; + } + + template + bool push_with( Func f ) + { + scoped_node_ptr p( alloc_node()); + f( *p ); + if ( base_class::push( *p )) { + p.release(); + return true; + } + return false; + } + + + template + bool emplace( Args&&... 
args ) + { + scoped_node_ptr p( alloc_node_move( std::forward(args)... )); + if ( base_class::push( *p )) { + p.release(); + return true; + } + return false; + } + + /// Pop a value from the stack + bool pop( value_type& dest ) + { + return pop_with( [&dest]( value_type& src ) { dest = std::move( src );}); + } + + /// Pop a value using a functor + template + bool pop_with( Func f ) + { + value_type * p = base_class::pop(); + if ( p ) { + f( *p ); + gc::template retire< typename maker::node_disposer >( p ); + return true; + } + return false; + } + + + /// Checks if the stack is empty + bool empty() const + { + return base_class::empty(); + } + + /// Clear the stack + void clear() + { + base_class::clear(); + } + + /// Returns stack's item count + size_t size() const + { + return base_class::size(); + } + + /// Returns reference to internal statistics + const stat& statistics() const + { + return base_class::statistics(); + } + + /// Returns quasi factor, a power-of-two number + size_t quasi_factor() const + { + return base_class::quasi_factor(); + } + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_SEGMENTED_STACK_H diff --git a/cds/intrusive/segmented_stack.h b/cds/intrusive/segmented_stack.h new file mode 100644 index 000000000..1720d6bbe --- /dev/null +++ b/cds/intrusive/segmented_stack.h @@ -0,0 +1,623 @@ +// Copyright (c) 2006-2018 Maxim Khizhinsky +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) + +#ifndef CDSLIB_INTRUSIVE_SEGMENTED_STACK_H +#define CDSLIB_INTRUSIVE_SEGMENTED_STACK_H + +#include +#include +#include +#include +#include +#include + +#include + +#if CDS_COMPILER == CDS_COMPILER_MSVC +# pragma warning( push ) +# pragma warning( disable: 4355 ) // warning C4355: 'this' : used in base member initializer list +#endif + +namespace cds { namespace intrusive { + + /// SegmentedStack -related declarations + namespace segmented_stack { + + /// SegmentedStack internal statistics. May be used for debugging or profiling + template + struct stat + { + typedef Counter counter_type; ///< Counter type + + counter_type m_nPush; ///< Push count + counter_type m_nPushPopulated; ///< Number of attempts to push to populated (non-empty) cell + counter_type m_nPushContended; ///< Number of failed CAS when pushing + counter_type m_nPop; ///< Pop count + counter_type m_nPopEmpty; ///< Number of poping from empty stack + counter_type m_nPopContended; ///< Number of failed CAS when popping + + counter_type m_nCreateSegmentReq; ///< Number of request to create new segment + counter_type m_nDeleteSegmentReq; ///< Number to request to delete segment + counter_type m_nSegmentCreated; ///< Number of created segments + counter_type m_nSegmentDeleted; ///< Number of deleted segments + + //@cond + void onPush() { ++m_nPush; } + void onPushPopulated() { ++m_nPushPopulated; } + void onPushContended() { ++m_nPushContended; } + void onPop() { ++m_nPop; } + void onPopEmpty() { ++m_nPopEmpty; } + void onPopContended() { ++m_nPopContended; } + void onCreateSegmentReq() { ++m_nCreateSegmentReq; } + void onDeleteSegmentReq() { ++m_nDeleteSegmentReq; } + void onSegmentCreated() { ++m_nSegmentCreated; } + void onSegmentDeleted() { ++m_nSegmentDeleted; } + //@endcond + }; + + /// Dummy SegmentedStack statistics, no overhead + struct empty_stat { + //@cond + void onPush() const {} + void onPushPopulated() 
const {} + void onPushContended() const {} + void onPop() const {} + void onPopEmpty() const {} + void onPopContended() const {} + void onCreateSegmentReq() const {} + void onDeleteSegmentReq() const {} + void onSegmentCreated() const {} + void onSegmentDeleted() const {} + //@endcond + }; + + /// SegmentedStack default traits + struct traits { + /// Element disposer that is called when the item to be pop. Default is opt::v::empty_disposer (no disposer) + typedef opt::v::empty_disposer disposer; + + /// Item counter, default is atomicity::item_counter + /** + The item counting is an essential part of segmented stack algorithm. + The \p empty() member function is based on checking size() == 0. + Therefore, dummy item counter like atomicity::empty_item_counter is not the proper counter. + */ + typedef atomicity::item_counter item_counter; + + /// Internal statistics, possible predefined types are \ref stat, \ref empty_stat (the default) + typedef segmented_stack::empty_stat stat; + + /// Memory model, default is opt::v::relaxed_ordering. See cds::opt::memory_model for the full list of possible types + typedef opt::v::relaxed_ordering memory_model; + + /// Alignment of critical data, default is cache line alignment. See cds::opt::alignment option specification + enum { alignment = opt::cache_line_alignment }; + + /// Padding of segment data, default is no special padding + /** + The segment is just an array of atomic data pointers, + so, the high load leads to false sharing and performance degradation. + A padding of segment data can eliminate false sharing issue. + On the other hand, the padding leads to increase segment size. + */ + enum { padding = opt::no_special_padding }; + + /// Segment allocator. 
Default is \ref CDS_DEFAULT_ALLOCATOR + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// Lock type used to maintain an internal list of allocated segments + typedef cds::sync::spin lock_type; + }; + + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... + >::type type; +# endif + }; + } // namespace segmented_stack + + /// Segmented stack + /** @ingroup cds_intrusive_stack + + The stack is based on work + - [2014] Henzinger, Kirsch, Payer, Sezgin, Sokolova Quantitative Relaxation of Concurrent Data Structures + + Template parameters: + - \p GC - a garbage collector, possible types are cds::gc::HP, cds::gc::DHP + - \p T - the type of values stored in the stack + - \p Traits - stack type traits, default is \p segmented_stack::traits. + \p segmented_stack::make_traits metafunction can be used to construct the + type traits. + + */ + template + class SegmentedStack + { + public: + typedef GC gc; ///< Garbage collector + typedef T value_type; ///< type of the value stored in the stack + typedef Traits traits; ///< Stack traits + + typedef typename traits::disposer disposer ; ///< value disposer, called only in \p clear() when the element to be pop + typedef typename traits::allocator allocator; ///< Allocator maintaining the segments + typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option + typedef typename traits::item_counter item_counter; ///< Item counting policy, see cds::opt::item_counter option setter + typedef typename traits::stat stat; ///< Internal statistics policy + typedef typename traits::lock_type lock_type; ///< Type of mutex for maintaining an internal list of allocated segments. 
+ + static const size_t c_nHazardPtrCount = 2 ; ///< Count of hazard pointer required for the algorithm + + protected: + //@cond + // Segment cell. LSB is used as deleted mark + typedef cds::details::marked_ptr< value_type, 1 > regular_cell; + typedef atomics::atomic< regular_cell > atomic_cell; + typedef typename cds::opt::details::apply_padding< atomic_cell, traits::padding >::type cell; + + // Segment + struct segment: public boost::intrusive::slist_base_hook<> + { + cell * cells; // Cell array of size \ref m_nQuasiFactor + size_t version; // version tag (ABA prevention tag) + // cell array is placed here in one continuous memory block + bool retired; + + // Initializes the segment + explicit segment( size_t nCellCount ) + // MSVC warning C4355: 'this': used in base member initializer list + : cells( reinterpret_cast< cell *>( this + 1 )) + , version( 0 ) + , retired( false ) + { + init( nCellCount ); + } + + segment() = delete; + + void init( size_t nCellCount ) + { + cell * pLastCell = cells + nCellCount; + for ( cell* pCell = cells; pCell < pLastCell; ++pCell ) + pCell->data.store( regular_cell(), atomics::memory_order_relaxed ); + atomics::atomic_thread_fence( memory_model::memory_order_release ); + } + }; + + typedef typename opt::details::alignment_setter< atomics::atomic, traits::alignment >::type aligned_segment_ptr; + //@endcond + + protected: + //@cond + class segment_list + { + typedef boost::intrusive::slist< segment, boost::intrusive::cache_last< true > > list_impl; + typedef std::unique_lock< lock_type > scoped_lock; + + aligned_segment_ptr m_pHead; + aligned_segment_ptr m_pTail; + + list_impl m_List; + mutable lock_type m_Lock; + size_t const m_nQuasiFactor; + stat& m_Stat; + + private: + struct segment_disposer + { + void operator()( segment * pSegment ) + { + assert( pSegment != nullptr ); + free_segment( pSegment ); + } + }; + + struct gc_segment_disposer + { + void operator()( segment * pSegment ) + { + assert( pSegment != nullptr ); + 
retire_segment( pSegment ); + } + }; + + public: + segment_list( size_t nQuasiFactor, stat& st ) + : m_pHead( nullptr ) + , m_pTail( nullptr ) + , m_nQuasiFactor( nQuasiFactor ) + , m_Stat( st ) + { + assert( cds::beans::is_power2( nQuasiFactor )); + } + + ~segment_list() + { + m_List.clear_and_dispose( gc_segment_disposer()); + } + + segment * head( typename gc::Guard& guard ) + { + return guard.protect( m_pHead ); + } + + segment * tail( typename gc::Guard& guard ) + { + return guard.protect( m_pTail ); + } + +# ifdef _DEBUG + bool populated( segment const& s ) const + { + // The lock should be held + cell const * pLastCell = s.cells + quasi_factor(); + for ( cell const * pCell = s.cells; pCell < pLastCell; ++pCell ) { + if ( !pCell->data.load( memory_model::memory_order_relaxed ).all()) + return false; + } + return true; + } + bool exhausted( segment const& s ) const + { + // The lock should be held + cell const * pLastCell = s.cells + quasi_factor(); + for ( cell const * pCell = s.cells; pCell < pLastCell; ++pCell ) { + if ( !pCell->data.load( memory_model::memory_order_relaxed ).bits()) + return false; + } + return true; + } +# endif + + segment * create_head( segment * pHead, typename gc::Guard& guard ) + { + // pHead is guarded by GC + + m_Stat.onCreateSegmentReq(); + + scoped_lock l( m_Lock ); + + if ( !m_List.empty() && (pHead != &m_List.front() || get_version(pHead) != m_List.front().version )) { + m_pHead.store( &m_List.front(), memory_model::memory_order_relaxed ); + + return guard.assign( &m_List.front()); + } + +# ifdef _DEBUG + assert( m_List.empty() || populated( m_List.front())); +# endif + + segment * pNew = allocate_segment(); + m_Stat.onSegmentCreated(); + + m_List.push_front( *pNew ); + m_pHead.store(pNew, memory_model::memory_order_release); + return guard.assign( pNew ); + } + + segment * remove_head( segment * pHead, typename gc::Guard& guard ) + { + // pHead is guarded by GC + m_Stat.onDeleteSegmentReq(); + + segment * pRet; + { + 
scoped_lock l( m_Lock ); + + if ( m_List.empty()) { +// m_pTail.store( nullptr, memory_model::memory_order_relaxed ); + m_pHead.store( nullptr, memory_model::memory_order_relaxed ); + return guard.assign( nullptr ); + } + + if ( pHead != &m_List.front() || get_version(pHead) != m_List.front().version ) { + m_pHead.store( &m_List.front(), memory_model::memory_order_relaxed ); + return guard.assign( &m_List.front()); + } + +# ifdef _DEBUG + // assert( exhausted( m_List.front())); +# endif + + m_List.pop_front(); + if ( m_List.empty()) { + pRet = guard.assign( nullptr ); +// m_pTail.store( nullptr, memory_model::memory_order_relaxed ); + } + else + pRet = guard.assign( &m_List.front()); + m_pHead.store( pRet, memory_model::memory_order_release ); + } + + pHead->retired = true; + retire_segment( pHead ); + m_Stat.onSegmentDeleted(); + + return pRet; + } + + size_t quasi_factor() const + { + return m_nQuasiFactor; + } + + private: + typedef cds::details::Allocator< segment, allocator > segment_allocator; + + static size_t get_version( segment * pSegment ) + { + return pSegment ? pSegment->version : 0; + } + + segment * allocate_segment() + { + return segment_allocator().NewBlock( sizeof(segment) + sizeof(cell) * m_nQuasiFactor, quasi_factor()); + } + + static void free_segment( segment * pSegment ) + { + segment_allocator().Delete( pSegment ); + } + + static void retire_segment( segment * pSegment ) + { + gc::template retire( pSegment ); + } + }; + //@endcond + + protected: + segment_list m_SegmentList; ///< List of segments + + item_counter m_ItemCounter; ///< Item counter + stat m_Stat; ///< Internal statistics + + public: + /// Initializes the empty stack + SegmentedStack( + size_t nQuasiFactor ///< Quasi factor. If it is not a power of 2 it is rounded up to nearest power of 2. Minimum is 2. 
+ ) + : m_SegmentList( cds::beans::ceil2(nQuasiFactor), m_Stat ) + { + static_assert( (!std::is_same< item_counter, cds::atomicity::empty_item_counter >::value), + "cds::atomicity::empty_item_counter is not supported for SegmentedStack" + ); + assert( m_SegmentList.quasi_factor() > 1 ); + } + + /// Clears the stack and deletes all internal data + ~SegmentedStack() + { + clear(); + } + + /// Inserts a new element at last segment of the stack + bool push( value_type& val ) + { + // return true; + // LSB is used as a flag in marked pointer + assert( (reinterpret_cast( &val ) & 1) == 0 ); + + typename gc::Guard segmentGuard; + segment * pHeadSegment = m_SegmentList.head( segmentGuard ); + if ( !pHeadSegment) { + // no segments, create the new one + pHeadSegment = m_SegmentList.create_head(pHeadSegment, segmentGuard ); + assert(pHeadSegment); + } + ++m_ItemCounter; + + while ( true ) { + regular_cell nullCell; + size_t index = 0; + if ( find_empty_cell(pHeadSegment, nullCell, index) ) + { + typename gc::Guard segGuard; + if (pHeadSegment == m_SegmentList.head(segGuard)) + { + regular_cell newCell(&val); + if (pHeadSegment->cells[index].data.compare_exchange_strong(nullCell, newCell, + memory_model::memory_order_release, atomics::memory_order_relaxed)) + { + if ( committed(pHeadSegment, newCell, index) ) + { + m_Stat.onPush(); + return true; + } + } + } + // segment or segment list was updated + continue; + } + // No available position, create a new segment + pHeadSegment = m_SegmentList.create_head(pHeadSegment, segmentGuard ); + } + } + + value_type * pop() + { + typename gc::Guard itemGuard; + if ( do_pop( itemGuard )) { + value_type * pVal = itemGuard.template get(); + assert( pVal ); + return pVal; + } + return nullptr; + } + + /// Checks if the stack is empty + bool empty() const + { + return size() == 0; + } + + /// Clear the stack + void clear() + { + clear_with( disposer()); + } + + template + void clear_with( Disposer ) + { + typename gc::Guard itemGuard; + 
while ( do_pop( itemGuard )) { + assert( itemGuard.template get()); + gc::template retire( itemGuard.template get()); + itemGuard.clear(); + } + } + + /// Returns stack's item count + size_t size() const + { + return m_ItemCounter.value(); + } + + /// Returns reference to internal statistics + const stat& statistics() const + { + return m_Stat; + } + + /// Returns quasi factor, a power-of-two number + size_t quasi_factor() const + { + return m_SegmentList.quasi_factor(); + } + + protected: + + //@cond + bool find_empty_cell(segment *pHeadSegment, regular_cell &item, size_t &index) + { + size_t i = 0; + size_t qf = quasi_factor(); + do { + regular_cell cell = pHeadSegment->cells[i].data.load(memory_model::memory_order_relaxed); + if (cell.all()) { + // Cell is not empty, go next + m_Stat.onPushPopulated(); + } + else { + // empty cell is found + item = cell; + index = i; + return true; + } + ++i; + } while (i < qf); + + return false; + } + //@endcond + + //@cond + bool committed(segment *pHeadSegment, regular_cell &new_item, size_t index) + { + if (pHeadSegment->cells[index].data.load() != new_item) + { + return true; + } + else if (!pHeadSegment->retired) + { + return true; + } + else // top_old->retired == true + { + typename gc::Guard segmentGuard; + regular_cell nullCell; + if (pHeadSegment != m_SegmentList.head(segmentGuard)) + { + if (!pHeadSegment->cells[index].data.compare_exchange_strong(new_item, nullCell, + memory_model::memory_order_release, atomics::memory_order_relaxed)) + { + return true; + } + } + else + { + pHeadSegment->version++; + if ( pHeadSegment == m_SegmentList.head(segmentGuard) ) + { + return true; + } + if (!pHeadSegment->cells[index].data.compare_exchange_strong(new_item, nullCell, + memory_model::memory_order_release, atomics::memory_order_relaxed)) + { + return true; + } + } + } + return false; + } + //@endcond + + //@cond + bool do_pop( typename gc::Guard& itemGuard ) + { + typename gc::Guard segmentGuard; + segment * pHeadSegment = 
m_SegmentList.head(segmentGuard); + while (true) { + if (!pHeadSegment) { + // Stack is empty + m_Stat.onPopEmpty(); + return false; + } + regular_cell item; + CDS_DEBUG_ONLY(size_t nLoopCount = 0); + size_t i = 1; + size_t qf = quasi_factor(); + do + { + CDS_DEBUG_ONLY(++nLoopCount); + + // Guard the item + // In segmented stack the cell cannot be reused + // So no loop is needed here to protect the cell + item = pHeadSegment->cells[qf - i].data.load(memory_model::memory_order_relaxed); + itemGuard.assign(item.ptr()); + + // Check if this cell is empty, which means an element + // can be pushed to this cell in the future + if (item.ptr()) + { // If the item is not deleted yet + if (!item.bits()) + { + // Try to mark the cell as deleted + if (pHeadSegment->cells[qf - i].data.compare_exchange_strong(item, item | 1, + memory_model::memory_order_acquire, atomics::memory_order_relaxed)) + { + --m_ItemCounter; + m_Stat.onPop(); + + return true; + } + assert(item.bits()); + m_Stat.onPopContended(); + continue; + } + } + i++; + } while (i <= qf); + + // All nodes have been poped, we can safely remove the first segment + pHeadSegment = m_SegmentList.remove_head(pHeadSegment, segmentGuard); + } + } + //@endcond + }; +}} // namespace cds::intrusive + +#if CDS_COMPILER == CDS_COMPILER_MSVC +# pragma warning( pop ) +#endif + +#endif // #ifndef CDSLIB_INTRUSIVE_SEGMENTED_STACK_H diff --git a/projects/Win/vc141/gtest-stack.vcxproj b/projects/Win/vc141/gtest-stack.vcxproj index e2174f296..862c2a552 100644 --- a/projects/Win/vc141/gtest-stack.vcxproj +++ b/projects/Win/vc141/gtest-stack.vcxproj @@ -78,13 +78,19 @@ + + + + + + diff --git a/projects/Win/vc141/gtest-stack.vcxproj.filters b/projects/Win/vc141/gtest-stack.vcxproj.filters index 3860db8cb..c221152f8 100644 --- a/projects/Win/vc141/gtest-stack.vcxproj.filters +++ b/projects/Win/vc141/gtest-stack.vcxproj.filters @@ -32,6 +32,18 @@ Source Files + + Source Files + + + Source Files + + + Source Files + + + Source Files + @@ 
-40,5 +52,11 @@ Header Files + + Header Files + + + Header Files + \ No newline at end of file diff --git a/projects/Win/vc141/stress-stack.vcxproj b/projects/Win/vc141/stress-stack.vcxproj index 6a92e8728..12e4d58bc 100644 --- a/projects/Win/vc141/stress-stack.vcxproj +++ b/projects/Win/vc141/stress-stack.vcxproj @@ -549,7 +549,7 @@ true true $(GTEST_LIB32);$(GTEST_ROOT)/lib/x86;$(BOOST_PATH)/stage32/lib;$(BOOST_PATH)/stage/lib;$(BOOST_PATH)/bin;%(AdditionalLibraryDirectories);$(OutDir) - gtest.lib;stress-framework.lib;%(AdditionalDependencies) + gtest.lib;stress-framework.lib;libcds-$(PlatformTarget).lib;%(AdditionalDependencies) diff --git a/test/stress/stack/intrusive_push_pop.cpp b/test/stress/stack/intrusive_push_pop.cpp index 3ab522849..5e8da970b 100644 --- a/test/stress/stack/intrusive_push_pop.cpp +++ b/test/stress/stack/intrusive_push_pop.cpp @@ -10,6 +10,7 @@ namespace cds_test { /*static*/ size_t intrusive_stack_push_pop::s_nPopThreadCount = 4; /*static*/ size_t intrusive_stack_push_pop::s_nStackSize = 10000000; /*static*/ size_t intrusive_stack_push_pop::s_nEliminationSize = 4; + /*static*/ size_t intrusive_stack_push_pop::s_nQuasiFactor = 15; /*static*/ bool intrusive_stack_push_pop::s_bFCIterative = false; /*static*/ unsigned int intrusive_stack_push_pop::s_nFCCombinePassCount = 64; /*static*/ unsigned int intrusive_stack_push_pop::s_nFCCompactFactor = 1024; @@ -34,6 +35,8 @@ namespace cds_test { s_nPopThreadCount = 1; if ( s_nEliminationSize == 0 ) s_nEliminationSize = 1; + if ( s_nQuasiFactor == 0 ) + s_nQuasiFactor = 15; } } // namespace cds_test @@ -88,6 +91,17 @@ namespace { Stack stack; do_test( stack, arrValue ); } + + template + void test_segmented() + { + value_array arrValue(s_nStackSize); + { + Stack stack( s_nQuasiFactor ); + do_test( stack, arrValue ); + } + Stack::gc::force_dispose(); + } }; // TreiberStack @@ -150,6 +164,18 @@ namespace { CDSSTRESS_StdStack( intrusive_stack_push_pop ) +#undef CDSSTRESS_Stack_F + + // SegmentedStack 
+#define CDSSTRESS_Stack_F( test_fixture, stack_impl ) \ + TEST_F( test_fixture, stack_impl ) \ + { \ + typedef typename istack::Types::stack_impl stack_type; \ + test_segmented< stack_type >(); \ + } + + CDSSTRESS_SegmentedStack( intrusive_stack_push_pop ) + #undef CDSSTRESS_Stack_F //INSTANTIATE_TEST_CASE_P( a, intrusive_stack_push_pop, ::testing::Values(1)); diff --git a/test/stress/stack/intrusive_stack_push_pop.h b/test/stress/stack/intrusive_stack_push_pop.h index b14ca04f0..26d268d18 100644 --- a/test/stress/stack/intrusive_stack_push_pop.h +++ b/test/stress/stack/intrusive_stack_push_pop.h @@ -14,6 +14,7 @@ namespace cds_test { static size_t s_nPopThreadCount; static size_t s_nStackSize; static size_t s_nEliminationSize; + static size_t s_nQuasiFactor; static bool s_bFCIterative; static unsigned int s_nFCCombinePassCount; static unsigned int s_nFCCompactFactor; diff --git a/test/stress/stack/intrusive_stack_type.h b/test/stress/stack/intrusive_stack_type.h index 22e584c97..57993c462 100644 --- a/test/stress/stack/intrusive_stack_type.h +++ b/test/stress/stack/intrusive_stack_type.h @@ -8,6 +8,7 @@ #include #include +#include #include #include @@ -344,6 +345,49 @@ namespace istack { typedef details::StdStack< T, std::stack< T*, std::list >, std::mutex > StdStack_List_Mutex; typedef details::StdStack< T, std::stack< T*, std::list >, cds::sync::spin > StdStack_List_Spin; + // SegmentedStack + class traits_SegmentedStack_spin_stat : + public cds::intrusive::segmented_stack::make_traits< + cds::opt::stat< cds::intrusive::segmented_stack::stat<> > + >::type + {}; + class traits_SegmentedStack_spin_padding : + public cds::intrusive::segmented_stack::make_traits< + cds::opt::padding< cds::opt::cache_line_padding > + >::type + {}; + class traits_SegmentedStack_mutex_stat : + public cds::intrusive::segmented_stack::make_traits< + cds::opt::stat< cds::intrusive::segmented_stack::stat<> > + , cds::opt::lock_type< std::mutex > + >::type + {}; + class 
traits_SegmentedStack_mutex : + public cds::intrusive::segmented_stack::make_traits< + cds::opt::lock_type< std::mutex > + >::type + {}; + class traits_SegmentedStack_mutex_padding : + public cds::intrusive::segmented_stack::make_traits< + cds::opt::lock_type< std::mutex > + , cds::opt::padding< cds::opt::cache_line_padding > + >::type + {}; + + typedef cds::intrusive::SegmentedStack< cds::gc::HP, T > SegmentedStack_HP_spin; + typedef cds::intrusive::SegmentedStack< cds::gc::HP, T, traits_SegmentedStack_spin_padding > SegmentedStack_HP_spin_padding; + typedef cds::intrusive::SegmentedStack< cds::gc::HP, T, traits_SegmentedStack_spin_stat > SegmentedStack_HP_spin_stat; + typedef cds::intrusive::SegmentedStack< cds::gc::HP, T, traits_SegmentedStack_mutex > SegmentedStack_HP_mutex; + typedef cds::intrusive::SegmentedStack< cds::gc::HP, T, traits_SegmentedStack_mutex_padding > SegmentedStack_HP_mutex_padding; + typedef cds::intrusive::SegmentedStack< cds::gc::HP, T, traits_SegmentedStack_mutex_stat > SegmentedStack_HP_mutex_stat; + + typedef cds::intrusive::SegmentedStack< cds::gc::DHP, T > SegmentedStack_DHP_spin; + typedef cds::intrusive::SegmentedStack< cds::gc::DHP, T, traits_SegmentedStack_spin_padding > SegmentedStack_DHP_spin_padding; + typedef cds::intrusive::SegmentedStack< cds::gc::DHP, T, traits_SegmentedStack_spin_stat > SegmentedStack_DHP_spin_stat; + typedef cds::intrusive::SegmentedStack< cds::gc::DHP, T, traits_SegmentedStack_mutex > SegmentedStack_DHP_mutex; + typedef cds::intrusive::SegmentedStack< cds::gc::DHP, T, traits_SegmentedStack_mutex_padding > SegmentedStack_DHP_mutex_padding; + typedef cds::intrusive::SegmentedStack< cds::gc::DHP, T, traits_SegmentedStack_mutex_stat > SegmentedStack_DHP_mutex_stat; + }; } // namespace istack @@ -383,6 +427,27 @@ namespace cds_test { << CDSSTRESS_STAT_OUT( s, m_nCollided ) << static_cast< cds::algo::flat_combining::stat<> const&>( s ); } + + + static inline property_stream& operator <<(property_stream& o, 
cds::intrusive::segmented_stack::empty_stat const&) + { + return o; + } + + static inline property_stream& operator <<(property_stream& o, cds::intrusive::segmented_stack::stat<> const& s) + { + return o + << CDSSTRESS_STAT_OUT(s, m_nPush) + << CDSSTRESS_STAT_OUT(s, m_nPushPopulated) + << CDSSTRESS_STAT_OUT(s, m_nPushContended) + << CDSSTRESS_STAT_OUT(s, m_nPop) + << CDSSTRESS_STAT_OUT(s, m_nPopEmpty) + << CDSSTRESS_STAT_OUT(s, m_nPopContended) + << CDSSTRESS_STAT_OUT(s, m_nCreateSegmentReq) + << CDSSTRESS_STAT_OUT(s, m_nDeleteSegmentReq) + << CDSSTRESS_STAT_OUT(s, m_nSegmentCreated) + << CDSSTRESS_STAT_OUT(s, m_nSegmentDeleted); + } } // namespace cds_test @@ -440,6 +505,17 @@ namespace cds_test { CDSSTRESS_Stack_F( test_fixture, FCStack_slist_mutex_elimination ) \ CDSSTRESS_Stack_F( test_fixture, FCStack_slist_mutex_elimination_stat ) \ +#define CDSSTRESS_SegmentedStack( test_fixture ) \ + CDSSTRESS_Stack_F( test_fixture, SegmentedStack_HP_spin ) \ + CDSSTRESS_Stack_F( test_fixture, SegmentedStack_HP_spin_padding ) \ + CDSSTRESS_Stack_F( test_fixture, SegmentedStack_HP_spin_stat ) \ + CDSSTRESS_Stack_F( test_fixture, SegmentedStack_HP_mutex ) \ + CDSSTRESS_Stack_F( test_fixture, SegmentedStack_HP_mutex_stat ) \ + CDSSTRESS_Stack_F( test_fixture, SegmentedStack_DHP_mutex ) \ + CDSSTRESS_Stack_F( test_fixture, SegmentedStack_DHP_spin ) \ + CDSSTRESS_Stack_F( test_fixture, SegmentedStack_DHP_spin_stat ) \ + CDSSTRESS_Stack_F( test_fixture, SegmentedStack_DHP_mutex_stat ) \ + #define CDSSTRESS_FCStack_list( test_fixture ) \ CDSSTRESS_Stack_F( test_fixture, FCStack_list ) \ CDSSTRESS_Stack_F( test_fixture, FCStack_list_stat ) \ diff --git a/test/stress/stack/push.cpp b/test/stress/stack/push.cpp index ae582aae7..1c94fc6bc 100644 --- a/test/stress/stack/push.cpp +++ b/test/stress/stack/push.cpp @@ -10,6 +10,7 @@ namespace { static size_t s_nThreadCount = 8; static size_t s_nStackSize = 10000000; static size_t s_nEliminationSize = 4; + static size_t s_nQuasiFactor = 15; 
class stack_push : public cds_test::stress_fixture { @@ -81,6 +82,7 @@ namespace { s_nThreadCount = cfg.get_size_t( "ThreadCount", s_nThreadCount ); s_nStackSize = cfg.get_size_t( "StackSize", s_nStackSize ); s_nEliminationSize = cfg.get_size_t( "EliminationSize", s_nEliminationSize ); + s_nQuasiFactor = cfg.get_size_t( "QuasiFactor", s_nQuasiFactor ); if ( s_nThreadCount == 0 ) s_nThreadCount = 1; @@ -180,5 +182,6 @@ namespace { CDSSTRESS_FCStack( stack_push ) CDSSTRESS_FCDeque( stack_push ) CDSSTRESS_StdStack( stack_push ) + CDSSTRESS_SegmentedStack( stack_push ) } // namespace diff --git a/test/stress/stack/push_pop.cpp b/test/stress/stack/push_pop.cpp index 74d8b8968..bdb0a3574 100644 --- a/test/stress/stack/push_pop.cpp +++ b/test/stress/stack/push_pop.cpp @@ -11,6 +11,7 @@ namespace { static size_t s_nPopThreadCount = 4; static size_t s_nStackSize = 1000000; static size_t s_nEliminationSize = 4; + static size_t s_nQuasiFactor = 15; static atomics::atomic s_nWorkingProducers( 0 ); @@ -150,6 +151,7 @@ namespace { s_nPushThreadCount = cfg.get_size_t( "PushThreadCount", s_nPushThreadCount ); s_nPopThreadCount = cfg.get_size_t( "PopThreadCount", s_nPopThreadCount ); s_nStackSize = cfg.get_size_t( "StackSize", s_nStackSize ); s_nEliminationSize = cfg.get_size_t( "EliminationSize", s_nEliminationSize ); + s_nQuasiFactor = cfg.get_size_t( "QuasiFactor", s_nQuasiFactor ); if ( s_nPushThreadCount == 0 ) s_nPushThreadCount = 1; @@ -257,5 +259,6 @@ namespace { CDSSTRESS_FCStack( stack_push_pop ) CDSSTRESS_FCDeque( stack_push_pop ) CDSSTRESS_StdStack( stack_push_pop ) + CDSSTRESS_SegmentedStack( stack_push_pop ) } // namespace diff --git a/test/stress/stack/stack_type.h b/test/stress/stack/stack_type.h index 4e58955ba..b4f0abdf8 100644 --- a/test/stress/stack/stack_type.h +++ b/test/stress/stack/stack_type.h @@ -9,6 +9,7 @@ #include #include #include +#include #include #include @@ -304,6 +305,50 @@ namespace stack { typedef cds::container::TreiberStack< cds::gc::DHP, T, traits_Elimination_exp > Elimination_DHP_exp; + // SegmentedStack + class 
traits_SegmentedStack_spin_stat : + public cds::container::segmented_stack::make_traits< + cds::opt::stat< cds::intrusive::segmented_stack::stat<> > + >::type + {}; + class traits_SegmentedStack_spin_padding : + public cds::container::segmented_stack::make_traits< + cds::opt::padding< cds::opt::cache_line_padding > + >::type + {}; + class traits_SegmentedStack_mutex_stat : + public cds::container::segmented_stack::make_traits< + cds::opt::stat< cds::intrusive::segmented_stack::stat<> > + , cds::opt::lock_type< std::mutex > + >::type + {}; + class traits_SegmentedStack_mutex : + public cds::container::segmented_stack::make_traits< + cds::opt::lock_type< std::mutex > + >::type + {}; + class traits_SegmentedStack_mutex_padding : + public cds::container::segmented_stack::make_traits< + cds::opt::lock_type< std::mutex > + , cds::opt::padding< cds::opt::cache_line_padding > + >::type + {}; + + typedef cds::container::SegmentedStack< cds::gc::HP, T > SegmentedStack_HP_spin; + typedef cds::container::SegmentedStack< cds::gc::HP, T, traits_SegmentedStack_spin_padding > SegmentedStack_HP_spin_padding; + typedef cds::container::SegmentedStack< cds::gc::HP, T, traits_SegmentedStack_spin_stat > SegmentedStack_HP_spin_stat; + typedef cds::container::SegmentedStack< cds::gc::HP, T, traits_SegmentedStack_mutex > SegmentedStack_HP_mutex; + typedef cds::container::SegmentedStack< cds::gc::HP, T, traits_SegmentedStack_mutex_padding > SegmentedStack_HP_mutex_padding; + typedef cds::container::SegmentedStack< cds::gc::HP, T, traits_SegmentedStack_mutex_stat > SegmentedStack_HP_mutex_stat; + + typedef cds::container::SegmentedStack< cds::gc::DHP, T > SegmentedStack_DHP_spin; + typedef cds::container::SegmentedStack< cds::gc::DHP, T, traits_SegmentedStack_spin_padding > SegmentedStack_DHP_spin_padding; + typedef cds::container::SegmentedStack< cds::gc::DHP, T, traits_SegmentedStack_spin_stat > SegmentedStack_DHP_spin_stat; + typedef cds::container::SegmentedStack< cds::gc::DHP, T, 
traits_SegmentedStack_mutex > SegmentedStack_DHP_mutex; + typedef cds::container::SegmentedStack< cds::gc::DHP, T, traits_SegmentedStack_mutex_padding > SegmentedStack_DHP_mutex_padding; + typedef cds::container::SegmentedStack< cds::gc::DHP, T, traits_SegmentedStack_mutex_stat > SegmentedStack_DHP_mutex_stat; + + // FCStack typedef cds::container::FCStack< T > FCStack_deque; @@ -447,6 +492,27 @@ namespace cds_test { << CDSSTRESS_STAT_OUT( s, m_nCollided ) << static_cast const&>(s); } + + static inline property_stream& operator <<(property_stream& o, cds::container::segmented_stack::empty_stat const&) + { + return o; + } + + static inline property_stream& operator <<(property_stream& o, cds::container::segmented_stack::stat<> const& s) + { + return o + << CDSSTRESS_STAT_OUT(s, m_nPush) + << CDSSTRESS_STAT_OUT(s, m_nPushPopulated) + << CDSSTRESS_STAT_OUT(s, m_nPushContended) + << CDSSTRESS_STAT_OUT(s, m_nPop) + << CDSSTRESS_STAT_OUT(s, m_nPopEmpty) + << CDSSTRESS_STAT_OUT(s, m_nPopContended) + << CDSSTRESS_STAT_OUT(s, m_nCreateSegmentReq) + << CDSSTRESS_STAT_OUT(s, m_nDeleteSegmentReq) + << CDSSTRESS_STAT_OUT(s, m_nSegmentCreated) + << CDSSTRESS_STAT_OUT(s, m_nSegmentDeleted); + } + } // namespace cds_test #define CDSSTRESS_Stack_F( test_fixture, type_name ) \ @@ -465,6 +531,14 @@ namespace cds_test { test_elimination( stack ); \ } +#define CDSSTRESS_SegmentedStack_F( test_fixture, type_name ) \ + TEST_F( test_fixture, type_name ) \ + { \ + typedef stack::Types< value_type >::type_name stack_type; \ + stack_type stack( s_nQuasiFactor ); \ + test( stack ); \ + } + #define CDSSTRESS_TreiberStack( test_fixture ) \ CDSSTRESS_Stack_F( test_fixture, Treiber_HP ) \ CDSSTRESS_Stack_F( test_fixture, Treiber_HP_seqcst ) \ @@ -503,6 +577,17 @@ namespace cds_test { CDSSTRESS_EliminationStack_F( test_fixture, Elimination_DHP_dyn ) \ CDSSTRESS_EliminationStack_F( test_fixture, Elimination_DHP_dyn_stat) +#define CDSSTRESS_SegmentedStack( test_fixture ) \ + 
CDSSTRESS_SegmentedStack_F( test_fixture, SegmentedStack_HP_spin ) \ + CDSSTRESS_SegmentedStack_F( test_fixture, SegmentedStack_HP_spin_padding ) \ + CDSSTRESS_SegmentedStack_F( test_fixture, SegmentedStack_HP_spin_stat ) \ + CDSSTRESS_SegmentedStack_F( test_fixture, SegmentedStack_HP_mutex ) \ + CDSSTRESS_SegmentedStack_F( test_fixture, SegmentedStack_HP_mutex_stat ) \ + CDSSTRESS_SegmentedStack_F( test_fixture, SegmentedStack_DHP_spin ) \ + CDSSTRESS_SegmentedStack_F( test_fixture, SegmentedStack_DHP_spin_stat ) \ + CDSSTRESS_SegmentedStack_F( test_fixture, SegmentedStack_DHP_mutex ) \ + CDSSTRESS_SegmentedStack_F( test_fixture, SegmentedStack_DHP_mutex_stat ) + #define CDSSTRESS_FCStack( test_fixture ) \ CDSSTRESS_Stack_F( test_fixture, FCStack_deque ) \ CDSSTRESS_Stack_F( test_fixture, FCStack_deque_mutex ) \ diff --git a/test/unit/stack/CMakeLists.txt b/test/unit/stack/CMakeLists.txt index 2a35488cc..9a62038e4 100644 --- a/test/unit/stack/CMakeLists.txt +++ b/test/unit/stack/CMakeLists.txt @@ -8,6 +8,10 @@ set(CDSGTEST_STACK_SOURCES intrusive_treiber_stack_hp.cpp treiber_stack_dhp.cpp treiber_stack_hp.cpp + segmented_stack_hp.cpp + segmented_stack_dhp.cpp + intrusive_segmented_stack_hp.cpp + intrusive_segmented_stack_dhp.cpp ) include_directories( diff --git a/test/unit/stack/intrusive_segmented_stack_dhp.cpp b/test/unit/stack/intrusive_segmented_stack_dhp.cpp new file mode 100644 index 000000000..68e6349af --- /dev/null +++ b/test/unit/stack/intrusive_segmented_stack_dhp.cpp @@ -0,0 +1,122 @@ +// Copyright (c) 2006-2018 Maxim Khizhinsky +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) + +#include "test_intrusive_segmented_stack.h" + +#include +#include +#include + +namespace { + namespace ci = cds::intrusive; + typedef cds::gc::DHP gc_type; + + class IntrusiveSegmentedStack_DHP : public cds_test::intrusive_segmented_stack + { + typedef cds_test::intrusive_segmented_stack base_class; + + protected: + static const size_t c_QuasiFactor = 15; + + void SetUp() + { + typedef ci::SegmentedStack< gc_type, item > stack_type; + + cds::gc::dhp::smr::construct( stack_type::c_nHazardPtrCount ); + cds::threading::Manager::attachThread(); + } + + void TearDown() + { + cds::threading::Manager::detachThread(); + cds::gc::dhp::smr::destruct(); + } + + template + void check_array( V& arr ) + { + for ( size_t i = 0; i < arr.size(); ++i ) { + EXPECT_EQ( arr[i].nDisposeCount, 2u ); + EXPECT_EQ( arr[i].nDispose2Count, 1u ); + } + } + }; + + TEST_F( IntrusiveSegmentedStack_DHP, defaulted ) + { + struct stack_traits : public cds::intrusive::segmented_stack::traits + { + typedef Disposer disposer; + }; + typedef cds::intrusive::SegmentedStack< gc_type, item, stack_traits > stack_type; + + std::vector arr; + { + stack_type s( c_QuasiFactor ); + test( s, arr ); + } + stack_type::gc::force_dispose(); + check_array( arr ); + } + + TEST_F( IntrusiveSegmentedStack_DHP, mutex ) + { + struct stack_traits : public + cds::intrusive::segmented_stack::make_traits < + cds::intrusive::opt::disposer< Disposer > + ,cds::opt::lock_type < std::mutex > + > ::type + {}; + typedef cds::intrusive::SegmentedStack< gc_type, item, stack_traits > stack_type; + + std::vector arr; + { + stack_type s( c_QuasiFactor ); + test( s, arr ); + } + stack_type::gc::force_dispose(); + check_array( arr ); + } + + TEST_F( IntrusiveSegmentedStack_DHP, padding ) + { + struct stack_traits : public cds::intrusive::segmented_stack::traits + { + typedef Disposer disposer; + enum { padding = cds::opt::cache_line_padding }; + typedef 
ci::segmented_stack::stat<> stat; + }; + typedef cds::intrusive::SegmentedStack< gc_type, item, stack_traits > stack_type; + + std::vector arr; + { + stack_type s( c_QuasiFactor ); + test( s, arr ); + } + stack_type::gc::force_dispose(); + check_array( arr ); + } + + TEST_F( IntrusiveSegmentedStack_DHP, bigdata_padding ) + { + struct stack_traits : public cds::intrusive::segmented_stack::traits + { + typedef Disposer disposer; + enum { padding = cds::opt::cache_line_padding | cds::opt::padding_tiny_data_only }; + typedef cds::opt::v::sequential_consistent memory_model; + }; + typedef cds::intrusive::SegmentedStack< gc_type, big_item, stack_traits > stack_type; + + std::vector arr; + { + stack_type s( c_QuasiFactor ); + test( s, arr ); + } + stack_type::gc::force_dispose(); + check_array( arr ); + } + +} // namespace + diff --git a/test/unit/stack/intrusive_segmented_stack_hp.cpp b/test/unit/stack/intrusive_segmented_stack_hp.cpp new file mode 100644 index 000000000..1a42961e0 --- /dev/null +++ b/test/unit/stack/intrusive_segmented_stack_hp.cpp @@ -0,0 +1,122 @@ +// Copyright (c) 2006-2018 Maxim Khizhinsky +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) + +#include "test_intrusive_segmented_stack.h" + +#include +#include +#include + +namespace { + namespace ci = cds::intrusive; + typedef cds::gc::HP gc_type; + + class IntrusiveSegmentedStack_HP : public cds_test::intrusive_segmented_stack + { + typedef cds_test::intrusive_segmented_stack base_class; + + protected: + static const size_t c_QuasiFactor = 16; + + void SetUp() + { + typedef ci::SegmentedStack< gc_type, item > stack_type; + + cds::gc::hp::GarbageCollector::Construct( stack_type::c_nHazardPtrCount, 1, 16 ); + cds::threading::Manager::attachThread(); + } + + void TearDown() + { + cds::threading::Manager::detachThread(); + cds::gc::hp::GarbageCollector::Destruct( true ); + } + + template + void check_array( V& arr ) + { + for ( size_t i = 0; i < arr.size(); ++i ) { + EXPECT_EQ( arr[i].nDisposeCount, 2u ); + EXPECT_EQ( arr[i].nDispose2Count, 1u ); + } + } + }; + + TEST_F( IntrusiveSegmentedStack_HP, defaulted ) + { + struct stack_traits : public cds::intrusive::segmented_stack::traits + { + typedef Disposer disposer; + }; + typedef cds::intrusive::SegmentedStack< gc_type, item, stack_traits > stack_type; + + std::vector arr; + { + stack_type s( c_QuasiFactor ); + test( s, arr ); + } + stack_type::gc::force_dispose(); + check_array( arr ); + } + + TEST_F( IntrusiveSegmentedStack_HP, mutex ) + { + struct stack_traits : public + cds::intrusive::segmented_stack::make_traits < + cds::intrusive::opt::disposer< Disposer > + ,cds::opt::lock_type < std::mutex > + > ::type + {}; + typedef cds::intrusive::SegmentedStack< gc_type, item, stack_traits > stack_type; + + std::vector arr; + { + stack_type s( c_QuasiFactor ); + test( s, arr ); + } + stack_type::gc::force_dispose(); + check_array( arr ); + } + + TEST_F( IntrusiveSegmentedStack_HP, padding ) + { + struct stack_traits : public cds::intrusive::segmented_stack::traits + { + typedef Disposer disposer; + enum { padding = 
cds::opt::cache_line_padding }; + typedef ci::segmented_stack::stat<> stat; + }; + typedef cds::intrusive::SegmentedStack< gc_type, item, stack_traits > stack_type; + + std::vector arr; + { + stack_type s( c_QuasiFactor ); + test( s, arr ); + } + stack_type::gc::force_dispose(); + check_array( arr ); + } + + TEST_F( IntrusiveSegmentedStack_HP, bigdata_padding ) + { + struct stack_traits : public cds::intrusive::segmented_stack::traits + { + typedef Disposer disposer; + enum { padding = cds::opt::cache_line_padding | cds::opt::padding_tiny_data_only }; + typedef cds::opt::v::sequential_consistent memory_model; + }; + typedef cds::intrusive::SegmentedStack< gc_type, big_item, stack_traits > stack_type; + + std::vector arr; + { + stack_type s( c_QuasiFactor ); + test( s, arr ); + } + stack_type::gc::force_dispose(); + check_array( arr ); + } + +} // namespace + diff --git a/test/unit/stack/segmented_stack_dhp.cpp b/test/unit/stack/segmented_stack_dhp.cpp new file mode 100644 index 000000000..7a6a009bd --- /dev/null +++ b/test/unit/stack/segmented_stack_dhp.cpp @@ -0,0 +1,86 @@ +// Copyright (c) 2006-2018 Maxim Khizhinsky +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) + +#include "test_segmented_stack.h" + +#include +#include + +namespace { + namespace cc = cds::container; + typedef cds::gc::DHP gc_type; + + + class SegmentedStack_DHP : public cds_test::segmented_stack + { + protected: + static const size_t c_QuasiFactor = 15; + void SetUp() + { + typedef cc::SegmentedStack< gc_type, int > stack_type; + + cds::gc::dhp::smr::construct( stack_type::c_nHazardPtrCount ); + cds::threading::Manager::attachThread(); + } + + void TearDown() + { + cds::threading::Manager::detachThread(); + cds::gc::dhp::smr::destruct(); + } + }; + + TEST_F( SegmentedStack_DHP, defaulted ) + { + typedef cds::container::SegmentedStack< gc_type, int > test_stack; + + test_stack s( c_QuasiFactor ); + ASSERT_EQ( s.quasi_factor(), cds::beans::ceil2( c_QuasiFactor )); + test(s); + } + + TEST_F( SegmentedStack_DHP, mutex ) + { + struct traits : public cds::container::segmented_stack::traits + { + typedef cds::atomicity::item_counter item_counter; + }; + typedef cds::container::SegmentedStack< gc_type, int, traits > test_stack; + + test_stack s( c_QuasiFactor ); + ASSERT_EQ( s.quasi_factor(), cds::beans::ceil2( c_QuasiFactor )); + test( s ); + } + + TEST_F( SegmentedStack_DHP, shuffle ) + { + struct traits : public cds::container::segmented_stack::traits + { + typedef cds::atomicity::item_counter item_counter; + }; + typedef cds::container::SegmentedStack< gc_type, int, traits > test_stack; + + test_stack s( c_QuasiFactor ); + ASSERT_EQ( s.quasi_factor(), cds::beans::ceil2( c_QuasiFactor )); + test( s ); + } + + TEST_F( SegmentedStack_DHP, stat ) + { + struct traits : public + cds::container::segmented_stack::make_traits < + cds::opt::item_counter< cds::atomicity::item_counter > + , cds::opt::stat < cds::container::segmented_stack::stat<> > + > ::type + {}; + typedef cds::container::SegmentedStack< gc_type, int, traits > test_stack; + + test_stack s( c_QuasiFactor ); + ASSERT_EQ( 
s.quasi_factor(), cds::beans::ceil2( c_QuasiFactor )); + test( s ); + } + +} // namespace + diff --git a/test/unit/stack/segmented_stack_hp.cpp b/test/unit/stack/segmented_stack_hp.cpp new file mode 100644 index 000000000..1d17ca80b --- /dev/null +++ b/test/unit/stack/segmented_stack_hp.cpp @@ -0,0 +1,73 @@ +// Copyright (c) 2006-2018 Maxim Khizhinsky +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) + +#include "test_segmented_stack.h" + +#include +#include + +namespace { + namespace cc = cds::container; + typedef cds::gc::HP gc_type; + + + class SegmentedStack_HP : public cds_test::segmented_stack + { + protected: + static const size_t c_QuasiFactor = 15; + void SetUp() + { + typedef cc::SegmentedStack< gc_type, int > stack_type; + + cds::gc::hp::GarbageCollector::Construct( stack_type::c_nHazardPtrCount, 1, 16 ); + cds::threading::Manager::attachThread(); + } + + void TearDown() + { + cds::threading::Manager::detachThread(); + cds::gc::hp::GarbageCollector::Destruct( true ); + } + }; + + TEST_F( SegmentedStack_HP, defaulted ) + { + typedef cds::container::SegmentedStack< gc_type, int > test_stack; + + test_stack s( c_QuasiFactor ); + ASSERT_EQ( s.quasi_factor(), cds::beans::ceil2( c_QuasiFactor )); + test(s); + } + + TEST_F( SegmentedStack_HP, mutex ) + { + struct traits : public cds::container::segmented_stack::traits + { + typedef cds::atomicity::item_counter item_counter; + }; + typedef cds::container::SegmentedStack< cds::gc::HP, int, traits > test_stack; + + test_stack s( c_QuasiFactor ); + ASSERT_EQ( s.quasi_factor(), cds::beans::ceil2( c_QuasiFactor )); + test( s ); + } + + TEST_F( SegmentedStack_HP, stat ) + { + struct traits : public + cds::container::segmented_stack::make_traits < + cds::opt::item_counter< cds::atomicity::item_counter > + , cds::opt::stat < cds::container::segmented_stack::stat<> > + > ::type + {}; + typedef 
cds::container::SegmentedStack< cds::gc::HP, int, traits > test_stack; + + test_stack s( c_QuasiFactor ); + ASSERT_EQ( s.quasi_factor(), cds::beans::ceil2( c_QuasiFactor )); + test( s ); + } + +} // namespace + diff --git a/test/unit/stack/test_intrusive_segmented_stack.h b/test/unit/stack/test_intrusive_segmented_stack.h new file mode 100644 index 000000000..fa4b80c22 --- /dev/null +++ b/test/unit/stack/test_intrusive_segmented_stack.h @@ -0,0 +1,153 @@ +// Copyright (c) 2006-2018 Maxim Khizhinsky +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) + +#ifndef CDSUNIT_STACK_TEST_INTRUSIVE_SEGMENTED_STACK_H +#define CDSUNIT_STACK_TEST_INTRUSIVE_SEGMENTED_STACK_H + +#include + +namespace cds_test { + + class intrusive_segmented_stack : public ::testing::Test + { + protected: + struct item { + int nValue; + + size_t nDisposeCount; + size_t nDispose2Count; + + item() + : nValue( 0 ) + , nDisposeCount( 0 ) + , nDispose2Count( 0 ) + {} + + item( int nVal ) + : nValue( nVal ) + , nDisposeCount( 0 ) + , nDispose2Count( 0 ) + {} + }; + + struct big_item : public item + { + big_item() + {} + + big_item( int nVal ) + : item( nVal ) + {} + + int arr[80]; + }; + + struct Disposer + { + void operator()( item * p ) + { + ++(p->nDisposeCount); + } + }; + + struct Disposer2 + { + void operator()( item * p ) + { + ++(p->nDispose2Count); + } + }; + + template + void test( Stack& s, Data& val ) + { + typedef typename Stack::value_type value_type; + val.resize( 100 ); + for ( size_t i = 0; i < val.size(); ++i ) + val[i].nValue = static_cast( i ); + + ASSERT_TRUE( s.empty()); + ASSERT_CONTAINER_SIZE( s, 0u ); + + // push + for ( size_t i = 0; i < val.size(); ++i ) { + ASSERT_TRUE(s.push( val[i]) ); + + ASSERT_CONTAINER_SIZE( s, i + 1 ); + } + EXPECT_TRUE( !s.empty()); + + // pop + size_t nCount = 0; + while ( !s.empty()) { + value_type * pVal; + pVal = s.pop(); + + ASSERT_TRUE( pVal != 
nullptr ); + + ++nCount; + EXPECT_CONTAINER_SIZE( s, val.size() - nCount ); + } + EXPECT_EQ( nCount, val.size()); + EXPECT_TRUE( s.empty()); + EXPECT_CONTAINER_SIZE( s, 0u ); + + // pop from empty stack + ASSERT_TRUE( s.pop() == nullptr ); + EXPECT_TRUE( s.empty()); + EXPECT_CONTAINER_SIZE( s, 0u ); + + // check that Disposer has not been called + Stack::gc::force_dispose(); + for ( size_t i = 0; i < val.size(); ++i ) { + EXPECT_EQ( val[i].nDisposeCount, 0u ); + EXPECT_EQ( val[i].nDispose2Count, 0u ); + } + + // clear + for ( size_t i = 0; i < val.size(); ++i ) + EXPECT_TRUE( s.push( val[i] )); + EXPECT_CONTAINER_SIZE( s, val.size()); + EXPECT_TRUE( !s.empty()); + + s.clear(); + EXPECT_CONTAINER_SIZE( s, 0u ); + EXPECT_TRUE( s.empty()); + + // check if Disposer has been called + Stack::gc::force_dispose(); + for ( size_t i = 0; i < val.size(); ++i ) { + EXPECT_EQ( val[i].nDisposeCount, 1u ); + EXPECT_EQ( val[i].nDispose2Count, 0u ); + } + + // clear_with + for ( size_t i = 0; i < val.size(); ++i ) + EXPECT_TRUE( s.push( val[i] )); + EXPECT_CONTAINER_SIZE( s, val.size()); + EXPECT_TRUE( !s.empty()); + + s.clear_with( Disposer2() ); + EXPECT_CONTAINER_SIZE( s, 0u ); + EXPECT_TRUE( s.empty()); + + // check if Disposer has been called + Stack::gc::force_dispose(); + for ( size_t i = 0; i < val.size(); ++i ) { + EXPECT_EQ( val[i].nDisposeCount, 1u ); + EXPECT_EQ( val[i].nDispose2Count, 1u ); + } + + // check clear on destruct + for ( size_t i = 0; i < val.size(); ++i ) + EXPECT_TRUE( s.push( val[i] )); + EXPECT_CONTAINER_SIZE( s, val.size()); + EXPECT_TRUE( !s.empty()); + } + }; + +} // namespace cds_test + +#endif // CDSUNIT_STACK_TEST_INTRUSIVE_SEGMENTED_STACK_H diff --git a/test/unit/stack/test_segmented_stack.h b/test/unit/stack/test_segmented_stack.h new file mode 100644 index 000000000..867c4c18f --- /dev/null +++ b/test/unit/stack/test_segmented_stack.h @@ -0,0 +1,64 @@ +// Copyright (c) 2006-2018 Maxim Khizhinsky +// +// Distributed under the Boost Software 
License, Version 1.0. (See accompanying +// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) + +#ifndef CDSUNIT_STACK_TEST_SEGMENTED_STACK_H +#define CDSUNIT_STACK_TEST_SEGMENTED_STACK_H + +#include + +namespace cds_test { + + class segmented_stack : public ::testing::Test + { + protected: + template + void test( Stack& s ) + { + typedef typename Stack::value_type value_type; + value_type it; + + const size_t nSize = 100; + + ASSERT_TRUE( s.empty()); + ASSERT_CONTAINER_SIZE( s, 0 ); + // push/pop + for ( size_t i = 0; i < nSize; ++i ) { + it = static_cast(i); + ASSERT_TRUE( s.push( it )); + ASSERT_CONTAINER_SIZE( s, i + 1 ); + } + ASSERT_FALSE( s.empty()); + ASSERT_CONTAINER_SIZE( s, nSize ); + + for ( size_t i = 0; i < nSize; ++i ) { + it = -1; + ASSERT_TRUE( s.pop(it)); + ASSERT_CONTAINER_SIZE( s, nSize - i - 1 ); + } + ASSERT_TRUE( s.empty()); + ASSERT_CONTAINER_SIZE( s, 0 ); + + // clear + for ( size_t i = 0; i < nSize; ++i ) { + ASSERT_TRUE( s.push( static_cast(i))); + } + ASSERT_FALSE( s.empty()); + ASSERT_CONTAINER_SIZE( s, nSize ); + s.clear(); + ASSERT_TRUE( s.empty()); + ASSERT_CONTAINER_SIZE( s, 0 ); + + // pop from empty stack + it = nSize * 2; + ASSERT_FALSE( s.pop( it )); + ASSERT_EQ( it, static_cast( nSize * 2 )); + ASSERT_TRUE( s.empty()); + ASSERT_CONTAINER_SIZE( s, 0 ); + } + }; + +} // namespace cds_test + +#endif // CDSUNIT_STACK_TEST_SEGMENTED_STACK_H From ab2f2866512829c0f1acc2f2eed6ad5db183d6a6 Mon Sep 17 00:00:00 2001 From: Veelz Date: Sat, 26 Jan 2019 13:46:10 +0300 Subject: [PATCH 2/6] Update license and fix VC project properties --- cds/container/segmented_stack.h | 34 +++---------------------- projects/Win/vc141/cds.vcxproj | 2 ++ projects/Win/vc141/cds.vcxproj.filters | 6 +++++ projects/Win/vc141/stress-stack.vcxproj | 2 +- 4 files changed, 13 insertions(+), 31 deletions(-) diff --git a/cds/container/segmented_stack.h b/cds/container/segmented_stack.h index 61c494714..1d237ac35 100644 --- a/cds/container/segmented_stack.h 
+++ b/cds/container/segmented_stack.h @@ -1,32 +1,7 @@ -/* - This file is a part of libcds - Concurrent Data Structures library - - (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 - - Source code repo: http://github.com/khizmax/libcds/ - Download: http://sourceforge.net/projects/libcds/files/ - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE - FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ +// Copyright (c) 2006-2018 Maxim Khizhinsky +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) #ifndef CDSLIB_CONTAINER_SEGMENTED_STACK_H #define CDSLIB_CONTAINER_SEGMENTED_STACK_H @@ -34,7 +9,6 @@ #include #include // ref #include -// #include namespace cds { namespace container { diff --git a/projects/Win/vc141/cds.vcxproj b/projects/Win/vc141/cds.vcxproj index 9b904397c..092d32120 100644 --- a/projects/Win/vc141/cds.vcxproj +++ b/projects/Win/vc141/cds.vcxproj @@ -1108,6 +1108,7 @@ + @@ -1189,6 +1190,7 @@ + diff --git a/projects/Win/vc141/cds.vcxproj.filters b/projects/Win/vc141/cds.vcxproj.filters index d9c34bb92..3f5a07029 100644 --- a/projects/Win/vc141/cds.vcxproj.filters +++ b/projects/Win/vc141/cds.vcxproj.filters @@ -1258,5 +1258,11 @@ Header Files\cds\details + + Header Files\cds\intrusive + + + Header Files\cds\container + \ No newline at end of file diff --git a/projects/Win/vc141/stress-stack.vcxproj b/projects/Win/vc141/stress-stack.vcxproj index 12e4d58bc..6a92e8728 100644 --- a/projects/Win/vc141/stress-stack.vcxproj +++ b/projects/Win/vc141/stress-stack.vcxproj @@ -549,7 +549,7 @@ true true $(GTEST_LIB32);$(GTEST_ROOT)/lib/x86;$(BOOST_PATH)/stage32/lib;$(BOOST_PATH)/stage/lib;$(BOOST_PATH)/bin;%(AdditionalLibraryDirectories);$(OutDir) - gtest.lib;stress-framework.lib;libcds-$(PlatformTarget).lib;%(AdditionalDependencies) + gtest.lib;stress-framework.lib;%(AdditionalDependencies) From 6a7fdd12061b96e55de8c58d06c543aaed1900ed Mon Sep 17 00:00:00 2001 From: Veelz Date: Fri, 1 Feb 2019 22:41:15 +0300 Subject: [PATCH 3/6] commit --- cds/intrusive/segmented_stack.h | 259 ++++++++++++++++++----- test/stress/stack/intrusive_push_pop.cpp | 4 +- test/stress/stack/intrusive_stack_type.h | 2 +- 3 files changed, 209 insertions(+), 56 deletions(-) diff --git a/cds/intrusive/segmented_stack.h b/cds/intrusive/segmented_stack.h index 1720d6bbe..63b02f871 100644 --- a/cds/intrusive/segmented_stack.h +++ b/cds/intrusive/segmented_stack.h @@ -43,6 +43,10 @@ namespace cds { 
namespace intrusive { counter_type m_nSegmentCreated; ///< Number of created segments counter_type m_nSegmentDeleted; ///< Number of deleted segments + counter_type m_nSucceededCommits; + counter_type m_nFailedCommits; + counter_type m_nPushToRetiredSegment; + //@cond void onPush() { ++m_nPush; } void onPushPopulated() { ++m_nPushPopulated; } @@ -54,6 +58,9 @@ namespace cds { namespace intrusive { void onDeleteSegmentReq() { ++m_nDeleteSegmentReq; } void onSegmentCreated() { ++m_nSegmentCreated; } void onSegmentDeleted() { ++m_nSegmentDeleted; } + void onSucceededCommit() { ++m_nSucceededCommits; } + void onFailedCommit() { ++m_nFailedCommits; } + void onPushToRetiredSegment() { ++m_nPushToRetiredSegment; } //@endcond }; @@ -70,6 +77,9 @@ namespace cds { namespace intrusive { void onDeleteSegmentReq() const {} void onSegmentCreated() const {} void onSegmentDeleted() const {} + void onSucceededCommit() const {} + void onFailedCommit() const {} + void onPushToRetiredSegment() const {} //@endcond }; @@ -153,7 +163,7 @@ namespace cds { namespace intrusive { typedef typename traits::stat stat; ///< Internal statistics policy typedef typename traits::lock_type lock_type; ///< Type of mutex for maintaining an internal list of allocated segments. 
- static const size_t c_nHazardPtrCount = 2 ; ///< Count of hazard pointer required for the algorithm + static const size_t c_nHazardPtrCount = 5 ; ///< Count of hazard pointer required for the algorithm protected: //@cond @@ -166,16 +176,16 @@ namespace cds { namespace intrusive { struct segment: public boost::intrusive::slist_base_hook<> { cell * cells; // Cell array of size \ref m_nQuasiFactor + bool retired; size_t version; // version tag (ABA prevention tag) // cell array is placed here in one continuous memory block - bool retired; // Initializes the segment explicit segment( size_t nCellCount ) // MSVC warning C4355: 'this': used in base member initializer list : cells( reinterpret_cast< cell *>( this + 1 )) - , version( 0 ) , retired( false ) + , version( 0 ) { init( nCellCount ); } @@ -269,8 +279,13 @@ namespace cds { namespace intrusive { // The lock should be held cell const * pLastCell = s.cells + quasi_factor(); for ( cell const * pCell = s.cells; pCell < pLastCell; ++pCell ) { - if ( !pCell->data.load( memory_model::memory_order_relaxed ).bits()) - return false; + auto item = pCell->data.load(memory_model::memory_order_relaxed); + if (item.ptr() && !item.bits()) + { + return false; + } + //if ( !pCell->data.load( memory_model::memory_order_relaxed ).bits()) + // return false; } return true; } @@ -299,6 +314,7 @@ namespace cds { namespace intrusive { m_List.push_front( *pNew ); m_pHead.store(pNew, memory_model::memory_order_release); + return guard.assign( pNew ); } @@ -306,7 +322,8 @@ namespace cds { namespace intrusive { { // pHead is guarded by GC m_Stat.onDeleteSegmentReq(); - + //pHead->retired = true; + segment * pRet; { scoped_lock l( m_Lock ); @@ -316,16 +333,19 @@ namespace cds { namespace intrusive { m_pHead.store( nullptr, memory_model::memory_order_relaxed ); return guard.assign( nullptr ); } - + if ( pHead != &m_List.front() || get_version(pHead) != m_List.front().version ) { + // pHead->retired = true; m_pHead.store( &m_List.front(), 
memory_model::memory_order_relaxed ); return guard.assign( &m_List.front()); } + # ifdef _DEBUG - // assert( exhausted( m_List.front())); + //assert( exhausted( m_List.front())); # endif - + pHead->retired = true; + m_List.pop_front(); if ( m_List.empty()) { pRet = guard.assign( nullptr ); @@ -336,8 +356,7 @@ namespace cds { namespace intrusive { m_pHead.store( pRet, memory_model::memory_order_release ); } - pHead->retired = true; - retire_segment( pHead ); + //retire_segment( pHead ); m_Stat.onSegmentDeleted(); return pRet; @@ -348,6 +367,20 @@ namespace cds { namespace intrusive { return m_nQuasiFactor; } + void increment_head(segment *pSegment) + { + scoped_lock lock( m_Lock ); + if (pSegment) { + pSegment->version++; + } + } + + bool retired(segment *pSegment) + { + scoped_lock lock(m_Lock); + return (pSegment->retired); + } + private: typedef cds::details::Allocator< segment, allocator > segment_allocator; @@ -405,39 +438,84 @@ namespace cds { namespace intrusive { // LSB is used as a flag in marked pointer assert( (reinterpret_cast( &val ) & 1) == 0 ); - typename gc::Guard segmentGuard; - segment * pHeadSegment = m_SegmentList.head( segmentGuard ); - if ( !pHeadSegment) { - // no segments, create the new one - pHeadSegment = m_SegmentList.create_head(pHeadSegment, segmentGuard ); - assert(pHeadSegment); - } ++m_ItemCounter; + typename gc::Guard segmentGuard; while ( true ) { - regular_cell nullCell; - size_t index = 0; - if ( find_empty_cell(pHeadSegment, nullCell, index) ) + segment * pHeadSegment = m_SegmentList.head(segmentGuard); + if ( !pHeadSegment) { + // no segments, create the new one + pHeadSegment = m_SegmentList.create_head(pHeadSegment, segmentGuard ); + assert(pHeadSegment); + } + /* + typename gc::Guard segGuard; + size_t i = 0; + size_t qf = quasi_factor(); + do { - typename gc::Guard segGuard; - if (pHeadSegment == m_SegmentList.head(segGuard)) - { + if (pHeadSegment == m_SegmentList.head(segGuard)) { + regular_cell nullCell; + typename 
gc::Guard itemGuard; regular_cell newCell(&val); - if (pHeadSegment->cells[index].data.compare_exchange_strong(nullCell, newCell, - memory_model::memory_order_release, atomics::memory_order_relaxed)) + itemGuard.assign(newCell.ptr()); + if (pHeadSegment->cells[i].data.compare_exchange_strong(nullCell, newCell)) { + if (committed(pHeadSegment, newCell, i)) + { + m_Stat.onPush(); + return true; + } + } + } + else { + break; + } + i++; + } while (i < qf); + + if (pHeadSegment == m_SegmentList.head(segGuard)) + { + pHeadSegment = m_SegmentList.create_head(pHeadSegment, segmentGuard); + } + else + { + pHeadSegment = m_SegmentList.head(segmentGuard); + } + */ + + size_t index = 0; + typename gc::Guard itemGuard; + bool found = find_empty_cell(pHeadSegment, index); + + typename gc::Guard segGuard; + if (pHeadSegment == m_SegmentList.head(segGuard)) { + if (found) { + regular_cell nullCell; + regular_cell newCell( &val ); + itemGuard.assign(newCell.ptr()); + //if (pHeadSegment->cells[index].data.compare_exchange_strong(nullCell, newCell, + // memory_model::memory_order_release, atomics::memory_order_relaxed)) + if (pHeadSegment->cells[index].data.compare_exchange_strong(nullCell, newCell)) { if ( committed(pHeadSegment, newCell, index) ) { m_Stat.onPush(); return true; } } + m_Stat.onPushContended(); + } + else { + pHeadSegment = m_SegmentList.create_head(pHeadSegment, segmentGuard); } - // segment or segment list was updated - continue; + } + //else if (!m_SegmentList.head(segGuard)) { + // continue; + //pHeadSegment = m_SegmentList.create_head(pHeadSegment, segmentGuard); + //} // No available position, create a new segment - pHeadSegment = m_SegmentList.create_head(pHeadSegment, segmentGuard ); + //pHeadSegment = m_SegmentList.create_head(pHeadSegment, segmentGuard); } } @@ -496,19 +574,18 @@ namespace cds { namespace intrusive { protected: //@cond - bool find_empty_cell(segment *pHeadSegment, regular_cell &item, size_t &index) + bool find_empty_cell(segment 
*pHeadSegment, size_t &index) { size_t i = 0; size_t qf = quasi_factor(); do { - regular_cell cell = pHeadSegment->cells[i].data.load(memory_model::memory_order_relaxed); - if (cell.all()) { + // regular_cell cell = pHeadSegment->cells[i].data.load(memory_model::memory_order_relaxed); + if (pHeadSegment->cells[i].data.load().all()) { // Cell is not empty, go next m_Stat.onPushPopulated(); } else { // empty cell is found - item = cell; index = i; return true; } @@ -519,43 +596,115 @@ namespace cds { namespace intrusive { } //@endcond + //@cond + bool find_item(segment *pHeadSegment, regular_cell &item, typename gc::Guard &itemGuard, size_t &index) + { + size_t i = quasi_factor() - 1; + // regular_cell item; + // typename gc::Guard itemGuard; + do + { + item = pHeadSegment->cells[i].data.load(); + itemGuard.assign(item.ptr()); + //item = pHeadSegment->cells[i].data.load(memory_model::memory_order_relaxed); + if (item.ptr()) + { + if (!item.bits()) + { + index = i; + return true; + } + } + --i; + } while (i >= 0); + + return false; + } + //@endcond + //@cond bool committed(segment *pHeadSegment, regular_cell &new_item, size_t index) { - if (pHeadSegment->cells[index].data.load() != new_item) + //if (pHeadSegment->cells[index].data.load() != new_item) + m_SegmentList.increment_head(pHeadSegment); + if (new_item.bits()) { + m_Stat.onSucceededCommit(); return true; } - else if (!pHeadSegment->retired) + else if (!m_SegmentList.retired(pHeadSegment)) { + m_Stat.onSucceededCommit(); return true; } - else // top_old->retired == true + else // pHeadSegment->retired == true + { + if (!pHeadSegment->cells[index].data.compare_exchange_strong(new_item, new_item | 1)) + { + m_Stat.onSucceededCommit(); + return true; + } + } + /* + else // pHeadSegment->retired == true { + m_Stat.onPushToRetiredSegment(); + // m_SegmentList.increment_version(pHeadSegment); + // try to change version that would force threads retry their remove op. 
+ //++pHeadSegment->version; + typename gc::Guard segmentGuard; + if (pHeadSegment == m_SegmentList.head(segmentGuard)) + { + m_SegmentList.increment_head(pHeadSegment); + m_Stat.onSucceededCommit(); + return true; + } + //pHeadSegment = m_SegmentList.head(segmentGuard); + //if (!pHeadSegment->cells[index].data.compare_exchange_strong(new_item, new_item | 1)) + //{ + // m_Stat.onSucceededCommit(); + // return true; + //} + } + /* + else // pHeadSegment->retired == true + { + m_Stat.onPushToRetiredSegment(); typename gc::Guard segmentGuard; regular_cell nullCell; if (pHeadSegment != m_SegmentList.head(segmentGuard)) - { - if (!pHeadSegment->cells[index].data.compare_exchange_strong(new_item, nullCell, - memory_model::memory_order_release, atomics::memory_order_relaxed)) + { // segment with head pHeadSegments was removed + // try undo insertion + //if (!pHeadSegment->cells[index].data.compare_exchange_strong(new_item, nullCell, + // memory_model::memory_order_release, atomics::memory_order_relaxed)) + //if (!pHeadSegment->cells[index].data.compare_exchange_strong(new_item, nullCell)) + if (!pHeadSegment->cells[index].data.compare_exchange_strong(new_item, new_item | 1)) { + m_Stat.onSucceededCommit(); return true; } } else { - pHeadSegment->version++; + // try to change version that would force threads retry their remove op. 
+ ++pHeadSegment->version; if ( pHeadSegment == m_SegmentList.head(segmentGuard) ) { + m_Stat.onSucceededCommit(); return true; } - if (!pHeadSegment->cells[index].data.compare_exchange_strong(new_item, nullCell, - memory_model::memory_order_release, atomics::memory_order_relaxed)) + //if (!pHeadSegment->cells[index].data.compare_exchange_strong(new_item, nullCell, + // memory_model::memory_order_release, atomics::memory_order_relaxed)) + //if (!pHeadSegment->cells[index].data.compare_exchange_strong(new_item, nullCell)) + if (!pHeadSegment->cells[index].data.compare_exchange_strong(new_item, new_item | 1)) { + m_Stat.onSucceededCommit(); return true; } } } + */ + m_Stat.onFailedCommit(); return false; } //@endcond @@ -564,36 +713,37 @@ namespace cds { namespace intrusive { bool do_pop( typename gc::Guard& itemGuard ) { typename gc::Guard segmentGuard; - segment * pHeadSegment = m_SegmentList.head(segmentGuard); while (true) { + CDS_DEBUG_ONLY(size_t nLoopCount = 0); + segment * pHeadSegment = m_SegmentList.head(segmentGuard); if (!pHeadSegment) { // Stack is empty m_Stat.onPopEmpty(); return false; } - regular_cell item; - CDS_DEBUG_ONLY(size_t nLoopCount = 0); size_t i = 1; size_t qf = quasi_factor(); + regular_cell item; do { CDS_DEBUG_ONLY(++nLoopCount); - // Guard the item // In segmented stack the cell cannot be reused // So no loop is needed here to protect the cell - item = pHeadSegment->cells[qf - i].data.load(memory_model::memory_order_relaxed); + item = pHeadSegment->cells[qf - i].data.load(); + //item = pHeadSegment->cells[qf - i].data.load(memory_model::memory_order_relaxed); itemGuard.assign(item.ptr()); // Check if this cell is empty, which means an element // can be pushed to this cell in the future if (item.ptr()) { // If the item is not deleted yet - if (!item.bits()) + if (!item.bits()) { + typename gc::Guard segGuard; // Try to mark the cell as deleted - if (pHeadSegment->cells[qf - i].data.compare_exchange_strong(item, item | 1, - 
memory_model::memory_order_acquire, atomics::memory_order_relaxed)) + if (pHeadSegment->cells[qf - i].data.compare_exchange_strong(item, item | 1)) + //memory_model::memory_order_acquire, atomics::memory_order_relaxed)) { --m_ItemCounter; m_Stat.onPop(); @@ -602,14 +752,17 @@ namespace cds { namespace intrusive { } assert(item.bits()); m_Stat.onPopContended(); - continue; + //bContented = true; + //i = 1; + //continue; } } i++; } while (i <= qf); - // All nodes have been poped, we can safely remove the first segment - pHeadSegment = m_SegmentList.remove_head(pHeadSegment, segmentGuard); + //if (m_SegmentList.exhausted(*const_cast(pHeadSegment))) + // if (i > qf && !bContented) + pHeadSegment = m_SegmentList.remove_head(pHeadSegment, segmentGuard); } } //@endcond diff --git a/test/stress/stack/intrusive_push_pop.cpp b/test/stress/stack/intrusive_push_pop.cpp index 5e8da970b..aaea17547 100644 --- a/test/stress/stack/intrusive_push_pop.cpp +++ b/test/stress/stack/intrusive_push_pop.cpp @@ -103,7 +103,7 @@ namespace { Stack::gc::force_dispose(); } }; - + /* // TreiberStack #define CDSSTRESS_Stack_F( test_fixture, stack_impl ) \ TEST_F( test_fixture, stack_impl ) \ @@ -165,7 +165,7 @@ namespace { CDSSTRESS_StdStack( intrusive_stack_push_pop ) #undef CDSSTRESS_Stack_F - + */ // SegmentedStack #define CDSSTRESS_Stack_F( test_fixture, stack_impl ) \ TEST_F( test_fixture, stack_impl ) \ diff --git a/test/stress/stack/intrusive_stack_type.h b/test/stress/stack/intrusive_stack_type.h index 57993c462..9432a10ab 100644 --- a/test/stress/stack/intrusive_stack_type.h +++ b/test/stress/stack/intrusive_stack_type.h @@ -506,9 +506,9 @@ namespace cds_test { CDSSTRESS_Stack_F( test_fixture, FCStack_slist_mutex_elimination_stat ) \ #define CDSSTRESS_SegmentedStack( test_fixture ) \ + CDSSTRESS_Stack_F( test_fixture, SegmentedStack_HP_spin_stat ) \ CDSSTRESS_Stack_F( test_fixture, SegmentedStack_HP_spin ) \ CDSSTRESS_Stack_F( test_fixture, SegmentedStack_HP_spin_padding ) \ - 
CDSSTRESS_Stack_F( test_fixture, SegmentedStack_HP_spin_stat ) \ CDSSTRESS_Stack_F( test_fixture, SegmentedStack_HP_mutex ) \ CDSSTRESS_Stack_F( test_fixture, SegmentedStack_HP_mutex_stat ) \ CDSSTRESS_Stack_F( test_fixture, SegmentedStack_DHP_mutex ) \ From 111e262d0132832115700a6d79a06cf0b254b2d6 Mon Sep 17 00:00:00 2001 From: Veelz Date: Sat, 2 Feb 2019 14:41:02 +0300 Subject: [PATCH 4/6] add empty check before segment removing --- cds/intrusive/segmented_stack.h | 335 +++++++++----------------------- 1 file changed, 95 insertions(+), 240 deletions(-) diff --git a/cds/intrusive/segmented_stack.h b/cds/intrusive/segmented_stack.h index 63b02f871..6024ed753 100644 --- a/cds/intrusive/segmented_stack.h +++ b/cds/intrusive/segmented_stack.h @@ -40,27 +40,24 @@ namespace cds { namespace intrusive { counter_type m_nCreateSegmentReq; ///< Number of request to create new segment counter_type m_nDeleteSegmentReq; ///< Number to request to delete segment - counter_type m_nSegmentCreated; ///< Number of created segments - counter_type m_nSegmentDeleted; ///< Number of deleted segments - - counter_type m_nSucceededCommits; - counter_type m_nFailedCommits; - counter_type m_nPushToRetiredSegment; + counter_type m_nSegmentCreated; ///< Number of created segments + counter_type m_nSegmentDeleted; ///< Number of deleted segments + counter_type m_nSucceededCommits; ///< Number of succeeded commits + counter_type m_nFailedCommits; ///< Number of failed commits //@cond - void onPush() { ++m_nPush; } - void onPushPopulated() { ++m_nPushPopulated; } - void onPushContended() { ++m_nPushContended; } - void onPop() { ++m_nPop; } - void onPopEmpty() { ++m_nPopEmpty; } - void onPopContended() { ++m_nPopContended; } - void onCreateSegmentReq() { ++m_nCreateSegmentReq; } - void onDeleteSegmentReq() { ++m_nDeleteSegmentReq; } - void onSegmentCreated() { ++m_nSegmentCreated; } - void onSegmentDeleted() { ++m_nSegmentDeleted; } - void onSucceededCommit() { ++m_nSucceededCommits; } - void 
onFailedCommit() { ++m_nFailedCommits; } - void onPushToRetiredSegment() { ++m_nPushToRetiredSegment; } + void onPush() { ++m_nPush; } + void onPushPopulated() { ++m_nPushPopulated; } + void onPushContended() { ++m_nPushContended; } + void onPop() { ++m_nPop; } + void onPopEmpty() { ++m_nPopEmpty; } + void onPopContended() { ++m_nPopContended; } + void onCreateSegmentReq() { ++m_nCreateSegmentReq; } + void onDeleteSegmentReq() { ++m_nDeleteSegmentReq; } + void onSegmentCreated() { ++m_nSegmentCreated; } + void onSegmentDeleted() { ++m_nSegmentDeleted; } + void onSucceededCommit() { ++m_nSucceededCommits; } + void onFailedCommit() { ++m_nFailedCommits; } //@endcond }; @@ -77,9 +74,8 @@ namespace cds { namespace intrusive { void onDeleteSegmentReq() const {} void onSegmentCreated() const {} void onSegmentDeleted() const {} - void onSucceededCommit() const {} - void onFailedCommit() const {} - void onPushToRetiredSegment() const {} + void onSucceededCommit() const {} + void onFailedCommit() const {} //@endcond }; @@ -163,7 +159,7 @@ namespace cds { namespace intrusive { typedef typename traits::stat stat; ///< Internal statistics policy typedef typename traits::lock_type lock_type; ///< Type of mutex for maintaining an internal list of allocated segments. 
- static const size_t c_nHazardPtrCount = 5 ; ///< Count of hazard pointer required for the algorithm + static const size_t c_nHazardPtrCount = 2 ; ///< Count of hazard pointer required for the algorithm protected: //@cond @@ -176,7 +172,7 @@ namespace cds { namespace intrusive { struct segment: public boost::intrusive::slist_base_hook<> { cell * cells; // Cell array of size \ref m_nQuasiFactor - bool retired; + bool retired; // Mark that indicates is the segment removed size_t version; // version tag (ABA prevention tag) // cell array is placed here in one continuous memory block @@ -322,59 +318,51 @@ namespace cds { namespace intrusive { { // pHead is guarded by GC m_Stat.onDeleteSegmentReq(); - //pHead->retired = true; segment * pRet; { scoped_lock l( m_Lock ); if ( m_List.empty()) { -// m_pTail.store( nullptr, memory_model::memory_order_relaxed ); m_pHead.store( nullptr, memory_model::memory_order_relaxed ); return guard.assign( nullptr ); } if ( pHead != &m_List.front() || get_version(pHead) != m_List.front().version ) { - // pHead->retired = true; m_pHead.store( &m_List.front(), memory_model::memory_order_relaxed ); return guard.assign( &m_List.front()); } - - -# ifdef _DEBUG - //assert( exhausted( m_List.front())); -# endif - pHead->retired = true; - m_List.pop_front(); - if ( m_List.empty()) { - pRet = guard.assign( nullptr ); -// m_pTail.store( nullptr, memory_model::memory_order_relaxed ); - } - else - pRet = guard.assign( &m_List.front()); - m_pHead.store( pRet, memory_model::memory_order_release ); + if ( empty(pHead) ) + { // pHead is empty + pHead->retired = true; + m_List.pop_front(); + if ( m_List.empty()) { + pRet = guard.assign( nullptr ); + } + else + pRet = guard.assign( &m_List.front()); + m_pHead.store( pRet, memory_model::memory_order_release ); + } + else + { // pHead is not empty + // since the lock is held, no one changed pHead + return pHead; + } } - //retire_segment( pHead ); + retire_segment( pHead ); m_Stat.onSegmentDeleted(); return 
pRet; } + size_t quasi_factor() const { return m_nQuasiFactor; } - void increment_head(segment *pSegment) - { - scoped_lock lock( m_Lock ); - if (pSegment) { - pSegment->version++; - } - } - bool retired(segment *pSegment) { scoped_lock lock(m_Lock); @@ -384,6 +372,27 @@ namespace cds { namespace intrusive { private: typedef cds::details::Allocator< segment, allocator > segment_allocator; + bool empty(segment const * s) + { + cell const * pLastCell = s->cells + quasi_factor(); + for (cell const * pCell = s->cells; pCell < pLastCell; ++pCell) { + auto item = pCell->data.load(memory_model::memory_order_relaxed); + if (item.ptr() && !item.bits()) + { // segments contains non deleted items + return false; + } + } + // check to the segment slots not changed + for (cell const * pCell = s->cells; pCell < pLastCell; ++pCell) { + auto item = pCell->data.load(memory_model::memory_order_relaxed); + if (item.ptr() && !item.bits()) + { // segments contains non deleted items + return false; + } + } + return true; + } + static size_t get_version( segment * pSegment ) { return pSegment ? 
pSegment->version : 0; @@ -408,7 +417,6 @@ namespace cds { namespace intrusive { protected: segment_list m_SegmentList; ///< List of segments - item_counter m_ItemCounter; ///< Item counter stat m_Stat; ///< Internal statistics @@ -434,7 +442,6 @@ namespace cds { namespace intrusive { /// Inserts a new element at last segment of the stack bool push( value_type& val ) { - // return true; // LSB is used as a flag in marked pointer assert( (reinterpret_cast( &val ) & 1) == 0 ); @@ -448,18 +455,25 @@ namespace cds { namespace intrusive { pHeadSegment = m_SegmentList.create_head(pHeadSegment, segmentGuard ); assert(pHeadSegment); } - /* + typename gc::Guard segGuard; size_t i = 0; size_t qf = quasi_factor(); do { - if (pHeadSegment == m_SegmentList.head(segGuard)) { - regular_cell nullCell; + if ( pHeadSegment->cells[i].data.load(memory_model::memory_order_relaxed).all() ) + { + // Cell is not empty, go next + m_Stat.onPushPopulated(); + } + else + { typename gc::Guard itemGuard; + regular_cell nullCell; regular_cell newCell(&val); itemGuard.assign(newCell.ptr()); - if (pHeadSegment->cells[i].data.compare_exchange_strong(nullCell, newCell)) + if ( pHeadSegment->cells[i].data.compare_exchange_strong(nullCell, newCell, + memory_model::memory_order_acquire, atomics::memory_order_relaxed) ) { if (committed(pHeadSegment, newCell, i)) { @@ -467,55 +481,15 @@ namespace cds { namespace intrusive { return true; } } - } - else { - break; - } - i++; - } while (i < qf); - if (pHeadSegment == m_SegmentList.head(segGuard)) - { - pHeadSegment = m_SegmentList.create_head(pHeadSegment, segmentGuard); - } - else - { - pHeadSegment = m_SegmentList.head(segmentGuard); - } - */ - - size_t index = 0; - typename gc::Guard itemGuard; - bool found = find_empty_cell(pHeadSegment, index); - - typename gc::Guard segGuard; - if (pHeadSegment == m_SegmentList.head(segGuard)) { - if (found) { - regular_cell nullCell; - regular_cell newCell( &val ); - itemGuard.assign(newCell.ptr()); - //if 
(pHeadSegment->cells[index].data.compare_exchange_strong(nullCell, newCell, - // memory_model::memory_order_release, atomics::memory_order_relaxed)) - if (pHeadSegment->cells[index].data.compare_exchange_strong(nullCell, newCell)) { - if ( committed(pHeadSegment, newCell, index) ) - { - m_Stat.onPush(); - return true; - } - } + assert(nullCell.ptr()); m_Stat.onPushContended(); } - else { - pHeadSegment = m_SegmentList.create_head(pHeadSegment, segmentGuard); - } + i++; + } while (i < qf); - } - //else if (!m_SegmentList.head(segGuard)) { - // continue; - //pHeadSegment = m_SegmentList.create_head(pHeadSegment, segmentGuard); - //} - // No available position, create a new segment - //pHeadSegment = m_SegmentList.create_head(pHeadSegment, segmentGuard); + // No available position, create a new segment + pHeadSegment = m_SegmentList.create_head(pHeadSegment, segmentGuard); } } @@ -572,63 +546,11 @@ namespace cds { namespace intrusive { } protected: - - //@cond - bool find_empty_cell(segment *pHeadSegment, size_t &index) - { - size_t i = 0; - size_t qf = quasi_factor(); - do { - // regular_cell cell = pHeadSegment->cells[i].data.load(memory_model::memory_order_relaxed); - if (pHeadSegment->cells[i].data.load().all()) { - // Cell is not empty, go next - m_Stat.onPushPopulated(); - } - else { - // empty cell is found - index = i; - return true; - } - ++i; - } while (i < qf); - - return false; - } - //@endcond - - //@cond - bool find_item(segment *pHeadSegment, regular_cell &item, typename gc::Guard &itemGuard, size_t &index) - { - size_t i = quasi_factor() - 1; - // regular_cell item; - // typename gc::Guard itemGuard; - do - { - item = pHeadSegment->cells[i].data.load(); - itemGuard.assign(item.ptr()); - //item = pHeadSegment->cells[i].data.load(memory_model::memory_order_relaxed); - if (item.ptr()) - { - if (!item.bits()) - { - index = i; - return true; - } - } - --i; - } while (i >= 0); - - return false; - } - //@endcond - //@cond bool committed(segment *pHeadSegment, 
regular_cell &new_item, size_t index) { - //if (pHeadSegment->cells[index].data.load() != new_item) - m_SegmentList.increment_head(pHeadSegment); if (new_item.bits()) - { + { // item already poped m_Stat.onSucceededCommit(); return true; } @@ -639,71 +561,16 @@ namespace cds { namespace intrusive { } else // pHeadSegment->retired == true { - if (!pHeadSegment->cells[index].data.compare_exchange_strong(new_item, new_item | 1)) + // try to mark item as removed + // if not succeeded, item already poped + if (!pHeadSegment->cells[index].data.compare_exchange_strong(new_item, new_item | 1, + memory_model::memory_order_acquire, atomics::memory_order_relaxed)) { m_Stat.onSucceededCommit(); return true; } } - /* - else // pHeadSegment->retired == true - { - m_Stat.onPushToRetiredSegment(); - // m_SegmentList.increment_version(pHeadSegment); - // try to change version that would force threads retry their remove op. - //++pHeadSegment->version; - typename gc::Guard segmentGuard; - if (pHeadSegment == m_SegmentList.head(segmentGuard)) - { - m_SegmentList.increment_head(pHeadSegment); - m_Stat.onSucceededCommit(); - return true; - } - //pHeadSegment = m_SegmentList.head(segmentGuard); - //if (!pHeadSegment->cells[index].data.compare_exchange_strong(new_item, new_item | 1)) - //{ - // m_Stat.onSucceededCommit(); - // return true; - //} - } - /* - else // pHeadSegment->retired == true - { - m_Stat.onPushToRetiredSegment(); - typename gc::Guard segmentGuard; - regular_cell nullCell; - if (pHeadSegment != m_SegmentList.head(segmentGuard)) - { // segment with head pHeadSegments was removed - // try undo insertion - //if (!pHeadSegment->cells[index].data.compare_exchange_strong(new_item, nullCell, - // memory_model::memory_order_release, atomics::memory_order_relaxed)) - //if (!pHeadSegment->cells[index].data.compare_exchange_strong(new_item, nullCell)) - if (!pHeadSegment->cells[index].data.compare_exchange_strong(new_item, new_item | 1)) - { - m_Stat.onSucceededCommit(); - return 
true; - } - } - else - { - // try to change version that would force threads retry their remove op. - ++pHeadSegment->version; - if ( pHeadSegment == m_SegmentList.head(segmentGuard) ) - { - m_Stat.onSucceededCommit(); - return true; - } - //if (!pHeadSegment->cells[index].data.compare_exchange_strong(new_item, nullCell, - // memory_model::memory_order_release, atomics::memory_order_relaxed)) - //if (!pHeadSegment->cells[index].data.compare_exchange_strong(new_item, nullCell)) - if (!pHeadSegment->cells[index].data.compare_exchange_strong(new_item, new_item | 1)) - { - m_Stat.onSucceededCommit(); - return true; - } - } - } - */ + m_Stat.onFailedCommit(); return false; } @@ -714,7 +581,6 @@ namespace cds { namespace intrusive { { typename gc::Guard segmentGuard; while (true) { - CDS_DEBUG_ONLY(size_t nLoopCount = 0); segment * pHeadSegment = m_SegmentList.head(segmentGuard); if (!pHeadSegment) { // Stack is empty @@ -730,39 +596,28 @@ namespace cds { namespace intrusive { // Guard the item // In segmented stack the cell cannot be reused // So no loop is needed here to protect the cell - item = pHeadSegment->cells[qf - i].data.load(); - //item = pHeadSegment->cells[qf - i].data.load(memory_model::memory_order_relaxed); + item = pHeadSegment->cells[qf - i].data.load(memory_model::memory_order_relaxed); itemGuard.assign(item.ptr()); - // Check if this cell is empty, which means an element - // can be pushed to this cell in the future - if (item.ptr()) - { // If the item is not deleted yet - if (!item.bits()) + // Check if this cell is not empty and not marked deleted + if (item.ptr() && !item.bits()) + { + // Try to mark the cell as deleted + if (pHeadSegment->cells[qf - i].data.compare_exchange_strong(item, item | 1, + memory_model::memory_order_acquire, atomics::memory_order_relaxed)) { - typename gc::Guard segGuard; - // Try to mark the cell as deleted - if (pHeadSegment->cells[qf - i].data.compare_exchange_strong(item, item | 1)) - 
//memory_model::memory_order_acquire, atomics::memory_order_relaxed)) - { - --m_ItemCounter; - m_Stat.onPop(); + --m_ItemCounter; + m_Stat.onPop(); - return true; - } - assert(item.bits()); - m_Stat.onPopContended(); - //bContented = true; - //i = 1; - //continue; + return true; } + assert(item.bits()); + m_Stat.onPopContended(); } i++; } while (i <= qf); - // All nodes have been poped, we can safely remove the first segment - //if (m_SegmentList.exhausted(*const_cast(pHeadSegment))) - // if (i > qf && !bContented) - pHeadSegment = m_SegmentList.remove_head(pHeadSegment, segmentGuard); + // No nodes is found to pop, try remove the first segment + pHeadSegment = m_SegmentList.remove_head(pHeadSegment, segmentGuard); } } //@endcond From cfde92ffa5fbe46c2e2dd67ad80f81d9fb4ad1a7 Mon Sep 17 00:00:00 2001 From: Veelz Date: Sat, 2 Feb 2019 15:03:20 +0300 Subject: [PATCH 5/6] code formatting --- cds/intrusive/segmented_stack.h | 330 ++++++++++++++++---------------- 1 file changed, 165 insertions(+), 165 deletions(-) diff --git a/cds/intrusive/segmented_stack.h b/cds/intrusive/segmented_stack.h index 2e2e3c7b0..2c67e2ec6 100644 --- a/cds/intrusive/segmented_stack.h +++ b/cds/intrusive/segmented_stack.h @@ -40,10 +40,10 @@ namespace cds { namespace intrusive { counter_type m_nCreateSegmentReq; ///< Number of request to create new segment counter_type m_nDeleteSegmentReq; ///< Number to request to delete segment - counter_type m_nSegmentCreated; ///< Number of created segments - counter_type m_nSegmentDeleted; ///< Number of deleted segments - counter_type m_nSucceededCommits; ///< Number of succeeded commits - counter_type m_nFailedCommits; ///< Number of failed commits + counter_type m_nSegmentCreated; ///< Number of created segments + counter_type m_nSegmentDeleted; ///< Number of deleted segments + counter_type m_nSucceededCommits; ///< Number of succeeded commits + counter_type m_nFailedCommits; ///< Number of failed commits //@cond void onPush() { ++m_nPush; } @@ -56,8 
+56,8 @@ namespace cds { namespace intrusive { void onDeleteSegmentReq() { ++m_nDeleteSegmentReq; } void onSegmentCreated() { ++m_nSegmentCreated; } void onSegmentDeleted() { ++m_nSegmentDeleted; } - void onSucceededCommit() { ++m_nSucceededCommits; } - void onFailedCommit() { ++m_nFailedCommits; } + void onSucceededCommit() { ++m_nSucceededCommits; } + void onFailedCommit() { ++m_nFailedCommits; } //@endcond }; @@ -74,8 +74,8 @@ namespace cds { namespace intrusive { void onDeleteSegmentReq() const {} void onSegmentCreated() const {} void onSegmentDeleted() const {} - void onSucceededCommit() const {} - void onFailedCommit() const {} + void onSucceededCommit() const {} + void onFailedCommit() const {} //@endcond }; @@ -172,7 +172,7 @@ namespace cds { namespace intrusive { struct segment: public boost::intrusive::slist_base_hook<> { cell * cells; // Cell array of size \ref m_nQuasiFactor - bool retired; // Mark that indicates is the segment removed + bool retired; // Mark that indicates is the segment removed size_t version; // version tag (ABA prevention tag) // cell array is placed here in one continuous memory block @@ -180,7 +180,7 @@ namespace cds { namespace intrusive { explicit segment( size_t nCellCount ) // MSVC warning C4355: 'this': used in base member initializer list : cells( reinterpret_cast< cell *>( this + 1 )) - , retired( false ) + , retired( false ) , version( 0 ) { init( nCellCount ); @@ -275,11 +275,11 @@ namespace cds { namespace intrusive { // The lock should be held cell const * pLastCell = s.cells + quasi_factor(); for ( cell const * pCell = s.cells; pCell < pLastCell; ++pCell ) { - auto item = pCell->data.load(memory_model::memory_order_relaxed); - if (item.ptr() && !item.bits()) - { - return false; - } + auto item = pCell->data.load(memory_model::memory_order_relaxed); + if (item.ptr() && !item.bits()) + { + return false; + } } return true; } @@ -331,25 +331,25 @@ namespace cds { namespace intrusive { return guard.assign( &m_List.front()); 
} - if ( empty(pHead) ) { - // pHead is empty - pHead->retired = true; - m_List.pop_front(); - if ( m_List.empty()) { - pRet = guard.assign( nullptr ); - } - else - pRet = guard.assign( &m_List.front()); - m_pHead.store( pRet, memory_model::memory_order_release ); - } - else { - // pHead is not empty - // since the lock is held, no one changed pHead - return pHead; - } + if ( empty(pHead) ) { + // pHead is empty + pHead->retired = true; + m_List.pop_front(); + if ( m_List.empty()) { + pRet = guard.assign( nullptr ); + } + else + pRet = guard.assign( &m_List.front()); + m_pHead.store( pRet, memory_model::memory_order_release ); + } + else { + // pHead is not empty + // since the lock is held, no one changed pHead + return pHead; + } } - retire_segment( pHead ); + retire_segment( pHead ); m_Stat.onSegmentDeleted(); return pRet; @@ -361,35 +361,35 @@ namespace cds { namespace intrusive { return m_nQuasiFactor; } - bool retired(segment *pSegment) - { - scoped_lock lock(m_Lock); - return (pSegment->retired); - } + bool retired(segment *pSegment) + { + scoped_lock lock(m_Lock); + return (pSegment->retired); + } private: typedef cds::details::Allocator< segment, allocator > segment_allocator; - bool empty(segment const * s) - { - cell const * pLastCell = s->cells + quasi_factor(); - for (cell const * pCell = s->cells; pCell < pLastCell; ++pCell) { - auto item = pCell->data.load(memory_model::memory_order_relaxed); - if (item.ptr() && !item.bits()) { - // segments contains non deleted items - return false; - } - } - // check to the segment slots not changed - for (cell const * pCell = s->cells; pCell < pLastCell; ++pCell) { - auto item = pCell->data.load(memory_model::memory_order_relaxed); - if (item.ptr() && !item.bits()) { - // segments contains non deleted items - return false; - } - } - return true; - } + bool empty(segment const * s) + { + cell const * pLastCell = s->cells + quasi_factor(); + for (cell const * pCell = s->cells; pCell < pLastCell; ++pCell) { + auto 
item = pCell->data.load(memory_model::memory_order_relaxed); + if (item.ptr() && !item.bits()) { + // segments contains non deleted items + return false; + } + } + // check to the segment slots not changed + for (cell const * pCell = s->cells; pCell < pLastCell; ++pCell) { + auto item = pCell->data.load(memory_model::memory_order_relaxed); + if (item.ptr() && !item.bits()) { + // segments contains non deleted items + return false; + } + } + return true; + } static size_t get_version( segment * pSegment ) { @@ -445,46 +445,46 @@ namespace cds { namespace intrusive { ++m_ItemCounter; - typename gc::Guard segmentGuard; - while ( true ) { - segment * pHeadSegment = m_SegmentList.head(segmentGuard); - if ( !pHeadSegment) { - // no segments, create the new one - pHeadSegment = m_SegmentList.create_head(pHeadSegment, segmentGuard ); - assert(pHeadSegment); - } - - typename gc::Guard segGuard; - size_t i = 0; - size_t qf = quasi_factor(); - do - { - if ( pHeadSegment->cells[i].data.load(memory_model::memory_order_relaxed).all() ) { - // Cell is not empty, go next - m_Stat.onPushPopulated(); - } - else { - typename gc::Guard itemGuard; - regular_cell nullCell; - regular_cell newCell(&val); - itemGuard.assign(newCell.ptr()); - if ( pHeadSegment->cells[i].data.compare_exchange_strong(nullCell, newCell, - memory_model::memory_order_acquire, atomics::memory_order_relaxed) ) - { - if (committed(pHeadSegment, newCell, i)) { - m_Stat.onPush(); - return true; - } - } - - assert(nullCell.ptr()); - m_Stat.onPushContended(); - } - i++; - } while (i < qf); - - // No available position, create a new segment - pHeadSegment = m_SegmentList.create_head(pHeadSegment, segmentGuard); + typename gc::Guard segmentGuard; + while ( true ) { + segment * pHeadSegment = m_SegmentList.head(segmentGuard); + if ( !pHeadSegment) { + // no segments, create the new one + pHeadSegment = m_SegmentList.create_head(pHeadSegment, segmentGuard ); + assert(pHeadSegment); + } + + typename gc::Guard segGuard; + 
size_t i = 0; + size_t qf = quasi_factor(); + do + { + if ( pHeadSegment->cells[i].data.load(memory_model::memory_order_relaxed).all() ) { + // Cell is not empty, go next + m_Stat.onPushPopulated(); + } + else { + typename gc::Guard itemGuard; + regular_cell nullCell; + regular_cell newCell(&val); + itemGuard.assign(newCell.ptr()); + if ( pHeadSegment->cells[i].data.compare_exchange_strong(nullCell, newCell, + memory_model::memory_order_acquire, atomics::memory_order_relaxed) ) + { + if (committed(pHeadSegment, newCell, i)) { + m_Stat.onPush(); + return true; + } + } + + assert(nullCell.ptr()); + m_Stat.onPushContended(); + } + i++; + } while (i < qf); + + // No available position, create a new segment + pHeadSegment = m_SegmentList.create_head(pHeadSegment, segmentGuard); } } @@ -541,77 +541,77 @@ namespace cds { namespace intrusive { } protected: - //@cond - bool committed(segment *pHeadSegment, regular_cell &new_item, size_t index) - { - if (new_item.bits()) { - // item already poped - m_Stat.onSucceededCommit(); - return true; - } - else if (!m_SegmentList.retired(pHeadSegment)) { - m_Stat.onSucceededCommit(); - return true; - } - else { - // segment is marked as removed, - // so try to mark item as removed - // if not succeeded, item already poped - if (!pHeadSegment->cells[index].data.compare_exchange_strong(new_item, new_item | 1, - memory_model::memory_order_acquire, atomics::memory_order_relaxed)) - { - m_Stat.onSucceededCommit(); - return true; - } - } - - m_Stat.onFailedCommit(); - return false; - } - //@endcond + //@cond + bool committed(segment *pHeadSegment, regular_cell &new_item, size_t index) + { + if (new_item.bits()) { + // item already poped + m_Stat.onSucceededCommit(); + return true; + } + else if (!m_SegmentList.retired(pHeadSegment)) { + m_Stat.onSucceededCommit(); + return true; + } + else { + // segment is marked as removed, + // so try to mark item as removed + // if not succeeded, item already poped + if 
(!pHeadSegment->cells[index].data.compare_exchange_strong(new_item, new_item | 1, + memory_model::memory_order_acquire, atomics::memory_order_relaxed)) + { + m_Stat.onSucceededCommit(); + return true; + } + } + + m_Stat.onFailedCommit(); + return false; + } + //@endcond //@cond bool do_pop( typename gc::Guard& itemGuard ) - { - typename gc::Guard segmentGuard; - while (true) { - segment * pHeadSegment = m_SegmentList.head(segmentGuard); - if (!pHeadSegment) { - // Stack is empty - m_Stat.onPopEmpty(); - return false; - } - size_t i = 1; - size_t qf = quasi_factor(); - regular_cell item; - do - { - CDS_DEBUG_ONLY(++nLoopCount); - // Guard the item - // In segmented stack the cell cannot be reused - // So no loop is needed here to protect the cell - item = pHeadSegment->cells[qf - i].data.load(memory_model::memory_order_relaxed); - itemGuard.assign(item.ptr()); - - // Check if this cell is not empty and not marked deleted - if (item.ptr() && !item.bits()) { - // Try to mark the cell as deleted - if (pHeadSegment->cells[qf - i].data.compare_exchange_strong(item, item | 1, - memory_model::memory_order_acquire, atomics::memory_order_relaxed)) - { - --m_ItemCounter; - m_Stat.onPop(); - - return true; - } - assert(item.bits()); - m_Stat.onPopContended(); - } - i++; - } while (i <= qf); - // No nodes to pop, try remove the first segment - pHeadSegment = m_SegmentList.remove_head(pHeadSegment, segmentGuard); - } + { + typename gc::Guard segmentGuard; + while (true) { + segment * pHeadSegment = m_SegmentList.head(segmentGuard); + if (!pHeadSegment) { + // Stack is empty + m_Stat.onPopEmpty(); + return false; + } + size_t i = 1; + size_t qf = quasi_factor(); + regular_cell item; + do + { + CDS_DEBUG_ONLY(++nLoopCount); + // Guard the item + // In segmented stack the cell cannot be reused + // So no loop is needed here to protect the cell + item = pHeadSegment->cells[qf - i].data.load(memory_model::memory_order_relaxed); + itemGuard.assign(item.ptr()); + + // Check if this 
cell is not empty and not marked deleted + if (item.ptr() && !item.bits()) { + // Try to mark the cell as deleted + if (pHeadSegment->cells[qf - i].data.compare_exchange_strong(item, item | 1, + memory_model::memory_order_acquire, atomics::memory_order_relaxed)) + { + --m_ItemCounter; + m_Stat.onPop(); + + return true; + } + assert(item.bits()); + m_Stat.onPopContended(); + } + i++; + } while (i <= qf); + // No nodes to pop, try remove the first segment + pHeadSegment = m_SegmentList.remove_head(pHeadSegment, segmentGuard); + } } //@endcond }; From e2c52e8b31a581890060b4ebabaa119b6f3bd921 Mon Sep 17 00:00:00 2001 From: Veelz Date: Sat, 2 Feb 2019 15:08:16 +0300 Subject: [PATCH 6/6] fix hazard_ptr count --- cds/intrusive/segmented_stack.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cds/intrusive/segmented_stack.h b/cds/intrusive/segmented_stack.h index 2c67e2ec6..22ed5cd7b 100644 --- a/cds/intrusive/segmented_stack.h +++ b/cds/intrusive/segmented_stack.h @@ -159,7 +159,7 @@ namespace cds { namespace intrusive { typedef typename traits::stat stat; ///< Internal statistics policy typedef typename traits::lock_type lock_type; ///< Type of mutex for maintaining an internal list of allocated segments. - static const size_t c_nHazardPtrCount = 2 ; ///< Count of hazard pointer required for the algorithm + static const size_t c_nHazardPtrCount = 3 ; ///< Count of hazard pointer required for the algorithm protected: //@cond