SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_DETAILS_LAZY_LIST_BASE_H
namespace cds { namespace container {
- /// LazyList ordered list related definitions
+ /// \p LazyList ordered list related definitions
/** @ingroup cds_nonintrusive_helper
*/
namespace lazy_list {
+
+ /// \p LazyList internal statistics, see \p cds::intrusive::lazy_list::stat
+ template <typename EventCounter = cds::atomicity::event_counter>
+ using stat = cds::intrusive::lazy_list::stat< EventCounter >;
+
+ /// \p LazyList empty internal statistics, see \p cds::intrusive::lazy_list::empty_stat
+ typedef cds::intrusive::lazy_list::empty_stat empty_stat;
+
+ //@cond
+ template <typename Stat = lazy_list::stat<>>
+ using wrapped_stat = cds::intrusive::lazy_list::wrapped_stat< Stat >;
+ //@endcond
+
/// LazyList traits
/**
Either \p compare or \p less or both must be specified.
/// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter to enable item counting
typedef atomicity::empty_item_counter item_counter;
+ /// Internal statistics
+ /**
+ By default, internal statistics is disabled (\p lazy_list::empty_stat).
+ Use \p lazy_list::stat to enable it.
+ */
+ typedef empty_stat stat;
+
/// C++ memory ordering model
/**
Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
- or \p opt::v::sequential_consistent (sequentially consisnent memory model).
+ or \p opt::v::sequential_consistent (sequentially consistent memory model).
*/
typedef opt::v::relaxed_ordering memory_model;
- \p opt::back_off - back-off strategy used. If the option is not specified, \p cds::backoff::Default is used.
- \p opt::item_counter - the type of item counting feature. Default is disabled (\p atomicity::empty_item_counter).
To enable item counting use \p atomicity::item_counter.
+ - \p opt::stat - internal statistics. By default, it is disabled (\p lazy_list::empty_stat).
+ To enable it use \p lazy_list::stat
- \p opt::allocator - the allocator used for creating and freeing list's item. Default is \ref CDS_DEFAULT_ALLOCATOR macro.
- \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
- or \p opt::v::sequential_consistent (sequentially consisnent memory model).
+ or \p opt::v::sequential_consistent (sequentially consistent memory model).
*/
template <typename... Options>
struct make_traits {
// Tag for selecting lazy list implementation
/**
- This struct is empty and it is used only as a tag for selecting LazyList
+ This empty struct is used only as a tag for selecting \p LazyList
as ordered list implementation in declaration of some classes.
See \p split_list::traits::ordered_list as an example.
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_DETAILS_MAKE_MICHAEL_KVLIST_H
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_DETAILS_MAKE_MICHAEL_LIST_H
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_DETAILS_MICHAEL_LIST_BASE_H
/** @ingroup cds_nonintrusive_helper
*/
namespace michael_list {
+
+ /// \p MichaelList internal statistics, see \p cds::intrusive::michael_list::stat
+ template <typename EventCounter = cds::atomicity::event_counter>
+ using stat = cds::intrusive::michael_list::stat< EventCounter >;
+
+ /// \p MichaelList empty internal statistics, see \p cds::intrusive::michael_list::empty_stat
+ typedef cds::intrusive::michael_list::empty_stat empty_stat;
+
+ //@cond
+ template <typename Stat = michael_list::stat<>>
+ using wrapped_stat = cds::intrusive::michael_list::wrapped_stat< Stat >;
+ //@endcond
+
/// MichaelList traits
struct traits
{
- typedef CDS_DEFAULT_ALLOCATOR allocator ; ///< allocator used to allocate new node
+ typedef CDS_DEFAULT_ALLOCATOR allocator; ///< allocator used to allocate new node
/// Key comparison functor
/**
/// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter to enable item counting
typedef atomicity::empty_item_counter item_counter;
+ /// Internal statistics
+ /**
+ By default, internal statistics is disabled (\p michael_list::empty_stat).
+ Use \p michael_list::stat to enable it.
+ */
+ typedef empty_stat stat;
+
/// C++ memory ordering model
/**
Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
/// Metafunction converting option list to \p michael_list::traits
/**
+ Supported \p Options are:
+ - \p opt::compare - key comparison functor. No default functor is provided.
+ If the option is not specified, the \p opt::less is used.
+ - \p opt::less - specifies binary predicate used for key comparison. Default is \p std::less<T>.
+ - \p opt::allocator - an allocator, default is \p CDS_DEFAULT_ALLOCATOR
+ - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used.
+ - \p opt::item_counter - the type of item counting feature. Default is disabled (\p atomicity::empty_item_counter).
+ To enable item counting use \p atomicity::item_counter.
+ - \p opt::stat - internal statistics. By default, it is disabled (\p michael_list::empty_stat).
+ To enable it use \p michael_list::stat
+ - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
+ or \p opt::v::sequential_consistent (sequentially consistent memory model).
+ - \p opt::rcu_check_deadlock - a deadlock checking policy for \ref cds_intrusive_MichaelList_rcu "RCU-based MichaelList"
+ Default is \p opt::v::rcu_throw_deadlock
*/
template <typename... Options>
struct make_traits {
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_IMPL_LAZY_KVLIST_H
typedef typename base_class::item_counter item_counter; ///< Item counter type
typedef typename maker::key_comparator key_comparator; ///< key comparing functor
typedef typename base_class::memory_model memory_model; ///< Memory ordering. See \p cds::opt::memory_model
+ typedef typename base_class::stat stat; ///< Internal statistics
static CDS_CONSTEXPR const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm
LazyKVList()
{}
+ //@cond
+ template <typename Stat, typename = std::enable_if<std::is_same<stat, lazy_list::wrapped_stat<Stat>>::value >>
+ explicit LazyKVList( Stat& st )
+ : base_class( st )
+ {}
+ //@endcond
+
/// Destructor clears the list
~LazyKVList()
{
return base_class::size();
}
+ /// Returns const reference to internal statistics
+ stat const& statistics() const
+ {
+ return base_class::statistics();
+ }
+
/// Clears the list
void clear()
{
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_IMPL_LAZY_LIST_H
typedef typename base_class::item_counter item_counter; ///< Item counting policy used
typedef typename maker::key_comparator key_comparator; ///< key comparison functor
typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
+ typedef typename base_class::stat stat; ///< Internal statistics
static CDS_CONSTEXPR const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm
LazyList()
{}
+ //@cond
+ template <typename Stat, typename = std::enable_if<std::is_same<stat, lazy_list::wrapped_stat<Stat>>::value >>
+ explicit LazyList( Stat& st )
+ : base_class( st )
+ {}
+ //@endcond
+
/// Destructor clears the list
~LazyList()
{
return base_class::size();
}
+ /// Returns const reference to internal statistics
+ stat const& statistics() const
+ {
+ return base_class::statistics();
+ }
+
/// Clears the list
void clear()
{
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_IMPL_MICHAEL_KVLIST_H
typedef typename base_class::item_counter item_counter; ///< Item counting policy used
typedef typename maker::key_comparator key_comparator; ///< key comparison functor
typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
+ typedef typename base_class::stat stat; ///< Internal statistics
static CDS_CONSTEXPR const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm
MichaelKVList()
{}
- /// List desctructor
+ //@cond
+ template <typename Stat, typename = std::enable_if<std::is_same<stat, michael_list::wrapped_stat<Stat>>::value >>
+ explicit MichaelKVList( Stat& st )
+ : base_class( st )
+ {}
+ //@endcond
+
+ /// List destructor
/**
Clears the list
*/
base_class::clear();
}
+ /// Returns const reference to internal statistics
+ stat const& statistics() const
+ {
+ return base_class::statistics();
+ }
+
protected:
//@cond
bool insert_node_at( head_type& refHead, node_type * pNode )
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_IMPL_MICHAEL_LIST_H
typedef typename base_class::item_counter item_counter; ///< Item counting policy used
typedef typename maker::key_comparator key_comparator; ///< key comparison functor
typedef typename base_class::memory_model memory_model; ///< Memory ordering. See \p cds::opt::memory_model option
+ typedef typename base_class::stat stat; ///< Internal statistics
static CDS_CONSTEXPR const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm
MichaelList()
{}
+ //@cond
+ template <typename Stat, typename = std::enable_if<std::is_same<stat, michael_list::wrapped_stat<Stat>>::value >>
+ explicit MichaelList( Stat& st )
+ : base_class( st )
+ {}
+ //@endcond
+
/// List destructor
/**
Clears the list
base_class::clear();
}
+ /// Returns const reference to internal statistics
+ stat const& statistics() const
+ {
+ return base_class::statistics();
+ }
+
protected:
//@cond
bool insert_node( node_type * pNode )
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_LAZY_KVLIST_NOGC_H
typedef typename base_class::item_counter item_counter; ///< Item counting policy used
typedef typename maker::key_comparator key_comparator; ///< key comparison functor
typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
+ typedef typename base_class::stat stat; ///< Internal statistics
static CDS_CONSTEXPR bool const c_bSort = base_class::c_bSort; ///< List type: ordered (\p true) or unordered (\p false)
protected:
LazyKVList()
{}
+ //@cond
+ template <typename Stat, typename = std::enable_if<std::is_same<stat, lazy_list::wrapped_stat<Stat>>::value >>
+ explicit LazyKVList( Stat& st )
+ : base_class( st )
+ {}
+ //@endcond
+
/// Desctructor clears the list
~LazyKVList()
{
return base_class::size();
}
+ /// Returns const reference to internal statistics
+ stat const& statistics() const
+ {
+ return base_class::statistics();
+ }
+
/// Clears the list
/**
Post-condition: the list is empty
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_LAZY_KVLIST_RCU_H
typedef typename base_class::item_counter item_counter; ///< Item counting policy used
typedef typename maker::key_comparator key_comparator; ///< key comparison functor
typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
+ typedef typename base_class::stat stat; ///< Internal statistics
typedef typename base_class::rcu_check_deadlock rcu_check_deadlock ; ///< RCU deadlock checking policy
typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock
LazyKVList()
{}
+ //@cond
+ template <typename Stat, typename = std::enable_if<std::is_same<stat, lazy_list::wrapped_stat<Stat>>::value >>
+ explicit LazyKVList( Stat& st )
+ : base_class( st )
+ {}
+ //@endcond
+
/// Destructor clears the list
~LazyKVList()
{
return base_class::size();
}
+ /// Returns const reference to internal statistics
+ stat const& statistics() const
+ {
+ return base_class::statistics();
+ }
+
/// Clears the list
void clear()
{
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_LAZY_LIST_NOGC_H
typedef T value_type; ///< Type of value stored in the list
typedef Traits traits; ///< List traits
- typedef typename base_class::back_off back_off; ///< Back-off strategy used
- typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes
- typedef typename base_class::item_counter item_counter; ///< Item counting policy used
- typedef typename maker::key_comparator key_comparator; ///< key comparing functor
- typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
+ typedef typename base_class::back_off back_off; ///< Back-off strategy used
+ typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes
+ typedef typename base_class::item_counter item_counter; ///< Item counting policy used
+ typedef typename maker::key_comparator key_comparator; ///< key comparing functor
+ typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
+ typedef typename base_class::stat stat; ///< Internal statistics
+
static CDS_CONSTEXPR bool const c_bSort = base_class::c_bSort; ///< List type: ordered (\p true) or unordered (\p false)
protected:
LazyList()
{}
+ //@cond
+ template <typename Stat, typename = std::enable_if<std::is_same<stat, lazy_list::wrapped_stat<Stat>>::value >>
+ explicit LazyList( Stat& st )
+ : base_class( st )
+ {}
+ //@endcond
+
/// Desctructor clears the list
~LazyList()
{
return base_class::size();
}
+ /// Returns const reference to internal statistics
+ stat const& statistics() const
+ {
+ return base_class::statistics();
+ }
+
/// Clears the list
void clear()
{
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_LAZY_LIST_RCU_H
typedef T value_type; ///< Type of value stored in the list
typedef Traits traits; ///< List traits
- typedef typename base_class::back_off back_off; ///< Back-off strategy
- typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes
- typedef typename base_class::item_counter item_counter; ///< Item counting policy used
- typedef typename maker::key_comparator key_comparator; ///< key compare functor
- typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
+ typedef typename base_class::back_off back_off; ///< Back-off strategy
+ typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes
+ typedef typename base_class::item_counter item_counter; ///< Item counting policy used
+ typedef typename maker::key_comparator key_comparator; ///< key compare functor
+ typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
+ typedef typename base_class::stat stat; ///< Internal statistics
typedef typename base_class::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock
LazyList()
{}
+ //@cond
+ template <typename Stat, typename = std::enable_if<std::is_same<stat, lazy_list::wrapped_stat<Stat>>::value >>
+ explicit LazyList( Stat& st )
+ : base_class( st )
+ {}
+ //@endcond
+
/// Desctructor clears the list
~LazyList()
{
return base_class::size();
}
+ /// Returns const reference to internal statistics
+ stat const& statistics() const
+ {
+ return base_class::statistics();
+ }
+
/// Clears the list
void clear()
{
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_MICHAEL_KVLIST_NOGC_H
typedef typename base_class::item_counter item_counter; ///< Item counting policy used
typedef typename maker::key_comparator key_comparator; ///< key comparison functor
typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
+ typedef typename base_class::stat stat; ///< Internal statistics
protected:
//@cond
MichaelKVList()
{}
- /// List desctructor
+ //@cond
+ template <typename Stat, typename = std::enable_if<std::is_same<stat, michael_list::wrapped_stat<Stat>>::value >>
+ explicit MichaelKVList( Stat& st )
+ : base_class( st )
+ {}
+ //@endcond
+
+ /// List destructor
/**
Clears the list
*/
return base_class::size();
}
+ /// Returns const reference to internal statistics
+ stat const& statistics() const
+ {
+ return base_class::statistics();
+ }
+
/// Clears the list
void clear()
{
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_MICHAEL_KVLIST_RCU_H
typedef typename base_class::item_counter item_counter; ///< Item counting policy
typedef typename maker::key_comparator key_comparator; ///< key comparison functor
typedef typename base_class::memory_model memory_model; ///< Memory ordering. See \p michael_list::traits::memory_model
+ typedef typename base_class::stat stat; ///< Internal statistics
typedef typename base_class::rcu_check_deadlock rcu_check_deadlock ; ///< RCU deadlock checking policy
typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock
MichaelKVList()
{}
- /// List desctructor
+ //@cond
+ template <typename Stat, typename = std::enable_if<std::is_same<stat, michael_list::wrapped_stat<Stat>>::value >>
+ explicit MichaelKVList( Stat& st )
+ : base_class( st )
+ {}
+ //@endcond
+
+ /// List destructor
/**
Clears the list
*/
return base_class::size();
}
+ /// Returns const reference to internal statistics
+ stat const& statistics() const
+ {
+ return base_class::statistics();
+ }
+
/// Clears the list
/**
Post-condition: the list is empty
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_MICHAEL_LIST_NOGC_H
typedef typename base_class::item_counter item_counter; ///< Item counting policy used
typedef typename maker::key_comparator key_comparator; ///< key comparison functor
typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
+ typedef typename base_class::stat stat; ///< Internal statistics
protected:
//@cond
MichaelList()
{}
- /// List desctructor
+ //@cond
+ template <typename Stat, typename = std::enable_if<std::is_same<stat, michael_list::wrapped_stat<Stat>>::value >>
+ explicit MichaelList( Stat& st )
+ : base_class( st )
+ {}
+ //@endcond
+
+ /// List destructor
/**
Clears the list
*/
return base_class::size();
}
+ /// Returns const reference to internal statistics
+ stat const& statistics() const
+ {
+ return base_class::statistics();
+ }
+
/// Clears the list
void clear()
{
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_MICHAEL_LIST_RCU_H
typedef typename base_class::item_counter item_counter; ///< Item counting policy used
typedef typename maker::key_comparator key_comparator; ///< key comparison functor
typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
+ typedef typename base_class::stat stat; ///< Internal statistics
typedef typename base_class::rcu_check_deadlock rcu_check_deadlock ; ///< RCU deadlock checking policy
typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock
MichaelList()
{}
+ //@cond
+ template <typename Stat, typename = std::enable_if<std::is_same<stat, michael_list::wrapped_stat<Stat>>::value >>
+ explicit MichaelList( Stat& st )
+ : base_class( st )
+ {}
+ //@endcond
+
/// List destructor
/**
Clears the list
return base_class::size();
}
+ /// Returns const reference to internal statistics
+ stat const& statistics() const
+ {
+ return base_class::statistics();
+ }
+
/// Clears the list
void clear()
{
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_MSPRIORITY_QUEUE_H
namespace mspriority_queue {
#ifdef CDS_DOXYGEN_INVOKED
- /// Synonym for cds::intrusive::mspriority_queue::stat
+ /// Synonym for \p cds::intrusive::mspriority_queue::stat
typedef cds::intrusive::mspriority_queue::stat<> stat;
- /// Synonym for cds::intrusive::mspriority_queue::empty_stat
+ /// Synonym for \p cds::intrusive::mspriority_queue::empty_stat
typedef cds::intrusive::mspriority_queue::empty_stat empty_stat;
+
+ /// Synonym for \p cds::intrusive::mspriority_queue::monotonic_counter
+ typedef cds::intrusive::mspriority_queue::monotonic_counter monotonic_counter;
#else
using cds::intrusive::mspriority_queue::stat;
using cds::intrusive::mspriority_queue::empty_stat;
+ using cds::intrusive::mspriority_queue::monotonic_counter;
#endif
/// MSPriorityQueue traits
If the compiler supports move semantics it would be better to specify the move policy
based on the move semantics for type \p T.
- \p opt::stat - internal statistics. Available types: \p mspriority_queue::stat, \p mspriority_queue::empty_stat (the default, no overhead)
- */
+ - \p opt::item_counter - an item counter type for \p MSPriorityQueue.
+ Available type: \p cds::bitop::bit_reverse_counter, \p mspriority_queue::monotonic_counter. See \p cds::intrusive::mspriority_queue::traits::item_counter for details.
+ */
template <typename... Options>
struct make_traits {
# ifdef CDS_DOXYGEN_INVOKED
typedef Traits traits ; ///< Traits template parameter
typedef typename base_class::key_comparator key_comparator; ///< priority comparing functor based on opt::compare and opt::less option setter.
- typedef typename base_class::lock_type lock_type; ///< heap's size lock type
- typedef typename base_class::back_off back_off ; ///< Back-off strategy
- typedef typename base_class::stat stat ; ///< internal statistics type
+ typedef typename base_class::lock_type lock_type; ///< heap's size lock type
+ typedef typename base_class::back_off back_off ; ///< Back-off strategy
+ typedef typename traits::stat stat; ///< internal statistics type, see \p intrusive::mspriority_queue::traits::stat
+ typedef typename traits::item_counter item_counter;///< Item counter type, see \p intrusive::mspriority_queue::traits::item_counter
typedef typename traits::allocator::template rebind<value_type>::other allocator_type; ///< Value allocator
- typedef typename traits::move_policy move_policy; ///< Move policy for type \p T
+ typedef typename traits::move_policy move_policy; ///< Move policy for type \p T
protected:
//@cond
counter_type dec()
{
+ counter_type ret = m_nReversed;
--m_nCounter;
int nBit;
for ( nBit = m_nHighBit - 1; nBit >= 0; --nBit ) {
m_nReversed = m_nCounter;
--m_nHighBit;
}
- return m_nReversed;
+ return ret;
}
counter_type value() const
{
return m_nReversed;
}
+
+ int high_bit() const
+ {
+ return m_nHighBit;
+ }
};
}} // namespace cds::bitop
--- /dev/null
+/*
+ This file is a part of libcds - Concurrent Data Structures library
+
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef CDSLIB_INTRUSIVE_DETAILS_ITERABLE_LIST_BASE_H
+#define CDSLIB_INTRUSIVE_DETAILS_ITERABLE_LIST_BASE_H
+
+#include <type_traits>
+#include <cds/intrusive/details/base.h>
+#include <cds/opt/compare.h>
+#include <cds/algo/atomic.h>
+#include <cds/details/marked_ptr.h>
+#include <cds/urcu/options.h>
+
+namespace cds { namespace intrusive {
+
+ /// \p IterableList ordered list related definitions
+ /** @ingroup cds_intrusive_helper
+ */
+ namespace iterable_list {
+
+ /// Node type
+ template <typename T>
+ struct node
+ {
+ typedef T value_type; ///< Value type
+
+ atomics::atomic< node* > next; ///< pointer to next node in the list
+ atomics::atomic< value_type* > data; ///< pointer to user data, \p nullptr if the node is free
+
+ //@cond
+ node()
+ : next( nullptr )
+ , data( nullptr )
+ {}
+
+ node( value_type * pVal )
+ : next( nullptr )
+ , data( pVal )
+ {}
+ //@endcond
+ };
+
+ /// \p IterableList internal statistics
+ template <typename EventCounter = cds::atomicity::event_counter>
+ struct stat {
+ typedef EventCounter event_counter; ///< Event counter type
+
+ event_counter m_nInsertSuccess; ///< Number of success \p insert() operations
+ event_counter m_nInsertFailed; ///< Number of failed \p insert() operations
+ event_counter m_nInsertRetry; ///< Number of attempts to insert new item
+ event_counter m_nUpdateNew; ///< Number of new item inserted for \p update()
+ event_counter m_nUpdateExisting; ///< Number of existing item updates
+ event_counter m_nUpdateFailed; ///< Number of failed \p update() call
+ event_counter m_nUpdateRetry; ///< Number of attempts to update the item
+ event_counter m_nEraseSuccess; ///< Number of successful \p erase(), \p unlink(), \p extract() operations
+ event_counter m_nEraseFailed; ///< Number of failed \p erase(), \p unlink(), \p extract() operations
+ event_counter m_nEraseRetry; ///< Number of attempts to \p erase() an item
+ event_counter m_nFindSuccess; ///< Number of successful \p find() and \p get() operations
+ event_counter m_nFindFailed; ///< Number of failed \p find() and \p get() operations
+
+ event_counter m_nNodeCreated; ///< Number of created internal nodes
+ event_counter m_nNodeRemoved; ///< Number of removed internal nodes
+
+ //@cond
+ void onInsertSuccess() { ++m_nInsertSuccess; }
+ void onInsertFailed() { ++m_nInsertFailed; }
+ void onInsertRetry() { ++m_nInsertRetry; }
+ void onUpdateNew() { ++m_nUpdateNew; }
+ void onUpdateExisting() { ++m_nUpdateExisting; }
+ void onUpdateFailed() { ++m_nUpdateFailed; }
+ void onUpdateRetry() { ++m_nUpdateRetry; }
+ void onEraseSuccess() { ++m_nEraseSuccess; }
+ void onEraseFailed() { ++m_nEraseFailed; }
+ void onEraseRetry() { ++m_nEraseRetry; }
+ void onFindSuccess() { ++m_nFindSuccess; }
+ void onFindFailed() { ++m_nFindFailed; }
+
+ void onNodeCreated() { ++m_nNodeCreated; }
+ void onNodeRemoved() { ++m_nNodeRemoved; }
+ //@endcond
+ };
+
+ /// \p IterableList empty internal statistics
+ struct empty_stat {
+ //@cond
+ void onInsertSuccess() const {}
+ void onInsertFailed() const {}
+ void onInsertRetry() const {}
+ void onUpdateNew() const {}
+ void onUpdateExisting() const {}
+ void onUpdateFailed() const {}
+ void onUpdateRetry() const {}
+ void onEraseSuccess() const {}
+ void onEraseFailed() const {}
+ void onEraseRetry() const {}
+ void onFindSuccess() const {}
+ void onFindFailed() const {}
+
+ void onNodeCreated() const {}
+ void onNodeRemoved() const {}
+ //@endcond
+ };
+
+ //@cond
+ template <typename Stat = iterable_list::stat<>>
+ struct wrapped_stat {
+ typedef Stat stat_type;
+
+ wrapped_stat( stat_type& st )
+ : m_stat( st )
+ {}
+
+ void onInsertSuccess() { m_stat.onInsertSuccess(); }
+ void onInsertFailed() { m_stat.onInsertFailed(); }
+ void onInsertRetry() { m_stat.onInsertRetry(); }
+ void onUpdateNew() { m_stat.onUpdateNew(); }
+ void onUpdateExisting() { m_stat.onUpdateExisting();}
+ void onUpdateFailed() { m_stat.onUpdateFailed(); }
+ void onUpdateRetry() { m_stat.onUpdateRetry(); }
+ void onEraseSuccess() { m_stat.onEraseSuccess(); }
+ void onEraseFailed() { m_stat.onEraseFailed(); }
+ void onEraseRetry() { m_stat.onEraseRetry(); }
+ void onFindSuccess() { m_stat.onFindSuccess(); }
+ void onFindFailed() { m_stat.onFindFailed(); }
+
+ void onNodeCreated() { m_stat.onNodeCreated(); }
+ void onNodeRemoved() { m_stat.onNodeRemoved(); }
+
+ stat_type& m_stat;
+ };
+ //@endcond
+
+
+ /// \p IterableList traits
+ struct traits
+ {
+ /// Key comparison functor
+ /**
+ No default functor is provided. If the option is not specified, the \p less is used.
+ */
+ typedef opt::none compare;
+
+ /// Specifies binary predicate used for key compare.
+ /**
+ Default is \p std::less<T>
+ */
+ typedef opt::none less;
+
+ /// Node allocator
+ typedef CDS_DEFAULT_ALLOCATOR node_allocator;
+
+ /// Back-off strategy
+ typedef cds::backoff::Default back_off;
+
+ /// Disposer for removing items
+ typedef opt::v::empty_disposer disposer;
+
+ /// Internal statistics
+ /**
+ By default, internal statistics is disabled (\p iterable_list::empty_stat).
+ Use \p iterable_list::stat to enable it.
+ */
+ typedef empty_stat stat;
+
+ /// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter to enable item counting
+ typedef atomicity::empty_item_counter item_counter;
+
+ /// C++ memory ordering model
+ /**
+ Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
+ or \p opt::v::sequential_consistent (sequentially consistent memory model).
+ */
+ typedef opt::v::relaxed_ordering memory_model;
+
+ /// RCU deadlock checking policy (only for \ref cds_intrusive_IterableList_rcu "RCU-based IterableList")
+ /**
+ List of available policy see \p opt::rcu_check_deadlock
+ */
+ typedef opt::v::rcu_throw_deadlock rcu_check_deadlock;
+ };
+
+ /// Metafunction converting option list to \p iterable_list::traits
+ /**
+ Supported \p Options are:
+ - \p opt::compare - key comparison functor. No default functor is provided.
+ If the option is not specified, the \p opt::less is used.
+ - \p opt::less - specifies binary predicate used for key comparison. Default is \p std::less<T>.
+ - \p opt::node_allocator - node allocator, default is \p std::allocator.
+ - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used.
+ - \p opt::disposer - the functor used for disposing removed items. Default is \p opt::v::empty_disposer. Due the nature
+ of GC schema the disposer may be called asynchronously.
+ - \p opt::item_counter - the type of item counting feature. Default is disabled (\p atomicity::empty_item_counter).
+ To enable item counting use \p atomicity::item_counter.
+ - \p opt::stat - internal statistics. By default, it is disabled (\p iterable_list::empty_stat).
+ To enable it use \p iterable_list::stat
+ - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
+ or \p opt::v::sequential_consistent (sequentially consistent memory model).
+ - \p opt::rcu_check_deadlock - a deadlock checking policy for \ref cds_intrusive_IterableList_rcu "RCU-based IterableList"
+ Default is \p opt::v::rcu_throw_deadlock
+ */
+ template <typename... Options>
+ struct make_traits {
+# ifdef CDS_DOXYGEN_INVOKED
+ typedef implementation_defined type ; ///< Metafunction result
+# else
+ typedef typename cds::opt::make_options<
+ typename cds::opt::find_type_traits< traits, Options... >::type
+ ,Options...
+ >::type type;
+# endif
+ };
+
+
+ //@cond
+ template <typename Stat>
+ struct select_stat_wrapper
+ {
+ typedef Stat stat;
+ typedef iterable_list::wrapped_stat<Stat> wrapped_stat;
+ enum {
+ empty = false
+ };
+ };
+
+ template <>
+ struct select_stat_wrapper< empty_stat >
+ {
+ typedef empty_stat stat;
+ typedef empty_stat wrapped_stat;
+ enum {
+ empty = true
+ };
+ };
+
+ template <typename Stat>
+ struct select_stat_wrapper< iterable_list::wrapped_stat<Stat>>: public select_stat_wrapper<Stat>
+ {};
+ //@endcond
+
+ } // namespace iterable_list
+
+ //@cond
+ // Forward declaration
+ template < class GC, typename T, class Traits = iterable_list::traits >
+ class IterableList;
+ //@endcond
+
+ //@cond
+ template <typename List>
+ struct is_iterable_list {
+ enum {
+ value = false
+ };
+ };
+
+ template <typename GC, typename T, typename Traits>
+ struct is_iterable_list< IterableList< GC, T, Traits >> {
+ enum {
+ value = true
+ };
+ };
+ //@endcond
+
+}} // namespace cds::intrusive
+
+#endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_ITERABLE_LIST_BASE_H
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_INTRUSIVE_DETAILS_LAZY_LIST_BASE_H
//@endcond
};
+ /// \p LazyList internal statistics
+ template <typename EventCounter = cds::atomicity::event_counter>
+ struct stat {
+ typedef EventCounter event_counter; ///< Event counter type
+
+ event_counter m_nInsertSuccess; ///< Number of success \p insert() operations
+ event_counter m_nInsertFailed; ///< Number of failed \p insert() operations
+ event_counter m_nInsertRetry; ///< Number of attempts to insert new item
+ event_counter m_nUpdateNew; ///< Number of new item inserted for \p update()
+ event_counter m_nUpdateExisting; ///< Number of existing item updates
+ event_counter m_nUpdateFailed; ///< Number of failed \p update() call
+ event_counter m_nUpdateRetry; ///< Number of attempts to \p update() the item
+ event_counter m_nUpdateMarked; ///< Number of attempts to \p update() logically deleted (marked) items
+ event_counter m_nEraseSuccess; ///< Number of successful \p erase(), \p unlink(), \p extract() operations
+ event_counter m_nEraseFailed; ///< Number of failed \p erase(), \p unlink(), \p extract() operations
+ event_counter m_nEraseRetry; ///< Number of attempts to \p erase() an item
+ event_counter m_nFindSuccess; ///< Number of successful \p find() and \p get() operations
+ event_counter m_nFindFailed; ///< Number of failed \p find() and \p get() operations
+
+ event_counter m_nValidationSuccess; ///< Number of successful validations of search result
+ event_counter m_nValidationFailed; ///< Number of failed validations of search result
+
+ //@cond
+ void onInsertSuccess() { ++m_nInsertSuccess; }
+ void onInsertFailed() { ++m_nInsertFailed; }
+ void onInsertRetry() { ++m_nInsertRetry; }
+ void onUpdateNew() { ++m_nUpdateNew; }
+ void onUpdateExisting() { ++m_nUpdateExisting; }
+ void onUpdateFailed() { ++m_nUpdateFailed; }
+ void onUpdateRetry() { ++m_nUpdateRetry; }
+ void onUpdateMarked() { ++m_nUpdateMarked; }
+ void onEraseSuccess() { ++m_nEraseSuccess; }
+ void onEraseFailed() { ++m_nEraseFailed; }
+ void onEraseRetry() { ++m_nEraseRetry; }
+ void onFindSuccess() { ++m_nFindSuccess; }
+ void onFindFailed() { ++m_nFindFailed; }
+
+ void onValidationSuccess() { ++m_nValidationSuccess; }
+ void onValidationFailed() { ++m_nValidationFailed; }
+ //@endcond
+ };
+
+ /// \p LazyList empty internal statistics
+ struct empty_stat {
+ //@cond
+ void onInsertSuccess() const {}
+ void onInsertFailed() const {}
+ void onInsertRetry() const {}
+ void onUpdateNew() const {}
+ void onUpdateExisting() const {}
+ void onUpdateFailed() const {}
+ void onUpdateRetry() const {}
+ void onUpdateMarked() const {}
+ void onEraseSuccess() const {}
+ void onEraseFailed() const {}
+ void onEraseRetry() const {}
+ void onFindSuccess() const {}
+ void onFindFailed() const {}
+
+ void onValidationSuccess() const {}
+ void onValidationFailed() const {}
+ //@endcond
+ };
+
+ //@cond
+ template <typename Stat = lazy_list::stat<>>
+ struct wrapped_stat {
+ typedef Stat stat_type;
+
+ wrapped_stat( stat_type& st )
+ : m_stat( st )
+ {}
+
+ void onInsertSuccess() { m_stat.onInsertSuccess(); }
+ void onInsertFailed() { m_stat.onInsertFailed(); }
+ void onInsertRetry() { m_stat.onInsertRetry(); }
+ void onUpdateNew() { m_stat.onUpdateNew(); }
+ void onUpdateExisting() { m_stat.onUpdateExisting(); }
+ void onUpdateFailed() { m_stat.onUpdateFailed(); }
+ void onUpdateRetry() { m_stat.onUpdateRetry(); }
+ void onUpdateMarked() { m_stat.onUpdateMarked(); }
+ void onEraseSuccess() { m_stat.onEraseSuccess(); }
+ void onEraseFailed() { m_stat.onEraseFailed(); }
+ void onEraseRetry() { m_stat.onEraseRetry(); }
+ void onFindSuccess() { m_stat.onFindSuccess(); }
+ void onFindFailed() { m_stat.onFindFailed(); }
+
+ void onValidationSuccess() { m_stat.onValidationSuccess(); }
+ void onValidationFailed() { m_stat.onValidationFailed(); }
+
+ stat_type& m_stat;
+ };
+ //@endcond
+
+
/// LazyList traits
struct traits
{
/// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter to enable item counting
typedef atomicity::empty_item_counter item_counter;
+ /// Internal statistics
+ /**
+ By default, internal statistics is disabled (\p lazy_list::empty_stat).
+ Use \p lazy_list::stat to enable it.
+ */
+ typedef empty_stat stat;
+
/// Link fields checking feature
/**
Default is \p opt::debug_check_link
- \p opt::link_checker - the type of node's link fields checking. Default is \p opt::debug_check_link
- \p opt::item_counter - the type of item counting feature. Default is disabled (\p atomicity::empty_item_counter).
To enable item counting use \p atomicity::item_counter.
+ - \p opt::stat - internal statistics. By default, it is disabled (\p lazy_list::empty_stat).
+ To enable it use \p lazy_list::stat
- \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
or \p opt::v::sequential_consistent (sequentially consisnent memory model).
- \p opt::rcu_check_deadlock - a deadlock checking policy for \ref cds_intrusive_MichaelList_rcu "RCU-based MichaelList"
# endif
};
+ //@cond
+ template <typename Stat>
+ struct select_stat_wrapper
+ {
+ typedef Stat stat;
+ typedef lazy_list::wrapped_stat<Stat> wrapped_stat;
+ enum {
+ empty = false
+ };
+ };
+
+ template <>
+ struct select_stat_wrapper< empty_stat >
+ {
+ typedef empty_stat stat;
+ typedef empty_stat wrapped_stat;
+ enum {
+ empty = true
+ };
+ };
+
+ template <typename Stat>
+ struct select_stat_wrapper< lazy_list::wrapped_stat<Stat>>: public select_stat_wrapper< Stat >
+ {};
+ //@endcond
+
} // namespace lazy_list
//@cond
class LazyList;
//@endcond
+ //@cond
+ template <typename List>
+ struct is_lazy_list {
+ enum {
+ value = false
+ };
+ };
+
+ template <typename GC, typename T, typename Traits>
+ struct is_lazy_list< LazyList< GC, T, Traits >> {
+ enum {
+ value = true
+ };
+ };
+ //@endcond
+
}} // namespace cds::intrusive
#endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_LAZY_LIST_BASE_H
//@endcond
};
+
+ /// \p MichaelList internal statistics
+ template <typename EventCounter = cds::atomicity::event_counter>
+ struct stat {
+ typedef EventCounter event_counter; ///< Event counter type
+
+ event_counter m_nInsertSuccess; ///< Number of success \p insert() operations
+ event_counter m_nInsertFailed; ///< Number of failed \p insert() operations
+ event_counter m_nInsertRetry; ///< Number of attempts to insert new item
+ event_counter m_nUpdateNew; ///< Number of new item inserted for \p update()
+ event_counter m_nUpdateExisting; ///< Number of existing item updates
+ event_counter m_nUpdateFailed; ///< Number of failed \p update() call
+ event_counter m_nUpdateRetry; ///< Number of attempts to \p update() the item
+ event_counter m_nUpdateMarked; ///< Number of attempts to \p update() logically deleted (marked) items
+ event_counter m_nEraseSuccess; ///< Number of successful \p erase(), \p unlink(), \p extract() operations
+ event_counter m_nEraseFailed; ///< Number of failed \p erase(), \p unlink(), \p extract() operations
+ event_counter m_nEraseRetry; ///< Number of attempts to \p erase() an item
+ event_counter m_nFindSuccess; ///< Number of successful \p find() and \p get() operations
+ event_counter m_nFindFailed; ///< Number of failed \p find() and \p get() operations
+
+ event_counter m_nHelpingSuccess; ///< Number of successful help attempts to remove marked item during searching
+ event_counter m_nHelpingFailed; ///< Number of failed help attempts to remove marked item during searching
+
+ //@cond
+ void onInsertSuccess() { ++m_nInsertSuccess; }
+ void onInsertFailed() { ++m_nInsertFailed; }
+ void onInsertRetry() { ++m_nInsertRetry; }
+ void onUpdateNew() { ++m_nUpdateNew; }
+ void onUpdateExisting() { ++m_nUpdateExisting; }
+ void onUpdateFailed() { ++m_nUpdateFailed; }
+ void onUpdateRetry() { ++m_nUpdateRetry; }
+ void onUpdateMarked() { ++m_nUpdateMarked; }
+ void onEraseSuccess() { ++m_nEraseSuccess; }
+ void onEraseFailed() { ++m_nEraseFailed; }
+ void onEraseRetry() { ++m_nEraseRetry; }
+ void onFindSuccess() { ++m_nFindSuccess; }
+ void onFindFailed() { ++m_nFindFailed; }
+
+ void onHelpingSuccess() { ++m_nHelpingSuccess; }
+ void onHelpingFailed() { ++m_nHelpingFailed; }
+ //@endcond
+ };
+
+ /// \p MichaelList empty internal statistics
+ struct empty_stat {
+ //@cond
+ void onInsertSuccess() const {}
+ void onInsertFailed() const {}
+ void onInsertRetry() const {}
+ void onUpdateNew() const {}
+ void onUpdateExisting() const {}
+ void onUpdateFailed() const {}
+ void onUpdateRetry() const {}
+ void onUpdateMarked() const {}
+ void onEraseSuccess() const {}
+ void onEraseFailed() const {}
+ void onEraseRetry() const {}
+ void onFindSuccess() const {}
+ void onFindFailed() const {}
+
+ void onHelpingSuccess() const {}
+ void onHelpingFailed() const {}
+ //@endcond
+ };
+
+ //@cond
+ template <typename Stat = michael_list::stat<>>
+ struct wrapped_stat {
+ typedef Stat stat_type;
+
+ wrapped_stat( stat_type& st )
+ : m_stat( st )
+ {}
+
+ void onInsertSuccess() { m_stat.onInsertSuccess(); }
+ void onInsertFailed() { m_stat.onInsertFailed(); }
+ void onInsertRetry() { m_stat.onInsertRetry(); }
+ void onUpdateNew() { m_stat.onUpdateNew(); }
+ void onUpdateExisting() { m_stat.onUpdateExisting(); }
+ void onUpdateFailed() { m_stat.onUpdateFailed(); }
+ void onUpdateRetry() { m_stat.onUpdateRetry(); }
+ void onUpdateMarked() { m_stat.onUpdateMarked(); }
+ void onEraseSuccess() { m_stat.onEraseSuccess(); }
+ void onEraseFailed() { m_stat.onEraseFailed(); }
+ void onEraseRetry() { m_stat.onEraseRetry(); }
+ void onFindSuccess() { m_stat.onFindSuccess(); }
+ void onFindFailed() { m_stat.onFindFailed(); }
+
+ void onHelpingSuccess() { m_stat.onHelpingSuccess(); }
+ void onHelpingFailed() { m_stat.onHelpingFailed(); }
+
+ stat_type& m_stat;
+ };
+ //@endcond
+
/// MichaelList traits
struct traits
{
typedef opt::v::empty_disposer disposer;
/// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter to enable item counting
- typedef atomicity::empty_item_counter item_counter;
+ typedef atomicity::empty_item_counter item_counter;
+
+ /// Internal statistics
+ /**
+ By default, internal statistics is disabled (\p michael_list::empty_stat).
+ Use \p michael_list::stat to enable it.
+ */
+ typedef empty_stat stat;
/// Link fields checking feature
/**
- \p opt::link_checker - the type of node's link fields checking. Default is \p opt::debug_check_link
- \p opt::item_counter - the type of item counting feature. Default is disabled (\p atomicity::empty_item_counter).
To enable item counting use \p atomicity::item_counter.
+ - \p opt::stat - internal statistics. By default, it is disabled (\p michael_list::empty_stat).
+ To enable it use \p michael_list::stat
- \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
or \p opt::v::sequential_consistent (sequentially consistent memory model).
- \p opt::rcu_check_deadlock - a deadlock checking policy for \ref cds_intrusive_MichaelList_rcu "RCU-based MichaelList"
# endif
};
+
+ //@cond
+ template <typename Stat>
+ struct select_stat_wrapper
+ {
+ typedef Stat stat;
+ typedef michael_list::wrapped_stat<Stat> wrapped_stat;
+ enum {
+ empty = false
+ };
+ };
+
+ template <>
+ struct select_stat_wrapper< empty_stat >
+ {
+ typedef empty_stat stat;
+ typedef empty_stat wrapped_stat;
+ enum {
+ empty = true
+ };
+ };
+
+ template <typename Stat>
+ struct select_stat_wrapper< michael_list::wrapped_stat<Stat>>: public select_stat_wrapper< Stat >
+ {};
+
+ //@endcond
+
} // namespace michael_list
//@cond
//@endcond
- /// Tag for selecting Michael list
- //class michael_list_tag;
+ //@cond
+ template <typename List>
+ struct is_michael_list {
+ enum {
+ value = false
+ };
+ };
+
+ template <typename GC, typename T, typename Traits>
+ struct is_michael_list< MichaelList< GC, T, Traits >> {
+ enum {
+ value = true
+ };
+ };
+ //@endcond
}} // namespace cds::intrusive
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_INTRUSIVE_DETAILS_MICHAEL_SET_BASE_H
template <typename OrderedList, bool IsConst>
class iterator
{
- friend class iterator < OrderedList, !IsConst >;
+ friend class iterator< OrderedList, !IsConst >;
+
protected:
typedef OrderedList bucket_type;
typedef typename list_iterator_selector< bucket_type, IsConst>::bucket_ptr bucket_ptr;
--- /dev/null
+/*
+ This file is a part of libcds - Concurrent Data Structures library
+
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef CDSLIB_INTRUSIVE_IMPL_ITERABLE_LIST_H
+#define CDSLIB_INTRUSIVE_IMPL_ITERABLE_LIST_H
+
+#include <cds/intrusive/details/iterable_list_base.h>
+#include <cds/details/make_const_type.h>
+
+namespace cds { namespace intrusive {
+
+ /// Iterable lock-free ordered single-linked list
+ /** @ingroup cds_intrusive_list
+ \anchor cds_intrusive_IterableList_hp
+
+ This lock-free list implementation supports thread-safe iterators.
+ Unlike \p cds::intrusive::MichaelList the iterable list does not require
+ any hook in \p T to be stored in the list.
+
+ Usually, ordered single-linked list is used as a building block for the hash table implementation.
+ Iterable list is suitable for an almost append-only hash table because the list doesn't delete
+ its internal nodes when erasing a key but marks them as empty to be reused in the future.
+ However, plenty of empty nodes degrades performance.
+ Separation of internal nodes and user data implies the need for an allocator for internal node
+ so the iterable list is not fully intrusive. Nevertheless, if you need thread-safe iterator,
+ the iterable list is a good choice.
+
+ The complexity of searching is <tt>O(N)</tt>.
+
+ Template arguments:
+ - \p GC - Garbage collector used.
+ - \p T - type to be stored in the list.
+ - \p Traits - type traits, default is \p iterable_list::traits. It is possible to declare option-based
+ list with \p cds::intrusive::iterable_list::make_traits metafunction:
+ For example, the following traits-based declaration of \p gc::HP iterable list
+ \code
+ #include <cds/intrusive/iterable_list_hp.h>
+ // Declare item stored in your list
+ struct foo
+ {
+ int nKey;
+ // .... other data
+ };
+
+ // Declare comparator for the item
+ struct my_compare {
+ int operator()( foo const& i1, foo const& i2 ) const
+ {
+ return i1.nKey - i2.nKey;
+ }
+ };
+
+ // Declare traits
+ struct my_traits: public cds::intrusive::iterable_list::traits
+ {
+ typedef my_compare compare;
+ };
+
+ // Declare list
+ typedef cds::intrusive::IterableList< cds::gc::HP, foo, my_traits > list_type;
+ \endcode
+ is equivalent for the following option-based list
+ \code
+ #include <cds/intrusive/iterable_list_hp.h>
+
+ // foo struct and my_compare are the same
+
+ // Declare option-based list
+ typedef cds::intrusive::IterableList< cds::gc::HP, foo,
+ typename cds::intrusive::iterable_list::make_traits<
+ cds::intrusive::opt::compare< my_compare > // item comparator option
+ >::type
+ > option_list_type;
+ \endcode
+
+ \par Usage
+ There are different specializations of this template for each garbage collecting schema.
+ You should select GC you want and include appropriate .h-file:
+ - for \p gc::HP: <tt> <cds/intrusive/iterable_list_hp.h> </tt>
+ - for \p gc::DHP: <tt> <cds/intrusive/iterable_list_dhp.h> </tt>
+ - for \ref cds_urcu_gc "RCU type" - see \ref cds_intrusive_IterableList_rcu "RCU-based IterableList"
+ */
+ template <
+ class GC
+ ,typename T
+#ifdef CDS_DOXYGEN_INVOKED
+ ,class Traits = iterable_list::traits
+#else
+ ,class Traits
+#endif
+ >
+ class IterableList
+ {
+ public:
+ typedef T value_type; ///< type of value stored in the list
+ typedef Traits traits; ///< Traits template parameter
+
+ typedef iterable_list::node< value_type > node_type; ///< node type
+
+# ifdef CDS_DOXYGEN_INVOKED
+ typedef implementation_defined key_comparator ; ///< key comparison functor based on opt::compare and opt::less option setter.
+# else
+ typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator;
+# endif
+
+ typedef typename traits::disposer disposer; ///< disposer for \p value_type
+
+ typedef GC gc; ///< Garbage collector
+ typedef typename traits::back_off back_off; ///< back-off strategy
+ typedef typename traits::item_counter item_counter; ///< Item counting policy used
+ typedef typename traits::memory_model memory_model; ///< Memory ordering. See \p cds::opt::memory_model option
+ typedef typename traits::node_allocator node_allocator; ///< Node allocator
+ typedef typename traits::stat stat; ///< Internal statistics
+
+ typedef typename gc::template guarded_ptr< value_type > guarded_ptr; ///< Guarded pointer
+
+ static CDS_CONSTEXPR const size_t c_nHazardPtrCount = 2; ///< Count of hazard pointer required for the algorithm
+
+ //@cond
+ // Rebind traits (split-list support)
+ template <typename... Options>
+ struct rebind_traits {
+ typedef IterableList<
+ gc
+ , value_type
+ , typename cds::opt::make_options< traits, Options...>::type
+ > type;
+ };
+
+ // Stat selector
+ template <typename Stat>
+ using select_stat_wrapper = iterable_list::select_stat_wrapper< Stat >;
+ //@endcond
+
+ protected:
+ typedef atomics::atomic< node_type* > atomic_node_ptr; ///< Atomic node pointer
+ typedef atomic_node_ptr auxiliary_head; ///< Auxiliary head type (for split-list support)
+
+ atomic_node_ptr m_pHead; ///< Head pointer
+ item_counter m_ItemCounter; ///< Item counter
+ mutable stat m_Stat; ///< Internal statistics
+
+ //@cond
+ typedef cds::details::Allocator< node_type, node_allocator > cxx_node_allocator;
+
+ /// Position pointer for item search
+ struct position {
+ atomic_node_ptr * pHead; ///< Previous node (pointer to pPrev->next or to m_pHead)
+ node_type * pPrev; ///< Previous node
+ node_type * pCur; ///< Current node
+
+ value_type * pFound; ///< Value of \p pCur->data, valid only if data found
+ typename gc::Guard guard; ///< guard for \p pFound
+ };
+ //@endcond
+
+ protected:
+ //@cond
+ template <bool IsConst>
+ class iterator_type
+ {
+ friend class IterableList;
+
+ protected:
+ node_type* m_pNode;
+ value_type* m_pVal;
+ typename gc::Guard m_Guard; // for m_pVal
+
+ // Advances to the next node with non-null data.
+ // A node whose \p data slot is \p nullptr is logically removed and is skipped.
+ void next()
+ {
+ while ( m_pNode ) {
+ m_pNode = m_pNode->next.load( memory_model::memory_order_relaxed );
+ if ( !m_pNode )
+ break;
+ m_pVal = m_Guard.protect( m_pNode->data );
+ if ( m_pVal )
+ break;
+ }
+ }
+
+ // "begin" constructor: starts at the first node and skips logically-removed nodes.
+ explicit iterator_type( atomic_node_ptr const& pNode )
+ : m_pNode( pNode.load( memory_model::memory_order_relaxed ))
+ , m_pVal( nullptr )
+ {
+ if ( m_pNode ) {
+ m_pVal = m_Guard.protect( m_pNode->data );
+ if ( !m_pVal )
+ next();
+ }
+ }
+
+ // Positioned constructor (used by find_iterator_at());
+ // \p pVal must be the already-protected value of \p pNode->data.
+ iterator_type( node_type* pNode, value_type* pVal )
+ : m_pNode( pNode )
+ , m_pVal( pVal )
+ {
+ if ( m_pNode ) {
+ assert( pVal != nullptr );
+ m_Guard.assign( pVal );
+ }
+ }
+
+ public:
+ typedef typename cds::details::make_const_type<value_type, IsConst>::pointer value_ptr;
+ typedef typename cds::details::make_const_type<value_type, IsConst>::reference value_ref;
+
+ iterator_type()
+ : m_pNode( nullptr )
+ , m_pVal( nullptr )
+ {}
+
+ iterator_type( iterator_type const& src )
+ : m_pNode( src.m_pNode )
+ , m_pVal( src.m_pVal )
+ {
+ m_Guard.assign( m_pVal );
+ }
+
+ value_ptr operator ->() const
+ {
+ return m_pVal;
+ }
+
+ value_ref operator *() const
+ {
+ assert( m_pVal != nullptr );
+ return *m_pVal;
+ }
+
+ /// Pre-increment
+ iterator_type& operator ++()
+ {
+ next();
+ return *this;
+ }
+
+ iterator_type& operator = (iterator_type const& src)
+ {
+ m_pNode = src.m_pNode;
+ m_pVal = src.m_pVal;
+ m_Guard.assign( m_pVal );
+ return *this;
+ }
+
+ // Comparison is by node, not by value: two iterators at the same node compare
+ // equal even if they protect different (older/newer) values of that node.
+ template <bool C>
+ bool operator ==(iterator_type<C> const& i ) const
+ {
+ return m_pNode == i.m_pNode;
+ }
+ template <bool C>
+ bool operator !=(iterator_type<C> const& i ) const
+ {
+ return m_pNode != i.m_pNode;
+ }
+ };
+ //@endcond
+
+ public:
+ ///@name Thread-safe forward iterators
+ //@{
+ /// Forward iterator
+ /**
+ The forward iterator for iterable list has some features:
+ - it has no post-increment operator
+ - to protect the value, the iterator contains a GC-specific guard.
+ For some GC (like as \p gc::HP), a guard is a limited resource per thread, so an exception (or assertion) "no free guard"
+ may be thrown if the limit of guard count per thread is exceeded.
+ - The iterator cannot be moved across thread boundary since it contains thread-private GC's guard.
+ - Iterator is thread-safe: even if the element the iterator points to is removed, the iterator stays valid because
+ it contains the guard keeping the value from being recycled.
+
+ The iterator interface:
+ \code
+ class iterator {
+ public:
+ // Default constructor
+ iterator();
+
+ // Copy constructor
+ iterator( iterator const& src );
+
+ // Dereference operator
+ value_type * operator ->() const;
+
+ // Dereference operator
+ value_type& operator *() const;
+
+ // Preincrement operator
+ iterator& operator ++();
+
+ // Assignment operator
+ iterator& operator = (iterator const& src);
+
+ // Equality operators
+ bool operator ==(iterator const& i ) const;
+ bool operator !=(iterator const& i ) const;
+ };
+ \endcode
+
+ @note For two iterators pointed to the same element the value can be different;
+ this code
+ \code
+ if ( it1 == it2 )
+ assert( &(*it1) == &(*it2) );
+ \endcode
+ may fire the assertion. The point is that the iterator stores the value of element which can be modified later by other thread.
+ The guard inside the iterator prevents recycling that value so the iterator's value remains valid even after such changing.
+ Other iterator can observe modified value of the element.
+ */
+ typedef iterator_type<false> iterator;
+ /// Const forward iterator
+ /**
+ For iterator's features and requirements see \ref iterator
+ */
+ typedef iterator_type<true> const_iterator;
+
+ /// Returns a forward iterator addressing the first element in a list
+ /**
+ For empty list \code begin() == end() \endcode
+ */
+ iterator begin()
+ {
+ return iterator( m_pHead );
+ }
+
+ /// Returns an iterator that addresses the location succeeding the last element in a list
+ /**
+ Do not use the value returned by <tt>end</tt> function to access any item.
+ Internally, <tt>end</tt> returning value equals to \p nullptr.
+
+ The returned value can be used only to control reaching the end of the list.
+ For empty list <tt>begin() == end()</tt>
+ */
+ iterator end()
+ {
+ return iterator();
+ }
+
+ /// Returns a forward const iterator addressing the first element in a list
+ const_iterator cbegin() const
+ {
+ return const_iterator( m_pHead );
+ }
+
+ /// Returns a forward const iterator addressing the first element in a list
+ const_iterator begin() const
+ {
+ return const_iterator( m_pHead );
+ }
+
+ /// Returns an const iterator that addresses the location succeeding the last element in a list
+ const_iterator end() const
+ {
+ return const_iterator();
+ }
+
+ /// Returns an const iterator that addresses the location succeeding the last element in a list
+ const_iterator cend() const
+ {
+ return const_iterator();
+ }
+ //@}
+
+ public:
+ /// Default constructor initializes empty list
+ /**
+ The head pointer starts as \p nullptr; no nodes are allocated.
+ */
+ IterableList()
+ : m_pHead( nullptr )
+ {}
+
+ //@cond
+ // Constructor for wrapper containers (e.g. split-list adapters) that share one
+ // statistics object: \p m_Stat is initialized from the external \p st.
+ // Fix: the original default argument `typename = std::enable_if<...>` names a
+ // well-formed type even when the condition is false, so SFINAE never disabled
+ // this overload; `typename std::enable_if<...>::type` restores the constraint.
+ template <typename Stat, typename = typename std::enable_if<std::is_same<stat, iterable_list::wrapped_stat<Stat>>::value >::type>
+ explicit IterableList( Stat& st )
+ : m_pHead( nullptr )
+ , m_Stat( st )
+ {}
+ //@endcond
+
+ /// Destroys the list object
+ /**
+ Calls \p destroy(): every remaining value is retired via \p disposer
+ and every node is deleted. Not thread-safe.
+ */
+ ~IterableList()
+ {
+ destroy();
+ }
+
+ /// Inserts new node
+ /**
+ The function inserts \p val into the list if the list does not contain
+ an item with key equal to \p val.
+
+ Returns \p true if \p val has been linked to the list, \p false otherwise.
+ */
+ bool insert( value_type& val )
+ {
+ return insert_at( m_pHead, val );
+ }
+
+ /// Inserts new node
+ /**
+ This function is intended for derived non-intrusive containers.
+
+ The function allows splitting new item creation into two parts:
+ - create item with key only
+ - insert new item into the list
+ - if the insertion succeeds, calls \p f functor to initialize value-field of \p val.
+
+ The functor signature is:
+ \code
+ void func( value_type& val );
+ \endcode
+ where \p val is the item inserted. User-defined functor \p f should guarantee that during changing
+ \p val no any other changes could be made on this list's item by concurrent threads.
+ The user-defined functor is called only if the insertion succeeds.
+
+ @warning See \ref cds_intrusive_item_creating "insert item troubleshooting"
+ */
+ template <typename Func>
+ bool insert( value_type& val, Func f )
+ {
+ return insert_at( m_pHead, val, f );
+ }
+
+ /// Updates the node
+ /**
+ The operation performs inserting or changing data in a lock-free manner.
+
+ If the item \p val is not found in the list, then \p val is inserted
+ iff \p bInsert is \p true.
+ Otherwise, the current element is changed to \p val, the element will be retired later
+ by call \p Traits::disposer.
+ The functor \p func is called after inserting or replacing, its signature is:
+ \code
+ void func( value_type& val, value_type * old );
+ \endcode
+ where
+ - \p val - argument \p val passed into the \p %update() function
+ - \p old - old value that will be retired. If new item has been inserted then \p old is \p nullptr.
+
+ Returns std::pair<bool, bool> where \p first is \p true if operation is successful,
+ \p second is \p true if \p val has been added or \p false if the item with that key
+ already in the list.
+ */
+ template <typename Func>
+ std::pair<bool, bool> update( value_type& val, Func func, bool bInsert = true )
+ {
+ return update_at( m_pHead, val, func, bInsert );
+ }
+
+ /// Insert or update
+ /**
+ The operation performs inserting or updating data in a lock-free manner.
+
+ If the item \p val is not found in the list, then \p val is inserted
+ iff \p bInsert is \p true.
+ Otherwise, the current element is changed to \p val, the old element will be retired later
+ by call \p Traits::disposer.
+
+ Returns std::pair<bool, bool> where \p first is \p true if operation is successful,
+ \p second is \p true if \p val has been added or \p false if the item with that key
+ already in the list.
+ */
+ std::pair<bool, bool> upsert( value_type& val, bool bInsert = true )
+ {
+ // update() with a no-op functor
+ return update_at( m_pHead, val, []( value_type&, value_type* ) {}, bInsert );
+ }
+
+ /// Unlinks the item \p val from the list
+ /**
+ The function searches the item \p val in the list and unlinks it from the list
+ if it is found and it is equal to \p val.
+
+ Difference between \p erase() and \p %unlink(): \p %erase() finds <i>a key</i>
+ and deletes the item found. \p %unlink() finds an item by key and deletes it
+ only if \p val is an item of the list, i.e. the pointer to item found
+ is equal to <tt> &val </tt>.
+
+ \p disposer specified in \p Traits is called for deleted item.
+
+ The function returns \p true if success and \p false otherwise.
+ */
+ bool unlink( value_type& val )
+ {
+ return unlink_at( m_pHead, val );
+ }
+
+ /// Deletes the item from the list
+ /** \anchor cds_intrusive_IterableList_hp_erase_val
+ The function searches an item with key equal to \p key in the list,
+ unlinks it from the list, and returns \p true.
+ If \p key is not found the function returns \p false.
+
+ \p disposer specified in \p Traits is called for deleted item.
+ */
+ template <typename Q>
+ bool erase( Q const& key )
+ {
+ return erase_at( m_pHead, key, key_comparator());
+ }
+
+ /// Deletes the item from the list using \p pred predicate for searching
+ /**
+ The function is an analog of \ref cds_intrusive_IterableList_hp_erase_val "erase(Q const&)"
+ but \p pred is used for key comparing.
+ \p Less functor has the interface like \p std::less.
+ \p pred must imply the same element order as the comparator used for building the list.
+
+ \p disposer specified in \p Traits is called for deleted item.
+ */
+ template <typename Q, typename Less>
+ bool erase_with( Q const& key, Less pred )
+ {
+ // \p pred is needed only for type deduction; comparison uses a wrapper built from \p Less
+ CDS_UNUSED( pred );
+ return erase_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>());
+ }
+
+ /// Deletes the item from the list
+ /** \anchor cds_intrusive_IterableList_hp_erase_func
+ The function searches an item with key equal to \p key in the list,
+ call \p func functor with item found, unlinks it from the list, and returns \p true.
+ The \p Func interface is
+ \code
+ struct functor {
+ void operator()( value_type const& item );
+ };
+ \endcode
+ If \p key is not found the function returns \p false, \p func is not called.
+
+ \p disposer specified in \p Traits is called for deleted item.
+ */
+ template <typename Q, typename Func>
+ bool erase( Q const& key, Func func )
+ {
+ return erase_at( m_pHead, key, key_comparator(), func );
+ }
+
+ /// Deletes the item from the list using \p pred predicate for searching
+ /**
+ The function is an analog of \ref cds_intrusive_IterableList_hp_erase_func "erase(Q const&, Func)"
+ but \p pred is used for key comparing.
+ \p Less functor has the interface like \p std::less.
+ \p pred must imply the same element order as the comparator used for building the list.
+
+ \p disposer specified in \p Traits is called for deleted item.
+ */
+ template <typename Q, typename Less, typename Func>
+ bool erase_with( Q const& key, Less pred, Func f )
+ {
+ // \p pred is needed only for type deduction; comparison uses a wrapper built from \p Less
+ CDS_UNUSED( pred );
+ return erase_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>(), f );
+ }
+
+ /// Extracts the item from the list with specified \p key
+ /** \anchor cds_intrusive_IterableList_hp_extract
+ The function searches an item with key equal to \p key,
+ unlinks it from the list, and returns it as \p guarded_ptr.
+ If \p key is not found returns an empty guarded pointer.
+
+ Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
+
+ The \ref disposer specified in \p Traits class template parameter is called automatically
+ by garbage collector \p GC when returned \ref guarded_ptr object will be destroyed or released.
+ @note Each \p guarded_ptr object uses the GC's guard that can be limited resource.
+
+ Usage:
+ \code
+ typedef cds::intrusive::IterableList< cds::gc::HP, foo, my_traits > ord_list;
+ ord_list theList;
+ // ...
+ {
+ ord_list::guarded_ptr gp(theList.extract( 5 ));
+ if ( gp ) {
+ // Deal with gp
+ // ...
+ }
+ // Destructor of gp releases internal HP guard
+ }
+ \endcode
+ */
+ template <typename Q>
+ guarded_ptr extract( Q const& key )
+ {
+ guarded_ptr gp;
+ // on success, extract_at() stores the unlinked value into gp's internal guard
+ extract_at( m_pHead, gp.guard(), key, key_comparator());
+ return gp;
+ }
+
+ /// Extracts the item using compare functor \p pred
+ /**
+ The function is an analog of \ref cds_intrusive_IterableList_hp_extract "extract(Q const&)"
+ but \p pred predicate is used for key comparing.
+
+ \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q
+ in any order.
+ \p pred must imply the same element order as the comparator used for building the list.
+ */
+ template <typename Q, typename Less>
+ guarded_ptr extract_with( Q const& key, Less pred )
+ {
+ CDS_UNUSED( pred );
+ guarded_ptr gp;
+ extract_at( m_pHead, gp.guard(), key, cds::opt::details::make_comparator_from_less<Less>());
+ return gp;
+ }
+
+ /// Finds \p key in the list
+ /** \anchor cds_intrusive_IterableList_hp_find_func
+ The function searches the item with key equal to \p key and calls the functor \p f for item found.
+ The interface of \p Func functor is:
+ \code
+ struct functor {
+ void operator()( value_type& item, Q& key );
+ };
+ \endcode
+ where \p item is the item found, \p key is the \p %find() function argument.
+
+ The functor may change non-key fields of \p item. Note that the function only guarantees
+ that \p item cannot be disposed while the functor is executing.
+ The function does not serialize simultaneous access to the \p item. If such access is
+ possible you must provide your own synchronization schema to keep out unsafe item modifications.
+
+ The function returns \p true if \p val is found, \p false otherwise.
+ */
+ template <typename Q, typename Func>
+ bool find( Q& key, Func f ) const
+ {
+ return find_at( m_pHead, key, key_comparator(), f );
+ }
+ //@cond
+ template <typename Q, typename Func>
+ bool find( Q const& key, Func f ) const
+ {
+ return find_at( m_pHead, key, key_comparator(), f );
+ }
+ //@endcond
+
+ /// Finds \p key in the list and returns iterator pointed to the item found
+ /**
+ If \p key is not found the function returns \p end().
+ */
+ template <typename Q>
+ iterator find( Q& key ) const
+ {
+ return find_iterator_at( m_pHead, key, key_comparator());
+ }
+ //@cond
+ template <typename Q>
+ iterator find( Q const& key ) const
+ {
+ return find_iterator_at( m_pHead, key, key_comparator() );
+ }
+ //@endcond
+
+ /// Finds the \p key using \p pred predicate for searching
+ /**
+ The function is an analog of \ref cds_intrusive_IterableList_hp_find_func "find(Q&, Func)"
+ but \p pred is used for key comparing.
+ \p Less functor has the interface like \p std::less.
+ \p pred must imply the same element order as the comparator used for building the list.
+ */
+ template <typename Q, typename Less, typename Func>
+ bool find_with( Q& key, Less pred, Func f ) const
+ {
+ // \p pred is needed only for type deduction; comparison uses a wrapper built from \p Less
+ CDS_UNUSED( pred );
+ return find_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>(), f );
+ }
+ //@cond
+ template <typename Q, typename Less, typename Func>
+ bool find_with( Q const& key, Less pred, Func f ) const
+ {
+ CDS_UNUSED( pred );
+ return find_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>(), f );
+ }
+ //@endcond
+
+ /// Finds \p key in the list using \p pred predicate for searching and returns iterator pointed to the item found
+ /**
+ The function is an analog of \p find(Q&) but \p pred is used for key comparing.
+ \p Less functor has the interface like \p std::less.
+ \p pred must imply the same element order as the comparator used for building the list.
+
+ If \p key is not found the function returns \p end().
+ */
+ template <typename Q, typename Less>
+ iterator find_with( Q& key, Less pred ) const
+ {
+ CDS_UNUSED( pred );
+ return find_iterator_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>());
+ }
+ //@cond
+ template <typename Q, typename Less>
+ iterator find_with( Q const& key, Less pred ) const
+ {
+ CDS_UNUSED( pred );
+ return find_iterator_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>());
+ }
+ //@endcond
+
+ /// Checks whether the list contains \p key
+ /**
+ The function searches the item with key equal to \p key
+ and returns \p true if it is found, and \p false otherwise.
+ */
+ template <typename Q>
+ bool contains( Q const& key ) const
+ {
+ return find_at( m_pHead, key, key_comparator());
+ }
+
+ /// Checks whether the list contains \p key using \p pred predicate for searching
+ /**
+ The function is an analog of <tt>contains( key )</tt> but \p pred is used for key comparing.
+ \p Less functor has the interface like \p std::less.
+ \p Less must imply the same element order as the comparator used for building the list.
+ */
+ template <typename Q, typename Less>
+ bool contains( Q const& key, Less pred ) const
+ {
+ // \p pred is needed only for type deduction; comparison uses a wrapper built from \p Less
+ CDS_UNUSED( pred );
+ return find_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>());
+ }
+
+ /// Finds the \p key and return the item found
+ /** \anchor cds_intrusive_IterableList_hp_get
+ The function searches the item with key equal to \p key
+ and returns it as \p guarded_ptr.
+ If \p key is not found the function returns an empty guarded pointer.
+
+ The \ref disposer specified in \p Traits class template parameter is called
+ by garbage collector \p GC automatically when returned \ref guarded_ptr object
+ will be destroyed or released.
+ @note Each \p guarded_ptr object uses one GC's guard which can be limited resource.
+
+ Usage:
+ \code
+ typedef cds::intrusive::IterableList< cds::gc::HP, foo, my_traits > ord_list;
+ ord_list theList;
+ // ...
+ {
+ ord_list::guarded_ptr gp(theList.get( 5 ));
+ if ( gp ) {
+ // Deal with gp
+ //...
+ }
+ // Destructor of guarded_ptr releases internal HP guard
+ }
+ \endcode
+
+ Note the compare functor specified for \p Traits template parameter
+ should accept a parameter of type \p Q that can be not the same as \p value_type.
+ */
+ template <typename Q>
+ guarded_ptr get( Q const& key ) const
+ {
+ guarded_ptr gp;
+ // on success, get_at() stores the found value into gp's internal guard
+ get_at( m_pHead, gp.guard(), key, key_comparator());
+ return gp;
+ }
+
+ /// Finds the \p key and return the item found
+ /**
+ The function is an analog of \ref cds_intrusive_IterableList_hp_get "get( Q const&)"
+ but \p pred is used for comparing the keys.
+
+ \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q
+ in any order.
+ \p pred must imply the same element order as the comparator used for building the list.
+ */
+ template <typename Q, typename Less>
+ guarded_ptr get_with( Q const& key, Less pred ) const
+ {
+ CDS_UNUSED( pred );
+ guarded_ptr gp;
+ get_at( m_pHead, gp.guard(), key, cds::opt::details::make_comparator_from_less<Less>());
+ return gp;
+ }
+
+ /// Clears the list (thread safe, not atomic)
+ /**
+ Walks the list and unlinks every value by CAS-ing the node's \p data slot
+ to \p nullptr (see \p unlink_node()). The nodes themselves stay linked;
+ they are reclaimed only when the list is destroyed.
+ */
+ void clear()
+ {
+ position pos;
+ for ( pos.pCur = m_pHead.load( memory_model::memory_order_relaxed ); pos.pCur; pos.pCur = pos.pCur->next.load( memory_model::memory_order_relaxed )) {
+ while ( true ) {
+ pos.pFound = pos.guard.protect( pos.pCur->data );
+ if ( !pos.pFound )
+ break; // node already empty (logically removed)
+ if ( cds_likely( unlink_node( pos ))) {
+ --m_ItemCounter;
+ break;
+ }
+ // CAS lost to a concurrent update of this node - retry
+ }
+ }
+ }
+
+ /// Checks if the list is empty
+ /**
+ Emptiness is checked by item counting: if item count is zero then the set is empty.
+ Thus, if you need to use \p %empty() you should provide appropriate (non-empty) \p iterable_list::traits::item_counter
+ feature.
+ */
+ bool empty() const
+ {
+ return size() == 0;
+ }
+
+ /// Returns list's item count
+ /**
+ The value returned depends on item counter provided by \p iterable_list::traits::item_counter. For \p atomicity::empty_item_counter,
+ this function always returns 0.
+ */
+ size_t size() const
+ {
+ return m_ItemCounter.value();
+ }
+
+ /// Returns const reference to internal statistics
+ stat const& statistics() const
+ {
+ return m_Stat;
+ }
+
+ protected:
+ //@cond
+ // NOTE(review): split-list integration is disabled (#if 0); kept for future use.
+#if 0
+ // split-list support
+ bool insert_aux_node( node_type * pNode )
+ {
+ return insert_aux_node( m_pHead, pNode );
+ }
+
+ // split-list support
+ bool insert_aux_node( atomic_node_ptr& refHead, node_type * pNode )
+ {
+ assert( pNode != nullptr );
+
+ // Hack: convert node_type to value_type.
+ // In principle, auxiliary node can be non-reducible to value_type
+ // We assume that comparator can correctly distinguish aux and regular node.
+ return insert_at( refHead, *node_traits::to_value_ptr( pNode ) );
+ }
+#endif
+
+ // Inserts \p val starting the search from \p refHead.
+ // Retries until either the key is found (failure) or link_node() succeeds.
+ bool insert_at( atomic_node_ptr& refHead, value_type& val )
+ {
+ position pos;
+
+ while ( true ) {
+ if ( search( refHead, val, pos, key_comparator() )) {
+ m_Stat.onInsertFailed();
+ return false;
+ }
+
+ if ( link_node( &val, pos ) ) {
+ ++m_ItemCounter;
+ m_Stat.onInsertSuccess();
+ return true;
+ }
+
+ // linking CAS lost to a concurrent modification - search again
+ m_Stat.onInsertRetry();
+ }
+ }
+
+ // Same as above; on success calls \p f( val ) before returning.
+ template <typename Func>
+ bool insert_at( atomic_node_ptr& refHead, value_type& val, Func f )
+ {
+ position pos;
+
+ // guard \p val so it cannot be disposed while \p f runs
+ typename gc::Guard guard;
+ guard.assign( &val );
+
+ while ( true ) {
+ if ( search( refHead, val, pos, key_comparator() ) ) {
+ m_Stat.onInsertFailed();
+ return false;
+ }
+
+ if ( link_node( &val, pos ) ) {
+ f( val );
+ ++m_ItemCounter;
+ m_Stat.onInsertSuccess();
+ return true;
+ }
+
+ m_Stat.onInsertRetry();
+ }
+ }
+
+ // Insert-or-replace. If the key exists, CAS-es the node's data slot from the
+ // old value to \p &val; otherwise links \p val iff \p bInsert.
+ // Returns {success, inserted-as-new}.
+ template <typename Func>
+ std::pair<bool, bool> update_at( atomic_node_ptr& refHead, value_type& val, Func func, bool bInsert )
+ {
+ position pos;
+
+ // guard \p val so it cannot be disposed while \p func runs
+ typename gc::Guard guard;
+ guard.assign( &val );
+
+ while ( true ) {
+ if ( search( refHead, val, pos, key_comparator() ) ) {
+ // try to replace pCur->data with val
+ assert( pos.pFound != nullptr );
+ assert( key_comparator()(*pos.pFound, val) == 0 );
+
+ if ( cds_likely( pos.pCur->data.compare_exchange_strong( pos.pFound, &val, memory_model::memory_order_release, atomics::memory_order_relaxed ))) {
+ if ( pos.pFound != &val ) {
+ // old value is retired first; pos.guard still protects it for the functor call
+ retire_data( pos.pFound );
+ func( val, pos.pFound );
+ }
+ m_Stat.onUpdateExisting();
+ return std::make_pair( true, false );
+ }
+ }
+ else {
+ if ( !bInsert ) {
+ m_Stat.onUpdateFailed();
+ return std::make_pair( false, false );
+ }
+
+ if ( link_node( &val, pos )) {
+ func( val, static_cast<value_type*>( nullptr ));
+ ++m_ItemCounter;
+ m_Stat.onUpdateNew();
+ return std::make_pair( true, true );
+ }
+ }
+
+ // CAS lost to a concurrent modification - search again
+ m_Stat.onUpdateRetry();
+ }
+ }
+
+ // Unlinks the item only if the found value is exactly \p &val.
+ bool unlink_at( atomic_node_ptr& refHead, value_type& val )
+ {
+ position pos;
+
+ back_off bkoff;
+ while ( search( refHead, val, pos, key_comparator())) {
+ if ( pos.pFound == &val ) {
+ if ( unlink_node( pos )) {
+ --m_ItemCounter;
+ m_Stat.onEraseSuccess();
+ return true;
+ }
+ else
+ bkoff(); // unlink CAS failed - back off before retrying
+ }
+ else
+ break; // equal key but a different item: not ours to unlink
+
+ m_Stat.onEraseRetry();
+ }
+
+ m_Stat.onEraseFailed();
+ return false;
+ }
+
+ // Erases by key; on success calls \p f with the unlinked value
+ // (pos.guard keeps it alive for the call) and fills \p pos.
+ template <typename Q, typename Compare, typename Func>
+ bool erase_at( atomic_node_ptr& refHead, const Q& val, Compare cmp, Func f, position& pos )
+ {
+ back_off bkoff;
+ while ( search( refHead, val, pos, cmp )) {
+ if ( unlink_node( pos )) {
+ f( *pos.pFound );
+ --m_ItemCounter;
+ m_Stat.onEraseSuccess();
+ return true;
+ }
+ else
+ bkoff();
+
+ m_Stat.onEraseRetry();
+ }
+
+ m_Stat.onEraseFailed();
+ return false;
+ }
+
+ // Convenience overload: local position
+ template <typename Q, typename Compare, typename Func>
+ bool erase_at( atomic_node_ptr& refHead, const Q& val, Compare cmp, Func f )
+ {
+ position pos;
+ return erase_at( refHead, val, cmp, f, pos );
+ }
+
+ // Convenience overload: no functor
+ template <typename Q, typename Compare>
+ bool erase_at( atomic_node_ptr& refHead, Q const& val, Compare cmp )
+ {
+ position pos;
+ return erase_at( refHead, val, cmp, [](value_type const&){}, pos );
+ }
+
+ // Erases by key and publishes the unlinked value into \p dest
+ // (the caller's guarded_ptr guard) so the caller may keep using it.
+ template <typename Q, typename Compare>
+ bool extract_at( atomic_node_ptr& refHead, typename guarded_ptr::native_guard& dest, Q const& val, Compare cmp )
+ {
+ position pos;
+ back_off bkoff;
+ while ( search( refHead, val, pos, cmp )) {
+ if ( unlink_node( pos )) {
+ dest.set( pos.pFound );
+ --m_ItemCounter;
+ m_Stat.onEraseSuccess();
+ return true;
+ }
+ else
+ bkoff(); // unlink CAS failed - back off before retrying
+
+ m_Stat.onEraseRetry();
+ }
+
+ m_Stat.onEraseFailed();
+ return false;
+ }
+
+ // Membership test: true iff \p search() finds an equal key.
+ template <typename Q, typename Compare>
+ bool find_at( atomic_node_ptr const& refHead, Q const& val, Compare cmp ) const
+ {
+ position pos;
+ if ( search( refHead, val, pos, cmp ) ) {
+ m_Stat.onFindSuccess();
+ return true;
+ }
+
+ m_Stat.onFindFailed();
+ return false;
+ }
+
+ // Find with functor: \p f is called under pos.guard protection of the value.
+ template <typename Q, typename Compare, typename Func>
+ bool find_at( atomic_node_ptr const& refHead, Q& val, Compare cmp, Func f ) const
+ {
+ position pos;
+ if ( search( refHead, val, pos, cmp )) {
+ assert( pos.pFound != nullptr );
+ f( *pos.pFound, val );
+ m_Stat.onFindSuccess();
+ return true;
+ }
+
+ m_Stat.onFindFailed();
+ return false;
+ }
+
+ // Find returning an iterator positioned at the found node/value.
+ template <typename Q, typename Compare>
+ iterator find_iterator_at( atomic_node_ptr const& refHead, Q const& val, Compare cmp ) const
+ {
+ position pos;
+ if ( search( refHead, val, pos, cmp )) {
+ assert( pos.pCur != nullptr );
+ assert( pos.pFound != nullptr );
+ m_Stat.onFindSuccess();
+ return iterator( pos.pCur, pos.pFound );
+ }
+
+ m_Stat.onFindFailed();
+ return iterator{};
+ }
+
+ // Find publishing the value into the caller-supplied native guard.
+ template <typename Q, typename Compare>
+ bool get_at( atomic_node_ptr const& refHead, typename guarded_ptr::native_guard& guard, Q const& val, Compare cmp ) const
+ {
+ position pos;
+ if ( search( refHead, val, pos, cmp )) {
+ guard.set( pos.pFound );
+ m_Stat.onFindSuccess();
+ return true;
+ }
+
+ m_Stat.onFindFailed();
+ return false;
+ }
+ //@endcond
+
+ protected:
+
+ //@cond
+ // Linear search over the ordered list: walks the \p next chain, skipping nodes
+ // whose data slot is \p nullptr (logically removed), and stops at the first
+ // value that is not less than \p val. Fills \p pos; pos.guard protects pos.pFound.
+ // Returns \p true iff an equal key was found.
+ template <typename Q, typename Compare >
+ bool search( atomic_node_ptr const& refHead, const Q& val, position& pos, Compare cmp ) const
+ {
+ atomic_node_ptr* pHead = const_cast<atomic_node_ptr*>( &refHead );
+ node_type * pPrev = nullptr;
+
+ while ( true ) {
+ node_type * pCur = pHead->load( memory_model::memory_order_relaxed );
+
+ if ( pCur == nullptr ) {
+ // end-of-list
+ pos.pHead = pHead;
+ pos.pPrev = pPrev;
+ pos.pCur = nullptr;
+ pos.pFound = nullptr;
+ return false;
+ }
+
+ value_type * pVal = pos.guard.protect( pCur->data );
+
+ if ( pVal ) {
+ int nCmp = cmp( *pVal, val );
+ if ( nCmp >= 0 ) {
+ // insertion point (nCmp > 0) or exact match (nCmp == 0)
+ pos.pHead = pHead;
+ pos.pPrev = pPrev;
+ pos.pCur = pCur;
+ pos.pFound = pVal;
+ return nCmp == 0;
+ }
+ }
+
+ pPrev = pCur;
+ pHead = &( pCur->next );
+ }
+ }
+ //@endcond
+
+ private:
+ //@cond
+ // Allocates a node holding \p pVal; counted in statistics.
+ node_type * alloc_node( value_type * pVal )
+ {
+ m_Stat.onNodeCreated();
+ return cxx_node_allocator().New( pVal );
+ }
+
+ // Immediately deletes a node (only safe for unpublished or destructor-owned nodes).
+ void delete_node( node_type * pNode )
+ {
+ m_Stat.onNodeRemoved();
+ cxx_node_allocator().Delete( pNode );
+ }
+
+ // Defers disposal of \p pVal to the GC; \p disposer is invoked when it is safe.
+ static void retire_data( value_type * pVal )
+ {
+ assert( pVal != nullptr );
+ gc::template retire<disposer>( pVal );
+ }
+
+ // Tears down the whole list: retires every remaining value and deletes every node.
+ // Called from the destructor; assumes no concurrent access.
+ void destroy()
+ {
+ node_type * pNode = m_pHead.load( memory_model::memory_order_relaxed );
+ while ( pNode ) {
+ value_type * pVal = pNode->data.load( memory_model::memory_order_relaxed );
+ if ( pVal )
+ retire_data( pVal );
+ node_type * pNext = pNode->next.load( memory_model::memory_order_relaxed );
+ delete_node( pNode );
+ pNode = pNext;
+ }
+ }
+
+ // Links \p pVal at the position described by \p pos. Three cases:
+ // 1) pos.pPrev exists and is empty - reuse it by CAS-ing its data slot;
+ // 2) pos.pPrev exists and is occupied - allocate a node and CAS pPrev->next;
+ // 3) no predecessor - allocate a node and CAS the head pointer.
+ // Returns \p false if the CAS lost to a concurrent modification.
+ bool link_node( value_type * pVal, position& pos )
+ {
+ if ( pos.pPrev ) {
+ if ( pos.pPrev->data.load( memory_model::memory_order_relaxed ) == nullptr ) {
+ // reuse pPrev
+ value_type * p = nullptr;
+ return pos.pPrev->data.compare_exchange_strong( p, pVal, memory_model::memory_order_release, atomics::memory_order_relaxed );
+ }
+ else {
+ // insert new node between pos.pPrev and pos.pCur
+ node_type * pNode = alloc_node( pVal );
+ pNode->next.store( pos.pCur, memory_model::memory_order_relaxed );
+
+ if ( cds_likely( pos.pPrev->next.compare_exchange_strong( pos.pCur, pNode, memory_model::memory_order_release, atomics::memory_order_relaxed )))
+ return true;
+
+ // CAS lost: the unpublished node can be deleted immediately
+ delete_node( pNode );
+ }
+ }
+ else {
+ node_type * pNode = alloc_node( pVal );
+ pNode->next.store( pos.pCur, memory_model::memory_order_relaxed );
+ if ( cds_likely( pos.pHead->compare_exchange_strong( pos.pCur, pNode, memory_model::memory_order_release, atomics::memory_order_relaxed ) ) )
+ return true;
+
+ delete_node( pNode );
+ }
+ return false;
+ }
+
+ // Logically removes the found value by CAS-ing the node's data slot to nullptr
+ // and retiring the value. The node itself stays linked for reuse.
+ static bool unlink_node( position& pos )
+ {
+ assert( pos.pCur != nullptr );
+ assert( pos.pFound != nullptr );
+
+ if ( pos.pCur->data.compare_exchange_strong( pos.pFound, nullptr, memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) {
+ retire_data( pos.pFound );
+ return true;
+ }
+ return false;
+ }
+
+ //@endcond
+ };
+}} // namespace cds::intrusive
+
+#endif // #ifndef CDSLIB_INTRUSIVE_IMPL_ITERABLE_LIST_H
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_INTRUSIVE_IMPL_LAZY_LIST_H
- \p T - type to be stored in the list. The type must be based on lazy_list::node (for lazy_list::base_hook)
or it must have a member of type lazy_list::node (for lazy_list::member_hook).
- \p Traits - type traits. See lazy_list::traits for explanation.
- It is possible to declare option-based list with cds::intrusive::lazy_list::make_traits metafunction istead of \p Traits template
+ It is possible to declare option-based list with cds::intrusive::lazy_list::make_traits metafunction instead of \p Traits template
argument. For example, the following traits-based declaration of \p gc::HP lazy list
\code
#include <cds/intrusive/lazy_list_hp.h>
typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits
typedef typename lazy_list::get_link_checker< node_type, traits::link_checker >::type link_checker; ///< link checker
- typedef typename traits::back_off back_off; ///< back-off strategy
+ typedef typename traits::back_off back_off; ///< back-off strategy
typedef typename traits::item_counter item_counter; ///< Item counting policy used
- typedef typename traits::memory_model memory_model; ///< C++ memory ordering (see \p lazy_list::traits::memory_model)
+ typedef typename traits::memory_model memory_model; ///< C++ memory ordering (see \p lazy_list::traits::memory_model)
+ typedef typename traits::stat stat; ///< Internal statistics
+
+ static_assert((std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type");
typedef typename gc::template guarded_ptr< value_type > guarded_ptr; ///< Guarded pointer
, typename cds::opt::make_options< traits, Options...>::type
> type;
};
+
+ // Stat selector
+ template <typename Stat>
+ using select_stat_wrapper = lazy_list::select_stat_wrapper< Stat >;
//@endcond
protected:
node_type m_Tail;
item_counter m_ItemCounter;
+ stat m_Stat; ///< Internal statistics
- //@cond
struct clean_disposer {
void operator()( value_type * p )
{
/// Default constructor initializes empty list
LazyList()
{
- static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
m_Head.m_pNext.store( marked_node_ptr( &m_Tail ), memory_model::memory_order_relaxed );
}
+ //@cond
+ // Constructor for wrapper containers that share one statistics object:
+ // \p m_Stat is initialized from the external \p st.
+ // Fix: `typename = std::enable_if<...>` names a well-formed type even when the
+ // condition is false, so the overload was never SFINAE-disabled; use
+ // `typename std::enable_if<...>::type` so the constraint actually applies.
+ template <typename Stat, typename = typename std::enable_if<std::is_same<stat, lazy_list::wrapped_stat<Stat>>::value >::type>
+ explicit LazyList( Stat& st )
+ : m_Stat( st )
+ {
+ m_Head.m_pNext.store( marked_node_ptr( &m_Tail ), memory_model::memory_order_relaxed );
+ }
+ //@endcond
+
/// Destroys the list object
~LazyList()
{
this function always returns 0.
@note Even if you use real item counter and it returns 0, this fact does not mean that the list
- is empty. To check list emptyness use \p empty() method.
+ is empty. To check list emptiness use \p empty() method.
*/
size_t size() const
{
return m_ItemCounter.value();
}
+ /// Returns const reference to internal statistics
+ stat const& statistics() const
+ {
+ return m_Stat;
+ }
+
protected:
//@cond
// split-list support
if ( validate( pos.pPred, pos.pCur )) {
if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) {
// failed: key already in list
+ m_Stat.onInsertFailed();
return false;
}
else {
link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur );
- ++m_ItemCounter;
- return true;
+ break;
}
}
}
+
+ m_Stat.onInsertRetry();
}
+
+ ++m_ItemCounter;
+ m_Stat.onInsertSuccess();
+ return true;
}
template <typename Func>
if ( validate( pos.pPred, pos.pCur )) {
if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) {
// failed: key already in list
+ m_Stat.onInsertFailed();
return false;
}
else {
link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur );
f( val );
- ++m_ItemCounter;
- return true;
+ break;
}
}
}
+
+ m_Stat.onInsertRetry();
}
+
+ ++m_ItemCounter;
+ m_Stat.onInsertSuccess();
+ return true;
}
template <typename Func>
// key already in the list
func( false, *node_traits::to_value_ptr( *pos.pCur ) , val );
+ m_Stat.onUpdateExisting();
return std::make_pair( true, false );
}
else {
// new key
- if ( !bAllowInsert )
+ if ( !bAllowInsert ) {
+ m_Stat.onUpdateFailed();
return std::make_pair( false, false );
+ }
link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur );
func( true, val, val );
- ++m_ItemCounter;
- return std::make_pair( true, true );
+ break;
}
}
}
+
+ m_Stat.onUpdateRetry();
}
+
+ ++m_ItemCounter;
+ m_Stat.onUpdateNew();
+ return std::make_pair( true, true );
}
bool unlink_at( node_type * pHead, value_type& val )
{
// item found
unlink_node( pos.pPred, pos.pCur, pHead );
- --m_ItemCounter;
nResult = 1;
}
else
nResult = -1;
}
}
+
if ( nResult ) {
if ( nResult > 0 ) {
+ --m_ItemCounter;
retire_node( pos.pCur );
+ m_Stat.onEraseSuccess();
return true;
}
+
+ m_Stat.onEraseFailed();
return false;
}
}
+
+ m_Stat.onEraseRetry();
}
}
// key found
unlink_node( pos.pPred, pos.pCur, pHead );
f( *node_traits::to_value_ptr( *pos.pCur ));
- --m_ItemCounter;
nResult = 1;
}
else {
}
if ( nResult ) {
if ( nResult > 0 ) {
+ --m_ItemCounter;
retire_node( pos.pCur );
+ m_Stat.onEraseSuccess();
return true;
}
+
+ m_Stat.onEraseFailed();
return false;
}
}
+
+ m_Stat.onEraseRetry();
}
}
&& cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 )
{
f( *node_traits::to_value_ptr( *pos.pCur ), val );
+ m_Stat.onFindSuccess();
return true;
}
}
+
+ m_Stat.onFindFailed();
return false;
}
position pos;
search( pHead, val, pos, cmp );
- return pos.pCur != &m_Tail
- && !pos.pCur->is_marked()
- && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0;
+ if ( pos.pCur != &m_Tail && !pos.pCur->is_marked() && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) {
+ m_Stat.onFindSuccess();
+ return true;
+ }
+
+ m_Stat.onFindFailed();
+ return false;
}
template <typename Q, typename Compare>
&& cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 )
{
gp.set( pos.guards.template get<value_type>( position::guard_current_item ));
+ m_Stat.onFindSuccess();
return true;
}
+
+ m_Stat.onFindFailed();
return false;
}
pos.pPred = pPrev.ptr();
}
- static bool validate( node_type * pPred, node_type * pCur )
+ // Validates the (pPred, pCur) pair - both nodes unmarked and still adjacent
+ // (see validate_link) - and records the outcome in the internal statistics.
+ bool validate( node_type * pPred, node_type * pCur ) CDS_NOEXCEPT
+ {
+     if ( validate_link( pPred, pCur )) {
+         m_Stat.onValidationSuccess();
+         return true;
+     }
+
+     m_Stat.onValidationFailed();
+     return false; // BUGFIX: was 'return true' - a failed validation must report failure
+ }
+
+ static bool validate_link( node_type * pPred, node_type * pCur ) CDS_NOEXCEPT
{
return !pPred->is_marked()
&& !pCur->is_marked()
# endif
typedef typename traits::disposer disposer; ///< disposer used
+ typedef typename traits::stat stat; ///< Internal statistics
typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits
typedef typename michael_list::get_link_checker< node_type, traits::link_checker >::type link_checker; ///< link checker
, typename cds::opt::make_options< traits, Options...>::type
> type;
};
+
+ // Stat selector
+ template <typename Stat>
+ using select_stat_wrapper = michael_list::select_stat_wrapper< Stat >;
//@endcond
protected:
typedef atomic_node_ptr auxiliary_head; ///< Auxiliary head type (for split-list support)
- atomic_node_ptr m_pHead; ///< Head pointer
- item_counter m_ItemCounter; ///< Item counter
+ atomic_node_ptr m_pHead; ///< Head pointer
+ item_counter m_ItemCounter; ///< Item counter
+ stat m_Stat; ///< Internal statistics
//@cond
/// Position pointer for item search
static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
}
+ //@cond
+ // Statistics-sharing constructor (split-list support): initializes m_Stat from the
+ // external statistics object \p st.
+ // BUGFIX: the SFINAE guard requires ::type - a bare std::enable_if<cond> is always a
+ // well-formed type, so without ::type this constructor participated in overload
+ // resolution even when \p stat is not \p michael_list::wrapped_stat<Stat>.
+ template <typename Stat, typename = typename std::enable_if<std::is_same<stat, michael_list::wrapped_stat<Stat>>::value>::type>
+ explicit MichaelList( Stat& st )
+     : m_pHead( nullptr )
+     , m_Stat( st )
+ {}
+ //@endcond
+
/// Destroys the list object
~MichaelList()
{
that during changing no any other modifications could be made on this item by concurrent threads.
Returns std::pair<bool, bool> where \p first is \p true if operation is successful,
- \p second is \p true if new item has been added or \p false if the item with \p key
- already is in the list.
+ \p second is \p true if new item has been added or \p false if the item with that key
+ already in the list.
@warning See \ref cds_intrusive_item_creating "insert item troubleshooting"
*/
this function always returns 0.
@note Even if you use real item counter and it returns 0, this fact does not mean that the list
- is empty. To check list emptyness use \p empty() method.
+ is empty. To check list emptiness use \p empty() method.
*/
size_t size() const
{
+ // Value of the item counter; always 0 when item counting is disabled
+ // (empty item counter) - see the doc comment above.
return m_ItemCounter.value();
}
+ /// Returns const reference to internal statistics
+ /**
+     If internal statistics is disabled (\p empty_stat),
+     the returned object collects nothing.
+ */
+ stat const& statistics() const
+ {
+ return m_Stat;
+ }
+
protected:
//@cond
// split-list support
position pos;
while ( true ) {
- if ( search( refHead, val, pos, key_comparator()))
+ if ( search( refHead, val, pos, key_comparator())) {
+ m_Stat.onInsertFailed();
return false;
+ }
if ( link_node( pNode, pos )) {
++m_ItemCounter;
+ m_Stat.onInsertSuccess();
return true;
}
- // clear next field
- pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed );
+ m_Stat.onInsertRetry();
}
}
position pos;
while ( true ) {
- if ( search( refHead, val, pos, key_comparator()))
+ if ( search( refHead, val, pos, key_comparator())) {
+ m_Stat.onInsertFailed();
return false;
+ }
typename gc::Guard guard;
guard.assign( &val );
if ( link_node( pNode, pos )) {
f( val );
++m_ItemCounter;
+ m_Stat.onInsertSuccess();
return true;
}
- // clear next field
- pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed );
+ m_Stat.onInsertRetry();
}
}
if ( search( refHead, val, pos, key_comparator())) {
if ( cds_unlikely( pos.pCur->m_pNext.load(memory_model::memory_order_acquire).bits())) {
back_off()();
+ m_Stat.onUpdateMarked();
continue; // the node found is marked as deleted
}
assert( key_comparator()( val, *node_traits::to_value_ptr( *pos.pCur )) == 0 );
func( false, *node_traits::to_value_ptr( *pos.pCur ) , val );
+ m_Stat.onUpdateExisting();
return std::make_pair( true, false );
}
else {
- if ( !bInsert )
+ if ( !bInsert ) {
+ m_Stat.onUpdateFailed();
return std::make_pair( false, false );
+ }
typename gc::Guard guard;
guard.assign( &val );
if ( link_node( pNode, pos )) {
++m_ItemCounter;
func( true, val, val );
+ m_Stat.onUpdateNew();
return std::make_pair( true, true );
}
- // clear next field
- pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed );
}
+
+ m_Stat.onUpdateRetry();
}
}
if ( node_traits::to_value_ptr( *pos.pCur ) == &val ) {
if ( unlink_node( pos )) {
--m_ItemCounter;
+ m_Stat.onEraseSuccess();
return true;
}
else
bkoff();
}
- else
+ else {
+ m_Stat.onUpdateFailed();
break;
+ }
+
+ m_Stat.onEraseRetry();
}
+
+ m_Stat.onEraseFailed();
return false;
}
if ( unlink_node( pos )) {
f( *node_traits::to_value_ptr( *pos.pCur ));
--m_ItemCounter;
+ m_Stat.onEraseSuccess();
return true;
}
else
bkoff();
+
+ m_Stat.onEraseRetry();
}
+
+ m_Stat.onEraseFailed();
return false;
}
if ( unlink_node( pos )) {
dest.set( pos.guards.template get<value_type>( position::guard_current_item ));
--m_ItemCounter;
+ m_Stat.onEraseSuccess();
return true;
}
else
bkoff();
+ m_Stat.onEraseRetry();
}
+
+ m_Stat.onEraseFailed();
return false;
}
bool find_at( atomic_node_ptr& refHead, Q const& val, Compare cmp )
{
position pos;
- return search( refHead, val, pos, cmp );
+ if ( search( refHead, val, pos, cmp ) ) {
+ m_Stat.onFindSuccess();
+ return true;
+ }
+
+ m_Stat.onFindFailed();
+ return false;
}
template <typename Q, typename Compare, typename Func>
position pos;
if ( search( refHead, val, pos, cmp )) {
f( *node_traits::to_value_ptr( *pos.pCur ), val );
+ m_Stat.onFindSuccess();
return true;
}
+
+ m_Stat.onFindFailed();
return false;
}
position pos;
if ( search( refHead, val, pos, cmp )) {
guard.set( pos.guards.template get<value_type>( position::guard_current_item ));
+ m_Stat.onFindSuccess();
return true;
}
+
+ m_Stat.onFindFailed();
return false;
}
marked_node_ptr cur( pCur.ptr());
if ( cds_unlikely( pPrev->compare_exchange_strong( cur, marked_node_ptr( pNext.ptr()), memory_model::memory_order_acquire, atomics::memory_order_relaxed ))) {
retire_node( pCur.ptr());
+ m_Stat.onHelpingSuccess();
}
else {
bkoff();
+ m_Stat.onHelpingFailed();
goto try_again;
}
}
--- /dev/null
+/*
+ This file is a part of libcds - Concurrent Data Structures library
+
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef CDSLIB_INTRUSIVE_ITERABLE_LIST_DHP_H
+#define CDSLIB_INTRUSIVE_ITERABLE_LIST_DHP_H
+
+#include <cds/intrusive/impl/iterable_list.h>
+#include <cds/gc/dhp.h>
+
+#endif // #ifndef CDSLIB_INTRUSIVE_ITERABLE_LIST_DHP_H
--- /dev/null
+/*
+ This file is a part of libcds - Concurrent Data Structures library
+
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef CDSLIB_INTRUSIVE_ITERABLE_LIST_HP_H
+#define CDSLIB_INTRUSIVE_ITERABLE_LIST_HP_H
+
+#include <cds/gc/hp.h>
+#include <cds/intrusive/impl/iterable_list.h>
+
+#endif // #ifndef CDSLIB_INTRUSIVE_ITERABLE_LIST_HP_H
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_INTRUSIVE_LAZY_LIST_NOGC_H
typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits
typedef typename lazy_list::get_link_checker< node_type, traits::link_checker >::type link_checker; ///< link checker
- typedef typename traits::item_counter item_counter; ///< Item counting policy used
- typedef typename traits::memory_model memory_model; ///< C++ memory ordering (see lazy_list::traits::memory_model)
+ typedef typename traits::item_counter item_counter; ///< Item counting policy used
+ typedef typename traits::memory_model memory_model; ///< C++ memory ordering (see \p lazy_list::traits::memory_model)
+ typedef typename traits::stat stat; ///< Internal statistics
//@cond
+ static_assert((std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type");
+
// Rebind traits (split-list support)
template <typename... Options>
struct rebind_traits {
, typename cds::opt::make_options< traits, Options...>::type
> type;
};
+
+ // Stat selector
+ template <typename Stat>
+ using select_stat_wrapper = lazy_list::select_stat_wrapper< Stat >;
//@endcond
protected:
node_type m_Head; ///< List head (dummy node)
node_type m_Tail; ///< List tail (dummy node)
item_counter m_ItemCounter; ///< Item counter
+ mutable stat m_Stat; ///< Internal statistics
//@cond
/// Default constructor initializes empty list
LazyList()
{
- static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
+ // note: the GC/node_type::gc consistency check moved to the class-scope static_assert
m_Head.m_pNext.store( &m_Tail, memory_model::memory_order_relaxed );
}
+ //@cond
+ // Statistics-sharing constructor (split-list support): initializes m_Stat from the
+ // external statistics object \p st.
+ // BUGFIX: the SFINAE guard requires ::type - a bare std::enable_if<cond> is always a
+ // well-formed type, so without ::type this constructor participated in overload
+ // resolution even when \p stat is not \p lazy_list::wrapped_stat<Stat>.
+ template <typename Stat, typename = typename std::enable_if<std::is_same<stat, lazy_list::wrapped_stat<Stat>>::value>::type>
+ explicit LazyList( Stat& st )
+     : m_Stat( st )
+ {
+     m_Head.m_pNext.store( &m_Tail, memory_model::memory_order_relaxed );
+ }
+ //@endcond
+
/// Destroys the list object
~LazyList()
{
return m_ItemCounter.value();
}
+ /// Returns const reference to internal statistics
+ /**
+     If internal statistics is disabled (the default \p empty_stat),
+     the returned object collects nothing.
+ */
+ stat const& statistics() const
+ {
+ return m_Stat;
+ }
+
protected:
//@cond
// split-list support
if ( validate( pos.pPred, pos.pCur )) {
if ( pos.pCur != &m_Tail && equal( *node_traits::to_value_ptr( *pos.pCur ), val, pred ) ) {
// failed: key already in list
+ m_Stat.onInsertFailed();
return false;
}
else {
link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur );
- ++m_ItemCounter;
- return true;
+ break;
}
}
}
+
+ m_Stat.onInsertRetry();
}
+
+ ++m_ItemCounter;
+ m_Stat.onInsertSuccess();
+ return true;
}
iterator insert_at_( node_type * pHead, value_type& val )
// key already in the list
func( false, *node_traits::to_value_ptr( *pos.pCur ) , val );
+ m_Stat.onUpdateExisting();
return std::make_pair( iterator( pos.pCur ), false );
}
else {
// new key
- if ( !bAllowInsert )
+ if ( !bAllowInsert ) {
+ m_Stat.onUpdateFailed();
return std::make_pair( end(), false );
+ }
link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur );
func( true, val, val );
- ++m_ItemCounter;
- return std::make_pair( iterator( node_traits::to_node_ptr( val )), true );
+ break;
}
}
+
+ m_Stat.onUpdateRetry();
}
}
+
+ ++m_ItemCounter;
+ m_Stat.onUpdateNew();
+ return std::make_pair( iterator( node_traits::to_node_ptr( val ) ), true );
}
template <typename Func>
if ( equal( *node_traits::to_value_ptr( *pos.pCur ), val, pred ) )
{
f( *node_traits::to_value_ptr( *pos.pCur ), val );
+ m_Stat.onFindSuccess();
return true;
}
}
+
+ m_Stat.onFindFailed();
return false;
}
search( pHead, val, pos, pred );
if ( pos.pCur != &m_Tail ) {
- if ( equal( *node_traits::to_value_ptr( *pos.pCur ), val, pred ))
+ if ( equal( *node_traits::to_value_ptr( *pos.pCur ), val, pred )) {
+ m_Stat.onFindSuccess();
return iterator( pos.pCur );
+ }
}
+
+ m_Stat.onFindFailed();
return end();
}
return cmp(l, r) == 0;
}
- static bool validate( node_type * pPred, node_type * pCur )
+ // Checks that pCur is still the direct successor of pPred and records the
+ // outcome in the internal statistics (no longer static - it touches m_Stat).
+ bool validate( node_type * pPred, node_type * pCur )
{
- return pPred->m_pNext.load(memory_model::memory_order_acquire) == pCur;
+ if ( pPred->m_pNext.load(memory_model::memory_order_acquire) == pCur ) {
+ m_Stat.onValidationSuccess();
+ return true;
+ }
+
+ m_Stat.onValidationFailed();
+ return false;
}
// for split-list
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_INTRUSIVE_LAZY_LIST_RCU_H
- \p RCU - one of \ref cds_urcu_gc "RCU type"
- \p T - type to be stored in the list
- \p Traits - type traits. See \p lazy_list::traits for explanation.
-
- It is possible to declare option-based list with \p %cds::intrusive::lazy_list::make_traits metafunction istead of \p Traits template
- argument. Template argument list \p Options of cds::intrusive::lazy_list::make_traits metafunction are:
- - opt::hook - hook used. Possible values are: lazy_list::base_hook, lazy_list::member_hook, lazy_list::traits_hook.
- If the option is not specified, <tt>lazy_list::base_hook<></tt> is used.
- - opt::compare - key comparison functor. No default functor is provided.
- If the option is not specified, the opt::less is used.
- - opt::less - specifies binary predicate used for key comparison. Default is \p std::less<T>.
- - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::empty is used.
- - opt::disposer - the functor used for dispose removed items. Default is opt::v::empty_disposer
- - opt::rcu_check_deadlock - a deadlock checking policy. Default is opt::v::rcu_throw_deadlock
- - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter
- - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default)
- or opt::v::sequential_consistent (sequentially consisnent memory model).
+ It is possible to declare option-based list with \p %cds::intrusive::lazy_list::make_traits metafunction instead of \p Traits template
+ argument.
\par Usage
Before including <tt><cds/intrusive/lazy_list_rcu.h></tt> you should include appropriate RCU header file,
typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits
typedef typename lazy_list::get_link_checker< node_type, traits::link_checker >::type link_checker; ///< link checker
- typedef typename traits::back_off back_off; ///< back-off strategy (not used)
- typedef typename traits::item_counter item_counter; ///< Item counting policy used
- typedef typename traits::memory_model memory_model; ///< C++ memory ordering (see \p lazy_list::traits::memory_model)
- typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
+ typedef typename traits::back_off back_off; ///< back-off strategy (not used)
+ typedef typename traits::item_counter item_counter; ///< Item counting policy used
+ typedef typename traits::memory_model memory_model; ///< C++ memory ordering (see \p lazy_list::traits::memory_model)
+ typedef typename traits::stat stat; ///< Internal statistics
+ typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock
static CDS_CONSTEXPR const bool c_bExtractLockExternal = true; ///< Group of \p extract_xxx functions require external locking
+ static_assert((std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type");
+
//@cond
// Rebind traits (split-list support)
template <typename... Options>
, typename cds::opt::make_options< traits, Options...>::type
> type;
};
+
+ // Stat selector
+ template <typename Stat>
+ using select_stat_wrapper = lazy_list::select_stat_wrapper< Stat >;
//@endcond
protected:
- typedef typename node_type::marked_ptr marked_node_ptr; ///< Node marked pointer
- typedef node_type * auxiliary_head; ///< Auxiliary head type (for split-list support)
+ //@cond
+ typedef typename node_type::marked_ptr marked_node_ptr; ///< Node marked pointer
+ typedef node_type * auxiliary_head; ///< Auxiliary head type (for split-list support)
+ //@endcond
protected:
node_type m_Head; ///< List head (dummy node)
node_type m_Tail; ///< List tail (dummy node)
item_counter m_ItemCounter; ///< Item counter
+ mutable stat m_Stat; ///< Internal statistics
//@cond
/// Default constructor initializes empty list
LazyList()
{
- static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
+ // note: the GC/node_type::gc consistency check moved to the class-scope static_assert
m_Head.m_pNext.store( marked_node_ptr( &m_Tail ), memory_model::memory_order_relaxed );
}
+ //@cond
+ // Statistics-sharing constructor (split-list support): initializes m_Stat from the
+ // external statistics object \p st.
+ // BUGFIX: the SFINAE guard requires ::type - a bare std::enable_if<cond> is always a
+ // well-formed type, so without ::type this constructor participated in overload
+ // resolution even when \p stat is not \p lazy_list::wrapped_stat<Stat>.
+ template <typename Stat, typename = typename std::enable_if<std::is_same<stat, lazy_list::wrapped_stat<Stat>>::value>::type>
+ explicit LazyList( Stat& st )
+     : m_Stat( st )
+ {
+     m_Head.m_pNext.store( marked_node_ptr( &m_Tail ), memory_model::memory_order_relaxed );
+ }
+ //@endcond
+
/// Destroys the list object
~LazyList()
{
this function always returns 0.
<b>Warning</b>: even if you use real item counter and it returns 0, this fact does not mean that the list
- is empty. To check list emptyness use \ref empty() method.
+ is empty. To check list emptiness use \ref empty() method.
*/
size_t size() const
{
+ // Value of the item counter; always 0 when item counting is disabled
+ // (empty item counter) - see the doc comment above.
return m_ItemCounter.value();
}
+ /// Returns const reference to internal statistics
+ /**
+     If internal statistics is disabled (the default \p empty_stat),
+     the returned object collects nothing.
+ */
+ stat const& statistics() const
+ {
+ return m_Stat;
+ }
+
protected:
//@cond
// split-list support
if ( validate( pos.pPred, pos.pCur )) {
if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) {
// failed: key already in list
+ m_Stat.onInsertFailed();
return false;
}
f( val );
link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur );
- ++m_ItemCounter;
- return true;
+ break;
}
}
+
+ m_Stat.onInsertRetry();
}
+
+ ++m_ItemCounter;
+ m_Stat.onInsertSuccess();
+ return true;
}
iterator insert_at_( node_type * pHead, value_type& val )
{
// item found
unlink_node( pos.pPred, pos.pCur, pHead );
- --m_ItemCounter;
nResult = 1;
}
else
if ( nResult ) {
if ( nResult > 0 ) {
+ --m_ItemCounter;
dispose_node( pos.pCur );
+ m_Stat.onEraseSuccess();
return true;
}
+
+ m_Stat.onEraseFailed();
return false;
}
+
+ m_Stat.onEraseRetry();
}
}
// key found
unlink_node( pos.pPred, pos.pCur, pHead );
f( *node_traits::to_value_ptr( *pos.pCur ));
- --m_ItemCounter;
nResult = 1;
}
else
if ( nResult ) {
if ( nResult > 0 ) {
+ --m_ItemCounter;
dispose_node( pos.pCur );
+ m_Stat.onEraseSuccess();
return true;
}
+
+ m_Stat.onEraseFailed();
return false;
}
+
+ m_Stat.onEraseRetry();
}
}
if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) {
// key found
unlink_node( pos.pPred, pos.pCur, pHead );
- --m_ItemCounter;
nResult = 1;
}
else {
}
if ( nResult ) {
- if ( nResult > 0 )
+ if ( nResult > 0 ) {
+ --m_ItemCounter;
+ m_Stat.onEraseSuccess();
return node_traits::to_value_ptr( pos.pCur );
+ }
+
+ m_Stat.onEraseFailed();
return nullptr;
}
+
+ m_Stat.onEraseRetry();
}
}
search( pHead, val, pos, cmp );
if ( pos.pCur != &m_Tail ) {
std::unique_lock< typename node_type::lock_type> al( pos.pCur->m_Lock );
- if ( cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 )
- {
+ if ( cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) {
f( *node_traits::to_value_ptr( *pos.pCur ), val );
+ m_Stat.onFindSuccess();
return true;
}
}
+
+ m_Stat.onFindFailed();
return false;
}
search( pHead, val, pos, cmp );
if ( pos.pCur != &m_Tail ) {
- if ( cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 )
+ if ( cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) {
+ m_Stat.onFindSuccess();
return const_iterator( pos.pCur );
+ }
}
+
+ m_Stat.onFindFailed();
return end();
}
pos.pPred = pPrev.ptr();
}
- static bool validate( node_type * pPred, node_type * pCur ) CDS_NOEXCEPT
+ // Validates the (pPred, pCur) pair via validate_link() and records the
+ // outcome in the internal statistics (no longer static - it touches m_Stat).
+ bool validate( node_type * pPred, node_type * pCur ) CDS_NOEXCEPT
+ {
+ if ( validate_link( pPred, pCur ) ) {
+ m_Stat.onValidationSuccess();
+ return true;
+ }
+
+ m_Stat.onValidationFailed();
+ return false;
+ }
+
+ static bool validate_link( node_type * pPred, node_type * pCur ) CDS_NOEXCEPT
{
// RCU lock should be locked
assert( gc::is_locked());
if ( validate( pos.pPred, pos.pCur )) {
if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) {
// failed: key already in list
+ m_Stat.onInsertFailed();
return false;
}
link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur );
- ++m_ItemCounter;
- return true;
+ break;
}
}
+
+ m_Stat.onInsertRetry();
}
+
+ ++m_ItemCounter;
+ m_Stat.onInsertSuccess();
+ return true;
+
}
template <typename Func>
// key already in the list
func( false, *node_traits::to_value_ptr( *pos.pCur ), val );
+ m_Stat.onUpdateExisting();
return std::make_pair( iterator( pos.pCur ), false );
}
else {
// new key
- if ( !bAllowInsert )
+ if ( !bAllowInsert ) {
+ m_Stat.onUpdateFailed();
return std::make_pair( end(), false );
+ }
func( true, val, val );
link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur );
- ++m_ItemCounter;
- return std::make_pair( iterator( node_traits::to_node_ptr( val )), true );
+ break;
}
}
}
+
+ m_Stat.onUpdateRetry();
}
+
+ ++m_ItemCounter;
+ m_Stat.onUpdateNew();
+ return std::make_pair( iterator( node_traits::to_node_ptr( val )), true );
}
//@endcond
};
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_INTRUSIVE_MICHAEL_LIST_NOGC_H
#include <cds/gc/nogc.h>
#include <cds/details/make_const_type.h>
-
namespace cds { namespace intrusive {
namespace michael_list {
typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits
typedef typename michael_list::get_link_checker< node_type, traits::link_checker >::type link_checker; ///< link checker
- typedef typename traits::back_off back_off; ///< back-off strategy
- typedef typename traits::item_counter item_counter; ///< Item counting policy used
- typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
+ typedef typename traits::back_off back_off; ///< back-off strategy
+ typedef typename traits::item_counter item_counter; ///< Item counting policy used
+ typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
+ typedef typename traits::stat stat; ///< Internal statistics
//@cond
+ static_assert((std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type");
+
// Rebind traits (split-list support)
template <typename... Options>
struct rebind_traits {
, typename cds::opt::make_options< traits, Options...>::type
> type;
};
+
+ // Stat selector
+ template <typename Stat>
+ using select_stat_wrapper = michael_list::select_stat_wrapper< Stat >;
//@endcond
protected:
atomic_node_ptr m_pHead; ///< Head pointer
item_counter m_ItemCounter; ///< Item counter
+ stat m_Stat; ///< Internal statistics
//@cond
/// Position pointer for item search
/// Default constructor initializes empty list
MichaelList()
: m_pHead( nullptr )
- {
- static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
- }
+ {}
+ // note: the GC/node_type::gc consistency check moved to the class-scope static_assert
+
+ //@cond
+ // Statistics-sharing constructor (split-list support): initializes m_Stat from the
+ // external statistics object \p st.
+ // BUGFIX: the SFINAE guard requires ::type - a bare std::enable_if<cond> is always a
+ // well-formed type, so without ::type this constructor participated in overload
+ // resolution even when \p stat is not \p michael_list::wrapped_stat<Stat>.
+ template <typename Stat, typename = typename std::enable_if<std::is_same<stat, michael_list::wrapped_stat<Stat>>::value>::type>
+ explicit MichaelList( Stat& st )
+     : m_pHead( nullptr )
+     , m_Stat( st )
+ {}
+ //@endcond
/// Destroys the list objects
~MichaelList()
return m_ItemCounter.value();
}
+ /// Returns const reference to internal statistics
+ /**
+     If internal statistics is disabled (\p empty_stat),
+     the returned object collects nothing.
+ */
+ stat const& statistics() const
+ {
+ return m_Stat;
+ }
+
protected:
//@cond
// split-list support
position pos;
while ( true ) {
- if ( search( refHead, val, key_comparator(), pos ) )
+ if ( search( refHead, val, key_comparator(), pos )) {
+ m_Stat.onInsertFailed();
return false;
+ }
if ( link_node( node_traits::to_node_ptr( val ), pos ) ) {
++m_ItemCounter;
+ m_Stat.onInsertSuccess();
return true;
}
+
+ m_Stat.onInsertRetry();
}
}
assert( key_comparator()( val, *node_traits::to_value_ptr( *pos.pCur ) ) == 0 );
func( false, *node_traits::to_value_ptr( *pos.pCur ) , val );
+ m_Stat.onUpdateExisting();
return std::make_pair( iterator( pos.pCur ), false );
}
else {
- if ( !bAllowInsert )
+ if ( !bAllowInsert ) {
+ m_Stat.onUpdateFailed();
return std::make_pair( end(), false );
+ }
if ( link_node( node_traits::to_node_ptr( val ), pos ) ) {
++m_ItemCounter;
func( true, val , val );
+ m_Stat.onUpdateNew();
return std::make_pair( iterator( node_traits::to_node_ptr( val )), true );
}
}
+
+ m_Stat.onUpdateRetry();
}
}
if ( search( refHead, val, cmp, pos ) ) {
assert( pos.pCur != nullptr );
f( *node_traits::to_value_ptr( *pos.pCur ), val );
+ m_Stat.onFindSuccess();
return true;
}
+
+ m_Stat.onFindFailed();
return false;
}
value_type * find_at( atomic_node_ptr& refHead, Q const& val, Compare cmp )
{
iterator it = find_at_( refHead, val, cmp );
- if ( it != end() )
+ if ( it != end() ) {
+ m_Stat.onFindSuccess();
return &*it;
+ }
+
+ m_Stat.onFindFailed();
return nullptr;
}
if ( search( refHead, val, cmp, pos ) ) {
assert( pos.pCur != nullptr );
+ m_Stat.onFindSuccess();
return iterator( pos.pCur );
}
+
+ m_Stat.onFindFailed();
return end();
}
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_INTRUSIVE_MICHAEL_LIST_RCU_H
typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits
typedef typename michael_list::get_link_checker< node_type, traits::link_checker >::type link_checker; ///< link checker
- typedef cds::urcu::gc<RCU> gc; ///< RCU schema
- typedef typename traits::back_off back_off; ///< back-off strategy
- typedef typename traits::item_counter item_counter; ///< Item counting policy used
- typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
- typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
+ typedef cds::urcu::gc<RCU> gc; ///< RCU schema
+ typedef typename traits::back_off back_off; ///< back-off strategy
+ typedef typename traits::item_counter item_counter; ///< Item counting policy used
+ typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
+ typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
+ typedef typename traits::stat stat; ///< Internal statistics
typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock
static CDS_CONSTEXPR const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions do not require external locking
, typename cds::opt::make_options< traits, Options...>::type
> type;
};
+
+ // Stat selector
+ template <typename Stat>
+ using select_stat_wrapper = michael_list::select_stat_wrapper< Stat >;
//@endcond
protected:
- typedef typename node_type::marked_ptr marked_node_ptr ; ///< Marked node pointer
- typedef typename node_type::atomic_marked_ptr atomic_node_ptr ; ///< Atomic node pointer
- typedef atomic_node_ptr auxiliary_head ; ///< Auxiliary head type (for split-list support)
+ typedef typename node_type::marked_ptr marked_node_ptr; ///< Marked node pointer
+ typedef typename node_type::atomic_marked_ptr atomic_node_ptr; ///< Atomic node pointer
+ typedef atomic_node_ptr auxiliary_head; ///< Auxiliary head type (for split-list support)
- atomic_node_ptr m_pHead ; ///< Head pointer
- item_counter m_ItemCounter ; ///< Item counter
+ atomic_node_ptr m_pHead; ///< Head pointer
+ item_counter m_ItemCounter; ///< Item counter
+ stat m_Stat; ///< Internal statistics
protected:
//@cond
static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
}
+ //@cond
+ template <typename Stat, typename = std::enable_if<std::is_same<stat, michael_list::wrapped_stat<Stat>>::value >>
+ explicit MichaelList( Stat& st )
+ : m_pHead( nullptr )
+ , m_Stat( st )
+ {}
+ //@endcond
+
/// Destroy list
~MichaelList()
{
return m_ItemCounter.value();
}
+ /// Returns const reference to internal statistics
+ stat const& statistics() const
+ {
+ return m_Stat;
+ }
protected:
//@cond
// split-list support
{
rcu_lock l;
while ( true ) {
- if ( search( refHead, val, pos, key_comparator()))
+ if ( search( refHead, val, pos, key_comparator())) {
+ m_Stat.onInsertFailed();
return false;
+ }
if ( link_node( node_traits::to_node_ptr( val ), pos ) ) {
f( val );
++m_ItemCounter;
+ m_Stat.onInsertSuccess();
return true;
}
// clear next field
node_traits::to_node_ptr( val )->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed );
+ m_Stat.onInsertRetry();
}
}
for (;;) {
{
rcu_lock l;
- if ( !search( refHead, val, pos, key_comparator() ) || node_traits::to_value_ptr( *pos.pCur ) != &val )
+ if ( !search( refHead, val, pos, key_comparator() ) || node_traits::to_value_ptr( *pos.pCur ) != &val ) {
+ m_Stat.onEraseFailed();
return false;
+ }
if ( !unlink_node( pos, erase_mask )) {
bkoff();
+ m_Stat.onEraseRetry();
continue;
}
}
--m_ItemCounter;
+ m_Stat.onEraseSuccess();
return true;
}
}
for (;;) {
{
rcu_lock l;
- if ( !search( pos.refHead, val, pos, cmp ) )
+ if ( !search( pos.refHead, val, pos, cmp ) ) {
+ m_Stat.onEraseFailed();
return false;
+ }
+
// store pCur since it may be changed by unlink_node() slow path
pDel = pos.pCur;
if ( !unlink_node( pos, erase_mask )) {
bkoff();
+ m_Stat.onEraseRetry();
continue;
}
}
assert( pDel );
f( *node_traits::to_value_ptr( pDel ) );
--m_ItemCounter;
+ m_Stat.onEraseSuccess();
return true;
}
}
{
rcu_lock l;
for (;;) {
- if ( !search( refHead, val, pos, cmp ) )
+ if ( !search( refHead, val, pos, cmp )) {
+ m_Stat.onEraseFailed();
return nullptr;
+ }
+
// store pCur since it may be changed by unlink_node() slow path
pExtracted = pos.pCur;
if ( !unlink_node( pos, extract_mask )) {
bkoff();
+ m_Stat.onEraseRetry();
continue;
}
--m_ItemCounter;
value_type * pRet = node_traits::to_value_ptr( pExtracted );
assert( pExtracted->m_pDelChain == nullptr );
+ m_Stat.onEraseSuccess();
return pRet;
}
}
if ( search( refHead, val, pos, cmp ) ) {
assert( pos.pCur != nullptr );
f( *node_traits::to_value_ptr( *pos.pCur ), val );
+ m_Stat.onFindSuccess();
return true;
}
- return false;
- }
+ }
+
+ m_Stat.onFindFailed();
+ return false;
}
template <typename Q, typename Compare>
position pos( refHead );
- if ( search( refHead, val, pos, cmp ))
+ if ( search( refHead, val, pos, cmp )) {
+ m_Stat.onFindSuccess();
return raw_ptr( node_traits::to_value_ptr( pos.pCur ), raw_ptr_disposer( pos ));
+ }
+
+ m_Stat.onFindFailed();
return raw_ptr( raw_ptr_disposer( pos ));
}
//@endcond
if ( cds_likely( pPrev->compare_exchange_weak( pCur, marked_node_ptr( pNext.ptr() ), memory_model::memory_order_acquire, atomics::memory_order_relaxed ))) {
if ( pNext.bits() == erase_mask )
link_to_remove_chain( pos, pCur.ptr() );
+ m_Stat.onHelpingSuccess();
}
+ m_Stat.onHelpingFailed();
goto try_again;
}
assert( gc::is_locked() );
while ( true ) {
- if ( search( pos.refHead, val, pos, key_comparator() ) )
+ if ( search( pos.refHead, val, pos, key_comparator() )) {
+ m_Stat.onInsertFailed();
return false;
+ }
if ( link_node( node_traits::to_node_ptr( val ), pos ) ) {
++m_ItemCounter;
+ m_Stat.onInsertSuccess();
return true;
}
// clear next field
node_traits::to_node_ptr( val )->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed );
+ m_Stat.onInsertRetry();
}
}
assert( key_comparator()( val, *node_traits::to_value_ptr( *pos.pCur ) ) == 0 );
func( false, *node_traits::to_value_ptr( *pos.pCur ), val );
+ m_Stat.onUpdateExisting();
return std::make_pair( iterator( pos.pCur ), false );
}
else {
- if ( !bInsert )
+ if ( !bInsert ) {
+ m_Stat.onUpdateFailed();
return std::make_pair( end(), false );
+ }
if ( link_node( node_traits::to_node_ptr( val ), pos ) ) {
++m_ItemCounter;
func( true, val , val );
+ m_Stat.onUpdateNew();
return std::make_pair( iterator( node_traits::to_node_ptr( val )), true );
}
// clear the next field
node_traits::to_node_ptr( val )->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed );
+ m_Stat.onUpdateRetry();
}
}
}
if ( search( pos.refHead, val, pos, cmp ) ) {
assert( pos.pCur != nullptr );
+ m_Stat.onFindSuccess();
return const_iterator( pos.pCur );
}
+
+ m_Stat.onFindFailed();
return cend();
}
//@endcond
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_INTRUSIVE_MICHAEL_SET_H
#define CDSLIB_INTRUSIVE_MICHAEL_SET_H
#include <cds/intrusive/details/michael_set_base.h>
-#include <cds/details/allocator.h>
+#include <cds/intrusive/details/iterable_list_base.h>
namespace cds { namespace intrusive {
Template parameters are:
- \p GC - Garbage collector used. Note the \p GC must be the same as the GC used for \p OrderedList
- - \p OrderedList - ordered list implementation used as bucket for hash set, for example, \p MichaelList, \p LazyList.
+ - \p OrderedList - ordered list implementation used as bucket for hash set, for example, \p MichaelList, \p LazyList, \p IterableList.
The intrusive ordered list implementation specifies the type \p T stored in the hash-set, the reclamation
schema \p GC used by hash-set, the comparison functor for the type \p T and other features specific for
the ordered list.
\code
// Our node type
struct Foo {
- std::string key_; // key field
+ std::string key_; // key field
// ... other fields
};
public:
typedef GC gc; ///< Garbage collector
typedef OrderedList ordered_list; ///< type of ordered list used as a bucket implementation
- typedef ordered_list bucket_type; ///< bucket type
- typedef Traits traits; ///< Set traits
+ typedef Traits traits; ///< Set traits
- typedef typename ordered_list::value_type value_type ; ///< type of value to be stored in the set
- typedef typename ordered_list::key_comparator key_comparator ; ///< key comparing functor
- typedef typename ordered_list::disposer disposer ; ///< Node disposer functor
+ typedef typename ordered_list::value_type value_type ; ///< type of value to be stored in the set
+ typedef typename ordered_list::key_comparator key_comparator ; ///< key comparing functor
+ typedef typename ordered_list::disposer disposer ; ///< Node disposer functor
+ typedef typename ordered_list::stat stat ; ///< Internal statistics
/// Hash functor for \p value_type and all its derivatives that you use
typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
typedef typename traits::item_counter item_counter; ///< Item counter type
+ typedef typename traits::allocator allocator; ///< Bucket table allocator
typedef typename ordered_list::guarded_ptr guarded_ptr; ///< Guarded pointer
- /// Bucket table allocator
- typedef cds::details::Allocator< bucket_type, typename traits::allocator > bucket_table_allocator;
-
/// Count of hazard pointer required for the algorithm
static CDS_CONSTEXPR const size_t c_nHazardPtrCount = ordered_list::c_nHazardPtrCount;
- protected:
- item_counter m_ItemCounter; ///< Item counter
- hash m_HashFunctor; ///< Hash functor
- bucket_type * m_Buckets; ///< bucket table
+ // GC and OrderedList::gc must be the same
+ static_assert(std::is_same<gc, typename ordered_list::gc>::value, "GC and OrderedList::gc must be the same");
- private:
- //@cond
- const size_t m_nHashBitmask;
- //@endcond
+ // atomicity::empty_item_counter is not allowed as an item counter
+ static_assert(!std::is_same<item_counter, atomicity::empty_item_counter>::value,
+ "cds::atomicity::empty_item_counter is not allowed as a item counter");
protected:
//@cond
- /// Calculates hash value of \p key
- template <typename Q>
- size_t hash_value( const Q& key ) const
- {
- return m_HashFunctor( key ) & m_nHashBitmask;
- }
+ typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat;
- /// Returns the bucket (ordered list) for \p key
- template <typename Q>
- bucket_type& bucket( const Q& key )
- {
- return m_Buckets[ hash_value( key ) ];
- }
+ typedef typename ordered_list::template rebind_traits<
+ cds::opt::item_counter< cds::atomicity::empty_item_counter >
+ , cds::opt::stat< typename bucket_stat::wrapped_stat >
+ >::type internal_bucket_type;
+
+ typedef typename allocator::template rebind< internal_bucket_type >::other bucket_table_allocator;
+
+ hash m_HashFunctor; ///< Hash functor
+ size_t const m_nHashBitmask;
+ internal_bucket_type* m_Buckets; ///< bucket table
+ item_counter m_ItemCounter; ///< Item counter
+ typename bucket_stat::stat m_Stat; ///< Internal statistics
//@endcond
public:
- ///@name Forward iterators (only for debugging purpose)
+ ///@name Forward iterators
//@{
/// Forward iterator
/**
- it has no post-increment operator
- it iterates items in unordered fashion
- The iterator cannot be moved across thread boundary because it may contain GC's guard that is thread-private GC data.
- - Iterator ensures thread-safety even if you delete the item that iterator points to. However, in case of concurrent
- deleting operations it is no guarantee that you iterate all item in the set.
- Moreover, a crash is possible when you try to iterate the next element that has been deleted by concurrent thread.
- @warning Use this iterator on the concurrent container for debugging purpose only.
+ Iterator thread safety depends on type of \p OrderedList:
+ - for \p MichaelList and \p LazyList: iterator guarantees safety even if you delete the item that iterator points to
+ because that item is guarded by hazard pointer.
+ However, in case of concurrent deleting operations there is no guarantee that you will iterate over all items in the set.
+ Moreover, a crash is possible when you try to iterate the next element that has been deleted by concurrent thread.
+ Use this iterator on the concurrent container for debugging purpose only.
+ - for \p IterableList: iterator is thread-safe. You may use it freely in concurrent environment.
+
*/
- typedef michael_set::details::iterator< bucket_type, false > iterator;
+ typedef michael_set::details::iterator< internal_bucket_type, false > iterator;
/// Const forward iterator
/**
For iterator's features and requirements see \ref iterator
*/
- typedef michael_set::details::iterator< bucket_type, true > const_iterator;
+ typedef michael_set::details::iterator< internal_bucket_type, true > const_iterator;
/// Returns a forward iterator addressing the first element in a set
/**
*/
iterator begin()
{
- return iterator( m_Buckets[0].begin(), m_Buckets, m_Buckets + bucket_count() );
+ return iterator( m_Buckets[0].begin(), bucket_begin(), bucket_end() );
}
/// Returns an iterator that addresses the location succeeding the last element in a set
*/
iterator end()
{
- return iterator( m_Buckets[bucket_count() - 1].end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() );
+ return iterator( bucket_end()[-1].end(), bucket_end() - 1, bucket_end() );
}
/// Returns a forward const iterator addressing the first element in a set
}
//@}
- private:
- //@cond
- const_iterator get_const_begin() const
- {
- return const_iterator( m_Buckets[0].cbegin(), m_Buckets, m_Buckets + bucket_count() );
- }
- const_iterator get_const_end() const
- {
- return const_iterator( m_Buckets[bucket_count() - 1].cend(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() );
- }
- //@endcond
-
public:
/// Initializes hash set
/** @anchor cds_intrusive_MichaelHashSet_hp_ctor
size_t nMaxItemCount, ///< estimation of max item count in the hash set
size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket. Small integer up to 10.
) : m_nHashBitmask( michael_set::details::init_hash_bitmask( nMaxItemCount, nLoadFactor ))
+ , m_Buckets( bucket_table_allocator().allocate( bucket_count()))
{
- // GC and OrderedList::gc must be the same
- static_assert( std::is_same<gc, typename bucket_type::gc>::value, "GC and OrderedList::gc must be the same");
-
- // atomicity::empty_item_counter is not allowed as a item counter
- static_assert( !std::is_same<item_counter, atomicity::empty_item_counter>::value,
- "cds::atomicity::empty_item_counter is not allowed as a item counter");
-
- m_Buckets = bucket_table_allocator().NewArray( bucket_count() );
+ for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it )
+ construct_bucket<bucket_stat>( it );
}
/// Clears hash set object and destroys it
~MichaelHashSet()
{
clear();
- bucket_table_allocator().Delete( m_Buckets, bucket_count() );
+
+ for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it )
+ it->~internal_bucket_type();
+ bucket_table_allocator().deallocate( m_Buckets, bucket_count() );
}
/// Inserts new node
If the item \p val not found in the set, then \p val is inserted iff \p bAllowInsert is \p true.
Otherwise, the functor \p func is called with item found.
- The functor signature is:
- \code
- struct functor {
- void operator()( bool bNew, value_type& item, value_type& val );
- };
- \endcode
- with arguments:
- - \p bNew - \p true if the item has been inserted, \p false otherwise
- - \p item - item of the set
- - \p val - argument \p val passed into the \p %update() function
- If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments
- refers to the same thing.
- The functor may change non-key fields of the \p item.
+ The functor signature depends of the type of \p OrderedList:
+
+ <b>for \p MichaelList, \p LazyList</b>
+ \code
+ struct functor {
+ void operator()( bool bNew, value_type& item, value_type& val );
+ };
+ \endcode
+ with arguments:
+ - \p bNew - \p true if the item has been inserted, \p false otherwise
+ - \p item - item of the set
+ - \p val - argument \p val passed into the \p %update() function
+ If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments
+ refers to the same thing.
+
+ The functor may change non-key fields of the \p item.
+ @warning For \ref cds_intrusive_MichaelList_hp "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting".
+ \ref cds_intrusive_LazyList_hp "LazyList" provides exclusive access to inserted item and does not require any node-level
+ synchronization.
+
+ <b>for \p IterableList</b>
+ \code
+ void func( value_type& val, value_type * old );
+ \endcode
+ where
+ - \p val - argument \p val passed into the \p %update() function
+ - \p old - old value that will be retired. If new item has been inserted then \p old is \p nullptr.
Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successful,
\p second is \p true if new item has been added or \p false if the item with \p key
already is in the set.
-
- @warning For \ref cds_intrusive_MichaelList_hp "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting".
- \ref cds_intrusive_LazyList_hp "LazyList" provides exclusive access to inserted item and does not require any node-level
- synchronization.
*/
template <typename Func>
std::pair<bool, bool> update( value_type& val, Func func, bool bAllowInsert = true )
}
//@endcond
+ /// Inserts or updates the node (only for \p IterableList)
+ /**
+ The operation performs inserting or changing data with lock-free manner.
+
+ If the item \p val is not found in the set, then \p val is inserted iff \p bAllowInsert is \p true.
+ Otherwise, the current element is changed to \p val, the old element will be retired later
+ by call \p Traits::disposer.
+
+ Returns std::pair<bool, bool> where \p first is \p true if operation is successful,
+ \p second is \p true if \p val has been added or \p false if the item with that key
+ is already in the set.
+ */
+#ifdef CDS_DOXYGEN_INVOKED
+ std::pair<bool, bool> upsert( value_type& val, bool bAllowInsert = true )
+#else
+ template <typename Q>
+ typename std::enable_if<
+ std::is_same< Q, value_type>::value && is_iterable_list< ordered_list >::value,
+ std::pair<bool, bool>
+ >::type
+ upsert( Q& val, bool bAllowInsert = true )
+#endif
+ {
+ std::pair<bool, bool> bRet = bucket( val ).upsert( val, bAllowInsert );
+ if ( bRet.second )
+ ++m_ItemCounter;
+ return bRet;
+ }
+
/// Unlinks the item \p val from the set
/**
The function searches the item \p val in the set and unlink it
}
//@endcond
+ /// Finds \p key and returns iterator pointed to the item found (only for \p IterableList)
+ /**
+ If \p key is not found the function returns \p end().
+
+ @note This function is supported only for the set based on \p IterableList
+ */
+ template <typename Q>
+#ifdef CDS_DOXYGEN_INVOKED
+ iterator
+#else
+ typename std::enable_if< std::is_same<Q,Q>::value && is_iterable_list< ordered_list >::value, iterator >::type
+#endif
+ find( Q& key )
+ {
+ internal_bucket_type& b = bucket( key );
+ typename internal_bucket_type::iterator it = b.find( key );
+ if ( it == b.end() )
+ return end();
+ return iterator( it, &b, bucket_end());
+ }
+ //@cond
+ template <typename Q>
+ typename std::enable_if< std::is_same<Q, Q>::value && is_iterable_list< ordered_list >::value, iterator >::type
+ find( Q const& key )
+ {
+ internal_bucket_type& b = bucket( key );
+ typename internal_bucket_type::iterator it = b.find( key );
+ if ( it == b.end() )
+ return end();
+ return iterator( it, &b, bucket_end() );
+ }
+ //@endcond
+
+
/// Finds the key \p key using \p pred predicate for searching
/**
The function is an analog of \ref cds_intrusive_MichaelHashSet_hp_find_func "find(Q&, Func)"
}
//@endcond
+ /// Finds \p key using \p pred predicate and returns iterator pointed to the item found (only for \p IterableList)
+ /**
+ The function is an analog of \p find(Q&) but \p pred is used for key comparing.
+ \p Less functor has the interface like \p std::less.
+ \p pred must imply the same element order as the comparator used for building the set.
+
+ If \p key is not found the function returns \p end().
+
+ @note This function is supported only for the set based on \p IterableList
+ */
+ template <typename Q, typename Less>
+#ifdef CDS_DOXYGEN_INVOKED
+ iterator
+#else
+ typename std::enable_if< std::is_same<Q, Q>::value && is_iterable_list< ordered_list >::value, iterator >::type
+#endif
+ find_with( Q& key, Less pred )
+ {
+ internal_bucket_type& b = bucket( key );
+ typename internal_bucket_type::iterator it = b.find_with( key, pred );
+ if ( it == b.end() )
+ return end();
+ return iterator( it, &b, bucket_end() );
+ }
+ //@cond
+ template <typename Q, typename Less>
+ typename std::enable_if< std::is_same<Q, Q>::value && is_iterable_list< ordered_list >::value, iterator >::type
+ find_with( Q const& key, Less pred )
+ {
+ internal_bucket_type& b = bucket( key );
+ typename internal_bucket_type::iterator it = b.find_with( key, pred );
+ if ( it == b.end() )
+ return end();
+ return iterator( it, &b, bucket_end() );
+ }
+ //@endcond
+
/// Checks whether the set contains \p key
/**
{
return bucket( key ).contains( key );
}
- //@cond
- template <typename Q>
- CDS_DEPRECATED("use contains()")
- bool find( Q const& key )
- {
- return contains( key );
- }
- //@endcond
/// Checks whether the set contains \p key using \p pred predicate for searching
/**
{
return bucket( key ).contains( key, pred );
}
- //@cond
- template <typename Q, typename Less>
- CDS_DEPRECATED("use contains()")
- bool find_with( Q const& key, Less pred )
- {
- return contains( key, pred );
- }
- //@endcond
/// Finds the key \p key and return the item found
/** \anchor cds_intrusive_MichaelHashSet_hp_get
return m_ItemCounter;
}
+ /// Returns const reference to internal statistics
+ stat const& statistics() const
+ {
+ return m_Stat;
+ }
+
/// Returns the size of hash table
/**
Since \p %MichaelHashSet cannot dynamically extend the hash table size,
{
return m_nHashBitmask + 1;
}
+
+ private:
+ //@cond
+ internal_bucket_type * bucket_begin() const
+ {
+ return m_Buckets;
+ }
+
+ internal_bucket_type * bucket_end() const
+ {
+ return m_Buckets + bucket_count();
+ }
+
+ const_iterator get_const_begin() const
+ {
+ return const_iterator( m_Buckets[0].cbegin(), bucket_begin(), bucket_end() );
+ }
+ const_iterator get_const_end() const
+ {
+ return const_iterator( bucket_end()[-1].cend(), bucket_end() - 1, bucket_end() );
+ }
+
+ template <typename Stat>
+ typename std::enable_if< Stat::empty >::type construct_bucket( internal_bucket_type * bucket )
+ {
+ new (bucket) internal_bucket_type;
+ }
+
+ template <typename Stat>
+ typename std::enable_if< !Stat::empty >::type construct_bucket( internal_bucket_type * bucket )
+ {
+ new (bucket) internal_bucket_type( m_Stat );
+ }
+
+ /// Calculates hash value of \p key
+ template <typename Q>
+ size_t hash_value( const Q& key ) const
+ {
+ return m_HashFunctor( key ) & m_nHashBitmask;
+ }
+
+ /// Returns the bucket (ordered list) for \p key
+ template <typename Q>
+ internal_bucket_type& bucket( const Q& key )
+ {
+ return m_Buckets[hash_value( key )];
+ }
+ //@endcond
};
}} // namespace cds::intrusive
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_INTRUSIVE_MICHAEL_SET_NOGC_H
#include <cds/intrusive/details/michael_set_base.h>
#include <cds/gc/nogc.h>
-#include <cds/details/allocator.h>
namespace cds { namespace intrusive {
class MichaelHashSet< cds::gc::nogc, OrderedList, Traits >
{
public:
- typedef cds::gc::nogc gc; ///< Garbage collector
- typedef OrderedList bucket_type; ///< Type of ordered list to be used as buckets
- typedef Traits traits; ///< Set traits
+ typedef cds::gc::nogc gc; ///< Garbage collector
+ typedef OrderedList ordered_list; ///< type of ordered list used as a bucket implementation
+ typedef Traits traits; ///< Set traits
- typedef typename bucket_type::value_type value_type; ///< type of value to be stored in the set
- typedef typename bucket_type::key_comparator key_comparator; ///< key comparing functor
- typedef typename bucket_type::disposer disposer; ///< Node disposer functor
+ typedef typename ordered_list::value_type value_type; ///< type of value to be stored in the set
+ typedef typename ordered_list::key_comparator key_comparator; ///< key comparing functor
+ typedef typename ordered_list::disposer disposer; ///< Node disposer functor
+ typedef typename ordered_list::stat stat; ///< Internal statistics
/// Hash functor for \p value_type and all its derivatives that you use
typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
typedef typename traits::item_counter item_counter; ///< Item counter type
+ typedef typename traits::allocator allocator; ///< Bucket table allocator
- /// Bucket table allocator
- typedef cds::details::Allocator< bucket_type, typename traits::allocator > bucket_table_allocator;
+ // GC and OrderedList::gc must be the same
+ static_assert(std::is_same<gc, typename ordered_list::gc>::value, "GC and OrderedList::gc must be the same");
- protected:
- item_counter m_ItemCounter; ///< Item counter
- hash m_HashFunctor; ///< Hash functor
- bucket_type * m_Buckets; ///< bucket table
+ // atomicity::empty_item_counter is not allowed as an item counter
+ static_assert(!std::is_same<item_counter, atomicity::empty_item_counter>::value,
+ "atomicity::empty_item_counter is not allowed as a item counter");
- private:
+ protected:
//@cond
- const size_t m_nHashBitmask;
+ typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat;
+
+ typedef typename ordered_list::template rebind_traits<
+ cds::opt::item_counter< cds::atomicity::empty_item_counter >
+ , cds::opt::stat< typename bucket_stat::wrapped_stat >
+ >::type internal_bucket_type;
+
+ typedef typename allocator::template rebind< internal_bucket_type >::other bucket_table_allocator;
+
+ hash m_HashFunctor; ///< Hash functor
+ const size_t m_nHashBitmask;
+ internal_bucket_type * m_Buckets; ///< bucket table
+ item_counter m_ItemCounter; ///< Item counter
+ typename bucket_stat::stat m_Stat; ///< Internal statistics
//@endcond
protected:
/// Returns the bucket (ordered list) for \p key
template <typename Q>
- bucket_type& bucket( Q const & key )
+ internal_bucket_type& bucket( Q const & key )
{
return m_Buckets[ hash_value( key ) ];
}
};
\endcode
*/
- typedef michael_set::details::iterator< bucket_type, false > iterator;
+ typedef michael_set::details::iterator< internal_bucket_type, false > iterator;
/// Const forward iterator
/**
For iterator's features and requirements see \ref iterator
*/
- typedef michael_set::details::iterator< bucket_type, true > const_iterator;
+ typedef michael_set::details::iterator< internal_bucket_type, true > const_iterator;
/// Returns a forward iterator addressing the first element in a set
/**
size_t nMaxItemCount, ///< estimation of max item count in the hash set
size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket
) : m_nHashBitmask( michael_set::details::init_hash_bitmask( nMaxItemCount, nLoadFactor ))
+ , m_Buckets( bucket_table_allocator().allocate( bucket_count() ) )
{
- // GC and OrderedList::gc must be the same
- static_assert( std::is_same<gc, typename bucket_type::gc>::value, "GC and OrderedList::gc must be the same");
-
- // atomicity::empty_item_counter is not allowed as a item counter
- static_assert( !std::is_same<item_counter, atomicity::empty_item_counter>::value,
- "atomicity::empty_item_counter is not allowed as a item counter");
-
- m_Buckets = bucket_table_allocator().NewArray( bucket_count() );
+ for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it )
+ construct_bucket<bucket_stat>( it );
}
/// Clears hash set object and destroys it
~MichaelHashSet()
{
clear();
- bucket_table_allocator().Delete( m_Buckets, bucket_count() );
+
+ for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it )
+ it->~internal_bucket_type();
+ bucket_table_allocator().deallocate( m_Buckets, bucket_count() );
}
/// Inserts new node
return m_nHashBitmask + 1;
}
+ /// Returns const reference to internal statistics
+ stat const& statistics() const
+ {
+ return m_Stat;
+ }
+
+ private:
+ //@cond
+ template <typename Stat>
+ typename std::enable_if< Stat::empty >::type construct_bucket( internal_bucket_type * bucket )
+ {
+ new (bucket) internal_bucket_type;
+ }
+
+ template <typename Stat>
+ typename std::enable_if< !Stat::empty >::type construct_bucket( internal_bucket_type * bucket )
+ {
+ new (bucket) internal_bucket_type( m_Stat );
+ }
+ //@endcond
};
}} // namespace cds::intrusive
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_INTRUSIVE_MICHAEL_SET_RCU_H
#define CDSLIB_INTRUSIVE_MICHAEL_SET_RCU_H
#include <cds/intrusive/details/michael_set_base.h>
-#include <cds/details/allocator.h>
namespace cds { namespace intrusive {
class MichaelHashSet< cds::urcu::gc< RCU >, OrderedList, Traits >
{
public:
- typedef cds::urcu::gc< RCU > gc; ///< RCU schema
- typedef OrderedList bucket_type; ///< type of ordered list used as a bucket implementation
- typedef Traits traits; ///< Set traits
+ typedef cds::urcu::gc< RCU > gc; ///< RCU schema
+ typedef OrderedList ordered_list; ///< type of ordered list used as a bucket implementation
+ typedef Traits traits; ///< Set traits
- typedef typename bucket_type::value_type value_type ; ///< type of value stored in the list
- typedef typename bucket_type::key_comparator key_comparator ; ///< key comparing functor
- typedef typename bucket_type::disposer disposer ; ///< Node disposer functor
+ typedef typename ordered_list::value_type value_type; ///< type of value stored in the list
+ typedef typename ordered_list::key_comparator key_comparator; ///< key comparing functor
+ typedef typename ordered_list::disposer disposer; ///< Node disposer functor
+ typedef typename ordered_list::stat stat; ///< Internal statistics
/// Hash functor for \ref value_type and all its derivatives that you use
typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
typedef typename traits::item_counter item_counter; ///< Item counter type
+ typedef typename traits::allocator allocator; ///< Bucket table allocator
- /// Bucket table allocator
- typedef cds::details::Allocator< bucket_type, typename traits::allocator > bucket_table_allocator;
-
- typedef typename bucket_type::rcu_lock rcu_lock; ///< RCU scoped lock
- typedef typename bucket_type::exempt_ptr exempt_ptr; ///< pointer to extracted node
- typedef typename bucket_type::raw_ptr raw_ptr; ///< Return type of \p get() member function and its derivatives
+ typedef typename ordered_list::rcu_lock rcu_lock; ///< RCU scoped lock
/// Group of \p extract_xxx functions require external locking if underlying ordered list requires that
- static CDS_CONSTEXPR const bool c_bExtractLockExternal = bucket_type::c_bExtractLockExternal;
+ static CDS_CONSTEXPR const bool c_bExtractLockExternal = ordered_list::c_bExtractLockExternal;
- protected:
- item_counter m_ItemCounter; ///< Item counter
- hash m_HashFunctor; ///< Hash functor
- bucket_type * m_Buckets; ///< bucket table
+ // GC and OrderedList::gc must be the same
+ static_assert(std::is_same<gc, typename ordered_list::gc>::value, "GC and OrderedList::gc must be the same");
- private:
- //@cond
- const size_t m_nHashBitmask;
- //@endcond
+ // atomicity::empty_item_counter is not allowed as an item counter
+ static_assert(!std::is_same<item_counter, atomicity::empty_item_counter>::value,
+ "atomicity::empty_item_counter is not allowed as a item counter");
protected:
//@cond
- /// Calculates hash value of \p key
- template <typename Q>
- size_t hash_value( Q const& key ) const
- {
- return m_HashFunctor( key ) & m_nHashBitmask;
- }
+ typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat;
- /// Returns the bucket (ordered list) for \p key
- template <typename Q>
- bucket_type& bucket( Q const& key )
- {
- return m_Buckets[ hash_value( key ) ];
- }
- template <typename Q>
- bucket_type const& bucket( Q const& key ) const
- {
- return m_Buckets[ hash_value( key ) ];
- }
+ typedef typename ordered_list::template rebind_traits<
+ cds::opt::item_counter< cds::atomicity::empty_item_counter >
+ , cds::opt::stat< typename bucket_stat::wrapped_stat >
+ >::type internal_bucket_type;
+
+ typedef typename allocator::template rebind< internal_bucket_type >::other bucket_table_allocator;
+ //@endcond
+
+ public:
+ typedef typename internal_bucket_type::exempt_ptr exempt_ptr; ///< pointer to extracted node
+ typedef typename internal_bucket_type::raw_ptr raw_ptr; ///< Return type of \p get() member function and its derivatives
+
+ private:
+ //@cond
+ hash m_HashFunctor; ///< Hash functor
+ size_t const m_nHashBitmask;
+ internal_bucket_type* m_Buckets; ///< bucket table
+ item_counter m_ItemCounter; ///< Item counter
+ typename bucket_stat::stat m_Stat; ///< Internal statistics
//@endcond
public:
};
\endcode
*/
- typedef michael_set::details::iterator< bucket_type, false > iterator;
+ typedef michael_set::details::iterator< internal_bucket_type, false > iterator;
/// Const forward iterator
/**
For iterator's features and requirements see \ref iterator
*/
- typedef michael_set::details::iterator< bucket_type, true > const_iterator;
+ typedef michael_set::details::iterator< internal_bucket_type, true > const_iterator;
/// Returns a forward iterator addressing the first element in a set
/**
size_t nMaxItemCount, ///< estimation of max item count in the hash set
size_t nLoadFactor ///< load factor: average size of the bucket
) : m_nHashBitmask( michael_set::details::init_hash_bitmask( nMaxItemCount, nLoadFactor ))
+ , m_Buckets( bucket_table_allocator().allocate( bucket_count() ) )
{
- // GC and OrderedList::gc must be the same
- static_assert( std::is_same<gc, typename bucket_type::gc>::value, "GC and OrderedList::gc must be the same");
-
- // atomicity::empty_item_counter is not allowed as a item counter
- static_assert( !std::is_same<item_counter, atomicity::empty_item_counter>::value,
- "atomicity::empty_item_counter is not allowed as a item counter");
-
- m_Buckets = bucket_table_allocator().NewArray( bucket_count() );
+ for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it )
+ construct_bucket<bucket_stat>( it );
}
/// Clear hash set and destroy it
~MichaelHashSet()
{
clear();
- bucket_table_allocator().Delete( m_Buckets, bucket_count() );
+
+ for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it )
+ it->~internal_bucket_type();
+ bucket_table_allocator().deallocate( m_Buckets, bucket_count() );
}
/// Inserts new node
unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
If the item with the key equal to \p key is not found the function returns an empty \p exempt_ptr.
- Depends on \p bucket_type you should or should not lock RCU before calling of this function:
+ Depending on \p ordered_list, you should or should not lock RCU before calling this function:
- for the set based on \ref cds_intrusive_MichaelList_rcu "MichaelList" RCU should not be locked
- for the set based on \ref cds_intrusive_LazyList_rcu "LazyList" RCU should be locked
/** \anchor cds_intrusive_MichaelHashSet_rcu_get
The function searches the item with key equal to \p key and returns the pointer to item found.
If \p key is not found it returns \p nullptr.
- Note the type of returned value depends on underlying \p bucket_type.
+ Note the type of returned value depends on underlying \p ordered_list.
For details, see documentation of ordered list you use.
Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
return m_nHashBitmask + 1;
}
+ /// Returns const reference to internal statistics
+ stat const& statistics() const
+ {
+ return m_Stat;
+ }
+
+ private:
+ //@cond
+ template <typename Stat>
+ typename std::enable_if< Stat::empty >::type construct_bucket( internal_bucket_type * bucket )
+ {
+ new (bucket) internal_bucket_type;
+ }
+
+ template <typename Stat>
+ typename std::enable_if< !Stat::empty >::type construct_bucket( internal_bucket_type * bucket )
+ {
+ new (bucket) internal_bucket_type( m_Stat );
+ }
+
+ /// Calculates hash value of \p key
+ template <typename Q>
+ size_t hash_value( Q const& key ) const
+ {
+ return m_HashFunctor( key ) & m_nHashBitmask;
+ }
+
+ /// Returns the bucket (ordered list) for \p key
+ template <typename Q>
+ internal_bucket_type& bucket( Q const& key )
+ {
+ return m_Buckets[hash_value( key )];
+ }
+ template <typename Q>
+ internal_bucket_type const& bucket( Q const& key ) const
+ {
+ return m_Buckets[hash_value( key )];
+ }
+ //@endcond
};
}} // namespace cds::intrusive
#endif // #ifndef CDSLIB_INTRUSIVE_MICHAEL_SET_NOGC_H
-
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_INTRUSIVE_MSPRIORITY_QUEUE_H
struct stat {
typedef Counter event_counter ; ///< Event counter type
- event_counter m_nPushCount ; ///< Count of success push operation
- event_counter m_nPopCount ; ///< Count of success pop operation
- event_counter m_nPushFailCount ; ///< Count of failed ("the queue is full") push operation
- event_counter m_nPopFailCount ; ///< Count of failed ("the queue is empty") pop operation
- event_counter m_nPushHeapifySwapCount ; ///< Count of item swapping when heapifying in push
- event_counter m_nPopHeapifySwapCount ; ///< Count of item swapping when heapifying in pop
+ event_counter m_nPushCount; ///< Count of success push operation
+ event_counter m_nPopCount; ///< Count of success pop operation
+ event_counter m_nPushFailCount; ///< Count of failed ("the queue is full") push operation
+ event_counter m_nPopFailCount; ///< Count of failed ("the queue is empty") pop operation
+ event_counter m_nPushHeapifySwapCount; ///< Count of item swapping when heapifying in push
+ event_counter m_nPopHeapifySwapCount; ///< Count of item swapping when heapifying in pop
+ event_counter m_nItemMovedTop; ///< Count of events when \p push() encountered that inserted item was moved to top by a concurrent \p pop()
+ event_counter m_nItemMovedUp; ///< Count of events when \p push() encountered that inserted item was moved upwards by a concurrent \p pop()
+ event_counter m_nPushEmptyPass; ///< Count of empty pass during heapify via concurrent operations
//@cond
void onPushSuccess() { ++m_nPushCount ;}
void onPopFailed() { ++m_nPopFailCount ;}
void onPushHeapifySwap() { ++m_nPushHeapifySwapCount ;}
void onPopHeapifySwap() { ++m_nPopHeapifySwapCount ;}
+
+ void onItemMovedTop() { ++m_nItemMovedTop ;}
+ void onItemMovedUp() { ++m_nItemMovedUp ;}
+ void onPushEmptyPass() { ++m_nPushEmptyPass ;}
//@endcond
};
/// MSPriorityQueue empty statistics
struct empty_stat {
//@cond
- void onPushSuccess() {}
- void onPopSuccess() {}
- void onPushFailed() {}
- void onPopFailed() {}
- void onPushHeapifySwap() {}
- void onPopHeapifySwap() {}
+ void onPushSuccess() const {}
+ void onPopSuccess() const {}
+ void onPushFailed() const {}
+ void onPopFailed() const {}
+ void onPushHeapifySwap() const {}
+ void onPopHeapifySwap() const {}
+
+ void onItemMovedTop() const {}
+ void onItemMovedUp() const {}
+ void onPushEmptyPass() const {}
//@endcond
};
+ /// Monotonic item counter, see \p traits::item_counter for explanation
+ class monotonic_counter
+ {
+ //@cond
+ public:
+ typedef size_t counter_type;
+
+ monotonic_counter()
+ : m_nCounter(0)
+ {}
+
+ size_t inc()
+ {
+ return ++m_nCounter;
+ }
+
+ size_t dec()
+ {
+ return m_nCounter--;
+ }
+
+ size_t value() const
+ {
+ return m_nCounter;
+ }
+
+ private:
+ size_t m_nCounter;
+ //@endcond
+ };
+
/// MSPriorityQueue traits
struct traits {
/// Storage type
*/
typedef opt::none less;
- /// Type of mutual-exclusion lock
+ /// Type of mutual-exclusion lock. The lock need not be recursive.
typedef cds::sync::spin lock_type;
/// Back-off strategy
or any other with interface like \p %mspriority_queue::stat
*/
typedef empty_stat stat;
+
+ /// Item counter type
+ /**
+ Two types are possible:
+ - \p cds::bitop::bit_reverse_counter - a counter described in <a href="http://www.research.ibm.com/people/m/michael/ipl-1996.pdf">original paper</a>,
+ which was developed for reducing lock contention. However, the bit-reversing technique requires more memory than the classic heapifying algorithm
+ because of sparsing of elements: for priority queue of max size \p N the bit-reversing technique requires array size up to 2<sup>K</sup>
+ where \p K - the nearest power of two such that <tt>2<sup>K</sup> >= N</tt>.
+ - \p mspriority_queue::monotonic_counter - a classic monotonic item counter. This counter can lead to false sharing under high contention.
+ On the other hand, for a priority queue of max size \p N it requires an array of size \p N.
+
+ By default, \p MSPriorityQueue uses \p %cds::bitop::bit_reverse_counter as described in original paper.
+ */
+ typedef cds::bitop::bit_reverse_counter<> item_counter;
};
/// Metafunction converting option list to traits
- \p opt::lock_type - lock type. Default is \p cds::sync::spin
- \p opt::back_off - back-off strategy. Default is \p cds::backoff::yield
- \p opt::stat - internal statistics. Available types: \p mspriority_queue::stat, \p mspriority_queue::empty_stat (the default, no overhead)
+ - \p opt::item_counter - an item counter type for \p MSPriorityQueue.
+ Available type: \p cds::bitop::bit_reverse_counter, \p mspriority_queue::monotonic_counter. See \p traits::item_counter for details.
*/
template <typename... Options>
struct make_traits {
typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator;
# endif
- typedef typename traits::lock_type lock_type; ///< heap's size lock type
- typedef typename traits::back_off back_off; ///< Back-off strategy
- typedef typename traits::stat stat; ///< internal statistics type
+ typedef typename traits::lock_type lock_type; ///< heap's size lock type
+ typedef typename traits::back_off back_off; ///< Back-off strategy
+ typedef typename traits::stat stat; ///< internal statistics type, see \p mspriority_queue::traits::stat
+ typedef typename traits::item_counter item_counter;///< Item counter type, see \p mspriority_queue::traits::item_counter
protected:
//@cond
typedef typename traits::buffer::template rebind<node>::other buffer_type ; ///< Heap array buffer type
//@cond
- typedef cds::bitop::bit_reverse_counter<> item_counter_type;
- typedef typename item_counter_type::counter_type counter_type;
+ typedef typename item_counter::counter_type counter_type;
//@endcond
protected:
- item_counter_type m_ItemCounter ; ///< Item counter
+ item_counter m_ItemCounter ; ///< Item counter
mutable lock_type m_Lock ; ///< Heap's size lock
buffer_type m_Heap ; ///< Heap array
stat m_Stat ; ///< internal statistics accumulator
node& refNode = m_Heap[i];
refNode.lock();
m_Lock.unlock();
+ assert( refNode.m_nTag == tag_type( Empty ));
+ assert( refNode.m_pVal == nullptr );
refNode.m_pVal = &val;
refNode.m_nTag = curId;
refNode.unlock();
- // Move item towards top of the heap while it has higher priority than parent
+ // Move item towards top of heap while it has a higher priority than its parent
heapify_after_push( i, curId );
m_Stat.onPushSuccess();
*/
value_type * pop()
{
+ node& refTop = m_Heap[1];
+
m_Lock.lock();
if ( m_ItemCounter.value() == 0 ) {
// the heap is empty
m_Stat.onPopFailed();
return nullptr;
}
- counter_type nBottom = m_ItemCounter.reversed_value();
- m_ItemCounter.dec();
- // Since m_Heap[0] is not used, capacity() returns m_Heap.capacity() - 1
- // Consequently, "<=" is here
- assert( nBottom <= capacity() );
+ counter_type nBottom = m_ItemCounter.dec();
+ assert( nBottom < m_Heap.capacity() );
assert( nBottom > 0 );
- node& refBottom = m_Heap[ nBottom ];
+ refTop.lock();
+ if ( nBottom == 1 ) {
+ refTop.m_nTag = tag_type( Empty );
+ value_type * pVal = refTop.m_pVal;
+ refTop.m_pVal = nullptr;
+ refTop.unlock();
+ m_Lock.unlock();
+ m_Stat.onPopSuccess();
+ return pVal;
+ }
+
+ node& refBottom = m_Heap[nBottom];
refBottom.lock();
m_Lock.unlock();
refBottom.m_nTag = tag_type(Empty);
refBottom.m_pVal = nullptr;
refBottom.unlock();
- node& refTop = m_Heap[ 1 ];
- refTop.lock();
if ( refTop.m_nTag == tag_type(Empty) ) {
// nBottom == nTop
refTop.unlock();
refTop.m_nTag = tag_type( Available );
// refTop will be unlocked inside heapify_after_pop
- heapify_after_pop( 1, &refTop );
+ heapify_after_pop( &refTop );
m_Stat.onPopSuccess();
return pVal;
template <typename Func>
void clear_with( Func f )
{
- while ( !empty() ) {
- value_type * pVal = pop();
- if ( pVal )
- f( *pVal );
- }
+ value_type * pVal;
+ while (( pVal = pop()) != nullptr )
+ f( *pVal );
}
/// Checks is the priority queue is empty
size_t size() const
{
std::unique_lock<lock_type> l( m_Lock );
- size_t nSize = (size_t) m_ItemCounter.value();
- return nSize;
+ return static_cast<size_t>( m_ItemCounter.value());
}
/// Return capacity of the priority queue
i = 0;
}
}
- else if ( refParent.m_nTag == tag_type(Empty) )
+ else if ( refParent.m_nTag == tag_type( Empty ) ) {
+ m_Stat.onItemMovedTop();
i = 0;
- else if ( refItem.m_nTag != curId )
+ }
+ else if ( refItem.m_nTag != curId ) {
+ m_Stat.onItemMovedUp();
i = nParent;
- else
+ }
+ else {
+ m_Stat.onPushEmptyPass();
bProgress = false;
+ }
refItem.unlock();
refParent.unlock();
}
}
- void heapify_after_pop( counter_type nParent, node * pParent )
+ void heapify_after_pop( node * pParent )
{
key_comparator cmp;
+ counter_type const nCapacity = m_Heap.capacity();
+
+ counter_type nParent = 1;
+ for ( counter_type nChild = nParent * 2; nChild < nCapacity; nChild *= 2 ) {
+ node* pChild = &m_Heap[ nChild ];
+ pChild->lock();
- while ( nParent < m_Heap.capacity() / 2 ) {
- counter_type nLeft = nParent * 2;
- counter_type nRight = nLeft + 1;
- node& refLeft = m_Heap[nLeft];
- node& refRight = m_Heap[nRight];
- refLeft.lock();
- refRight.lock();
-
- counter_type nChild;
- node * pChild;
- if ( refLeft.m_nTag == tag_type(Empty) ) {
- refRight.unlock();
- refLeft.unlock();
+ if ( pChild->m_nTag == tag_type( Empty )) {
+ pChild->unlock();
break;
}
- else if ( refRight.m_nTag == tag_type(Empty) || cmp( *refLeft.m_pVal, *refRight.m_pVal ) > 0 ) {
- refRight.unlock();
- nChild = nLeft;
- pChild = &refLeft;
- }
- else {
- refLeft.unlock();
- nChild = nRight;
- pChild = &refRight;
+
+ counter_type const nRight = nChild + 1;
+ if ( nRight < nCapacity ) {
+ node& refRight = m_Heap[nRight];
+ refRight.lock();
+
+ if ( refRight.m_nTag != tag_type( Empty ) && cmp( *refRight.m_pVal, *pChild->m_pVal ) > 0 ) {
+ // get right child
+ pChild->unlock();
+ nChild = nRight;
+ pChild = &refRight;
+ }
+ else
+ refRight.unlock();
}
- // If child has higher priority that parent then swap
+ // If child has higher priority than parent then swap
// Otherwise stop
if ( cmp( *pChild->m_pVal, *pParent->m_pVal ) > 0 ) {
std::swap( pParent->m_nTag, pChild->m_nTag );
class uninitialized_dynamic_buffer
{
public:
- typedef T value_type; ///< Value type
+ typedef T value_type; ///< Value type
+ typedef Alloc allocator; ///< Allocator type
static CDS_CONSTEXPR const bool c_bExp2 = Exp2; ///< \p Exp2 flag
/// Rebind buffer for other template parameters
- template <typename Q, typename Alloc2=Alloc, bool Exp22 = c_bExp2>
+ template <typename Q, typename Alloc2= allocator, bool Exp22 = c_bExp2>
struct rebind {
typedef uninitialized_dynamic_buffer<Q, Alloc2, Exp22> other; ///< Rebinding result type
};
//@cond
- typedef typename Alloc::template rebind<value_type>::other allocator_type;
+ typedef typename allocator::template rebind<value_type>::other allocator_type;
//@endcond
private:
class initialized_dynamic_buffer
{
public:
- typedef T value_type; ///< Value type
+ typedef T value_type; ///< Value type
+ typedef Alloc allocator; ///< Allocator type
static CDS_CONSTEXPR const bool c_bExp2 = Exp2; ///< \p Exp2 flag
/// Rebind buffer for other template parameters
- template <typename Q, typename Alloc2=Alloc, bool Exp22 = c_bExp2>
+ template <typename Q, typename Alloc2= allocator, bool Exp22 = c_bExp2>
struct rebind {
typedef initialized_dynamic_buffer<Q, Alloc2, Exp22> other; ///< Rebinding result type
};
//@cond
- typedef cds::details::Allocator<value_type, Alloc> allocator_type;
+ typedef cds::details::Allocator<value_type, allocator> allocator_type;
//@endcond
private:
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_SYNC_SPINLOCK_H
[1984] L. Rudolph, Z. Segall. Dynamic Decentralized Cache Schemes for MIMD Parallel Processors.
No serialization performed - any of waiting threads may owns the spin-lock.
- This spin-lock is NOT recursive: the thread owned the lock cannot call lock() method withod deadlock.
- The method unlock() can call any thread
+ This spin-lock is NOT recursive: the thread owned the lock cannot call \p lock() method without deadlock.
+ The method \p unlock() can call any thread
DEBUG version: The spinlock stores owner thead id. Assertion is raised when:
- double lock attempt encountered by same thread (deadlock)
- unlock by another thread
- If spin-lock is locked the Backoff algorithm is called. Predefined backoff::LockDefault class yields current
+ If spin-lock is locked the \p Backoff algorithm is called. Predefined \p backoff::LockDefault class yields current
thread and repeats lock attempts later
Template parameters:
- - @p Backoff backoff strategy. Used when spin lock is locked
+ - \p Backoff - backoff strategy. Used when spin lock is locked
*/
template <typename Backoff >
class spin_lock
return !bCurrent;
}
- /// Try to lock the object, repeat @p nTryCount times if failed
+ /// Try to lock the object, repeat \p nTryCount times if failed
/**
Returns \p true if locking is succeeded
otherwise (if the spin is already locked) returns \p false
Allows recursive calls: the owner thread may recursive enter to critical section guarded by the spin-lock.
Template parameters:
- - @p Integral one of integral atomic type: <tt>unsigned int</tt>, <tt>int</tt>, and others
- - @p Backoff backoff strategy. Used when spin lock is locked
+ - \p Integral - one of integral atomic types: <tt>unsigned int</tt>, \p int, and others
+ - \p Backoff backoff strategy. Used when spin lock is locked
*/
template <typename Integral, class Backoff>
class reentrant_spin_lock
private:
atomics::atomic<integral_type> m_spin ; ///< spin-lock atomic
- thread_id m_OwnerId ; ///< Owner thread id. If spin-lock is not locked it usually equals to OS::c_NullThreadId
+ thread_id m_OwnerId ; ///< Owner thread id. If spin-lock is not locked it usually equals to \p OS::c_NullThreadId
private:
//@cond
return !( m_spin.load( atomics::memory_order_relaxed ) == 0 || is_taken( cds::OS::get_current_thread_id() ));
}
- /// Try to lock the spin-lock (synonym for \ref try_lock)
+ /// Try to lock the spin-lock (synonym for \p try_lock())
bool try_lock() CDS_NOEXCEPT
{
thread_id tid = OS::get_current_thread_id();
}
}
- /// Unlock the spin-lock. Return @p true if the current thread is owner of spin-lock @p false otherwise
+ /// Unlock the spin-lock. Return \p true if the current thread is owner of spin-lock, \p false otherwise
bool unlock() CDS_NOEXCEPT
{
if ( is_taken( OS::get_current_thread_id() ) ) {
are removed.
- Fixed: use-after-free bug in VyukovMPMCCycleQueue internal buffer.
To prevent this bug the queue uses an uninitialized buffer now.
+ - Fixed: rare priority inversion bug in MSPriorityQueue
- Added: for minimizing runtime of stress test the detail level for some test is added.
Command line argument --detail-level=N specifies what test should be ran: each
test with level not great than N will be ran. Instead of command line arg
<ClInclude Include="..\..\..\cds\intrusive\cuckoo_set.h" />\r
<ClInclude Include="..\..\..\cds\intrusive\details\base.h" />\r
<ClInclude Include="..\..\..\cds\intrusive\details\ellen_bintree_base.h" />\r
+ <ClInclude Include="..\..\..\cds\intrusive\details\iterable_list_base.h" />\r
<ClInclude Include="..\..\..\cds\intrusive\details\lazy_list_base.h" />\r
<ClInclude Include="..\..\..\cds\intrusive\details\michael_list_base.h" />\r
<ClInclude Include="..\..\..\cds\intrusive\details\michael_set_base.h" />\r
<ClInclude Include="..\..\..\cds\intrusive\free_list.h" />\r
<ClInclude Include="..\..\..\cds\intrusive\free_list_tagged.h" />\r
<ClInclude Include="..\..\..\cds\intrusive\impl\ellen_bintree.h" />\r
+ <ClInclude Include="..\..\..\cds\intrusive\impl\iterable_list.h" />\r
<ClInclude Include="..\..\..\cds\intrusive\impl\lazy_list.h" />\r
<ClInclude Include="..\..\..\cds\intrusive\impl\michael_list.h" />\r
<ClInclude Include="..\..\..\cds\intrusive\impl\feldman_hashset.h" />\r
<ClInclude Include="..\..\..\cds\intrusive\impl\skip_list.h" />\r
+ <ClInclude Include="..\..\..\cds\intrusive\iterable_list_dhp.h" />\r
+ <ClInclude Include="..\..\..\cds\intrusive\iterable_list_hp.h" />\r
<ClInclude Include="..\..\..\cds\intrusive\lazy_list_dhp.h" />\r
<ClInclude Include="..\..\..\cds\intrusive\lazy_list_rcu.h" />\r
<ClInclude Include="..\..\..\cds\intrusive\michael_list_dhp.h" />\r
<Filter Include="Header Files\cds\intrusive">\r
<UniqueIdentifier>{7226715d-6777-4c01-8e66-83b3885c00c1}</UniqueIdentifier>\r
</Filter>\r
- <Filter Include="Header Files\cds\container">\r
- <UniqueIdentifier>{84ca9e83-f6c9-4503-a45f-14f08317fd70}</UniqueIdentifier>\r
- </Filter>\r
- <Filter Include="Header Files\cds\container\details">\r
- <UniqueIdentifier>{4b79fe31-4f6c-4e05-8910-1151a26d51f3}</UniqueIdentifier>\r
- </Filter>\r
<Filter Include="Resource Files">\r
<UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>\r
<Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx</Extensions>\r
<Filter Include="Header Files\cds\intrusive\striped_set">\r
<UniqueIdentifier>{560b4d4c-71e1-443c-942e-dcc5a275c7c2}</UniqueIdentifier>\r
</Filter>\r
- <Filter Include="Header Files\cds\container\striped_map">\r
- <UniqueIdentifier>{6530b757-5bb7-4de0-b1c9-019acc8183ba}</UniqueIdentifier>\r
- </Filter>\r
- <Filter Include="Header Files\cds\container\striped_set">\r
- <UniqueIdentifier>{d3f68c37-8c36-448e-9d4c-cd89a940d275}</UniqueIdentifier>\r
- </Filter>\r
<Filter Include="Header Files\cds\urcu">\r
<UniqueIdentifier>{32754dfc-727a-42ff-b243-9a8510bf5c4e}</UniqueIdentifier>\r
</Filter>\r
<Filter Include="Header Files\cds\intrusive\impl">\r
<UniqueIdentifier>{00a14aa8-3035-4b56-bc86-442ca9bf8f44}</UniqueIdentifier>\r
</Filter>\r
- <Filter Include="Header Files\cds\container\impl">\r
- <UniqueIdentifier>{0a2328b4-ff6f-4afb-8de0-9884ae172fa9}</UniqueIdentifier>\r
- </Filter>\r
<Filter Include="Header Files\cds\gc\impl">\r
<UniqueIdentifier>{3195cce2-1710-4b79-a1cf-6c7cea085fa3}</UniqueIdentifier>\r
</Filter>\r
<Filter Include="Header Files\cds\algo\flat_combining">\r
<UniqueIdentifier>{fe703227-44ad-4ad6-bae4-b6c9f5c65355}</UniqueIdentifier>\r
</Filter>\r
+ <Filter Include="Header Files\cds\container">\r
+ <UniqueIdentifier>{84ca9e83-f6c9-4503-a45f-14f08317fd70}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Header Files\cds\container\details">\r
+ <UniqueIdentifier>{4b79fe31-4f6c-4e05-8910-1151a26d51f3}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Header Files\cds\container\striped_map">\r
+ <UniqueIdentifier>{6530b757-5bb7-4de0-b1c9-019acc8183ba}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Header Files\cds\container\striped_set">\r
+ <UniqueIdentifier>{d3f68c37-8c36-448e-9d4c-cd89a940d275}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Header Files\cds\container\impl">\r
+ <UniqueIdentifier>{0a2328b4-ff6f-4afb-8de0-9884ae172fa9}</UniqueIdentifier>\r
+ </Filter>\r
</ItemGroup>\r
<ItemGroup>\r
<ClCompile Include="..\..\..\src\dllmain.cpp">\r
<ClInclude Include="..\..\..\cds\algo\flat_combining\kernel.h">\r
<Filter>Header Files\cds\algo\flat_combining</Filter>\r
</ClInclude>\r
+ <ClInclude Include="..\..\..\cds\intrusive\details\iterable_list_base.h">\r
+ <Filter>Header Files\cds\intrusive\details</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\..\cds\intrusive\impl\iterable_list.h">\r
+ <Filter>Header Files\cds\intrusive\impl</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\..\cds\intrusive\iterable_list_dhp.h">\r
+ <Filter>Header Files\cds\intrusive</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\..\cds\intrusive\iterable_list_hp.h">\r
+ <Filter>Header Files\cds\intrusive</Filter>\r
+ </ClInclude>\r
</ItemGroup>\r
</Project>
\ No newline at end of file
</ProjectConfiguration>\r
</ItemGroup>\r
<ItemGroup>\r
+ <ClCompile Include="..\..\..\test\unit\intrusive-set\intrusive_michael_iterable_dhp.cpp" />\r
+ <ClCompile Include="..\..\..\test\unit\intrusive-set\intrusive_michael_iterable_hp.cpp" />\r
<ClCompile Include="..\..\..\test\unit\main.cpp" />\r
<ClCompile Include="..\..\..\test\unit\intrusive-set\intrusive_feldman_hashset_dhp.cpp" />\r
<ClCompile Include="..\..\..\test\unit\intrusive-set\intrusive_feldman_hashset_hp.cpp" />\r
<ClInclude Include="..\..\..\test\unit\intrusive-set\test_intrusive_feldman_hashset.h" />\r
<ClInclude Include="..\..\..\test\unit\intrusive-set\test_intrusive_feldman_hashset_hp.h" />\r
<ClInclude Include="..\..\..\test\unit\intrusive-set\test_intrusive_feldman_hashset_rcu.h" />\r
+ <ClInclude Include="..\..\..\test\unit\intrusive-set\test_intrusive_michael_iterable.h" />\r
+ <ClInclude Include="..\..\..\test\unit\intrusive-set\test_intrusive_michael_iterable_hp.h" />\r
<ClInclude Include="..\..\..\test\unit\intrusive-set\test_intrusive_michael_lazy_rcu.h" />\r
<ClInclude Include="..\..\..\test\unit\intrusive-set\test_intrusive_michael_michael_rcu.h" />\r
<ClInclude Include="..\..\..\test\unit\intrusive-set\test_intrusive_set.h" />\r
<Optimization>Disabled</Optimization>\r
<PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
+ <AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>\r
+ <DisableSpecificWarnings>4503</DisableSpecificWarnings>\r
</ClCompile>\r
<Link>\r
<SubSystem>Console</SubSystem>\r
<Optimization>Disabled</Optimization>\r
<PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
+ <AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>\r
+ <DisableSpecificWarnings>4503</DisableSpecificWarnings>\r
</ClCompile>\r
<Link>\r
<SubSystem>Console</SubSystem>\r
<Optimization>Disabled</Optimization>\r
<PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
+ <AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>\r
+ <DisableSpecificWarnings>4503</DisableSpecificWarnings>\r
</ClCompile>\r
<Link>\r
<SubSystem>Console</SubSystem>\r
<Optimization>Disabled</Optimization>\r
<PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
+ <AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>\r
+ <DisableSpecificWarnings>4503</DisableSpecificWarnings>\r
</ClCompile>\r
<Link>\r
<SubSystem>Console</SubSystem>\r
<IntrinsicFunctions>true</IntrinsicFunctions>\r
<PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
+ <AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>\r
+ <DisableSpecificWarnings>4503</DisableSpecificWarnings>\r
</ClCompile>\r
<Link>\r
<SubSystem>Console</SubSystem>\r
<IntrinsicFunctions>true</IntrinsicFunctions>\r
<PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
+ <AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>\r
+ <DisableSpecificWarnings>4503</DisableSpecificWarnings>\r
</ClCompile>\r
<Link>\r
<SubSystem>Console</SubSystem>\r
<ClCompile Include="..\..\..\test\unit\intrusive-set\intrusive_feldman_hashset_rcu_sht.cpp">\r
<Filter>Source Files\FeldmanHashSet</Filter>\r
</ClCompile>\r
+ <ClCompile Include="..\..\..\test\unit\intrusive-set\intrusive_michael_iterable_hp.cpp">\r
+ <Filter>Source Files\MichaelSet</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\..\test\unit\intrusive-set\intrusive_michael_iterable_dhp.cpp">\r
+ <Filter>Source Files\MichaelSet</Filter>\r
+ </ClCompile>\r
</ItemGroup>\r
<ItemGroup>\r
<ClInclude Include="..\..\..\test\unit\intrusive-set\test_intrusive_set.h">\r
<ClInclude Include="..\..\..\test\unit\intrusive-set\test_michael_michael_rcu.h">\r
<Filter>Header Files</Filter>\r
</ClInclude>\r
+ <ClInclude Include="..\..\..\test\unit\intrusive-set\test_intrusive_michael_iterable.h">\r
+ <Filter>Header Files</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\..\test\unit\intrusive-set\test_intrusive_michael_iterable_hp.h">\r
+ <Filter>Header Files</Filter>\r
+ </ClInclude>\r
</ItemGroup>\r
</Project>
\ No newline at end of file
</ProjectConfiguration>\r
</ItemGroup>\r
<ItemGroup>\r
+ <ClInclude Include="..\..\..\test\unit\list\test_intrusive_iterable_list.h" />\r
+ <ClInclude Include="..\..\..\test\unit\list\test_intrusive_iterable_list_hp.h" />\r
<ClInclude Include="..\..\..\test\unit\list\test_intrusive_lazy_rcu.h" />\r
<ClInclude Include="..\..\..\test\unit\list\test_intrusive_list.h" />\r
<ClInclude Include="..\..\..\test\unit\list\test_intrusive_list_hp.h" />\r
<ClInclude Include="..\..\..\test\unit\list\test_michael_rcu.h" />\r
</ItemGroup>\r
<ItemGroup>\r
+ <ClCompile Include="..\..\..\test\unit\list\intrusive_iterable_dhp.cpp" />\r
+ <ClCompile Include="..\..\..\test\unit\list\intrusive_iterable_hp.cpp" />\r
<ClCompile Include="..\..\..\test\unit\list\intrusive_lazy_dhp.cpp" />\r
<ClCompile Include="..\..\..\test\unit\list\intrusive_lazy_hp.cpp" />\r
<ClCompile Include="..\..\..\test\unit\list\intrusive_lazy_nogc.cpp" />\r
<ClInclude Include="..\..\..\test\unit\list\test_kv_lazy_rcu.h">\r
<Filter>Header Files</Filter>\r
</ClInclude>\r
+ <ClInclude Include="..\..\..\test\unit\list\test_intrusive_iterable_list_hp.h">\r
+ <Filter>Header Files</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\..\test\unit\list\test_intrusive_iterable_list.h">\r
+ <Filter>Header Files</Filter>\r
+ </ClInclude>\r
</ItemGroup>\r
<ItemGroup>\r
<ClCompile Include="..\..\..\test\unit\list\intrusive_michael_hp.cpp">\r
<ClCompile Include="..\..\..\test\unit\list\kv_lazy_rcu_sht.cpp">\r
<Filter>Source Files</Filter>\r
</ClCompile>\r
+ <ClCompile Include="..\..\..\test\unit\list\intrusive_iterable_hp.cpp">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\..\test\unit\list\intrusive_iterable_dhp.cpp">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
</ItemGroup>\r
</Project>
\ No newline at end of file
typedef cds_test::stress_fixture base_class;
protected:
- template <class PQueue>
- class Producer: public cds_test::thread
- {
- typedef cds_test::thread base_class;
-
- public:
- Producer( cds_test::thread_pool& pool, PQueue& queue )
- : base_class( pool )
- , m_Queue( queue )
- {}
-
- Producer( Producer& src )
- : base_class( src )
- , m_Queue( src.m_Queue )
- {}
-
- virtual thread * clone()
- {
- return new Producer( *this );
- }
-
- virtual void test()
- {
- typedef typename PQueue::value_type value_type;
- for ( array_type::const_iterator it = m_arr.begin(); it != m_arr.end(); ++it ) {
- if ( !m_Queue.push( value_type( *it ) ))
- ++m_nPushError;
- }
- }
-
- void prepare( size_t nStart, size_t nEnd )
- {
- m_arr.reserve( nEnd - nStart );
- for ( size_t i = nStart; i < nEnd; ++i )
- m_arr.push_back( i );
- shuffle( m_arr.begin(), m_arr.end() );
- }
-
- public:
- PQueue& m_Queue;
- size_t m_nPushError = 0;
-
- typedef std::vector<size_t> array_type;
- array_type m_arr;
- };
-
template <class PQueue>
class Consumer: public cds_test::thread
{
++m_nPopSuccess;
nPrevKey = val.key;
- while ( !m_Queue.empty() ) {
- if ( m_Queue.pop( val )) {
- ++m_nPopSuccess;
- if ( val.key > nPrevKey )
- ++m_nPopError;
- else if ( val.key == nPrevKey )
- ++m_nPopErrorEq;
- nPrevKey = val.key;
+ bool prevPopFailed = false;
+ while ( m_Queue.pop( val )) {
+ ++m_nPopSuccess;
+ if ( val.key > nPrevKey ) {
+ ++m_nPopError;
+ m_arrFailedPops.emplace_back( failed_pops{ nPrevKey, val.key, static_cast<size_t>(-1) } );
+ prevPopFailed = true;
+ }
+ else if ( val.key == nPrevKey ) {
+ ++m_nPopErrorEq;
+ m_arrFailedPops.emplace_back( failed_pops{ nPrevKey, val.key, static_cast<size_t>(-1) } );
}
- else
- ++m_nPopFailed;
+ else {
+ if ( prevPopFailed )
+ m_arrFailedPops.back().next_key = val.key;
+ prevPopFailed = false;
+ }
+ if ( nPrevKey > val.key )
+ nPrevKey = val.key;
}
+
}
else
++m_nPopFailed;
size_t m_nPopErrorEq = 0;
size_t m_nPopSuccess = 0;
size_t m_nPopFailed = 0;
+
+ struct failed_pops {
+ size_t prev_key;
+ size_t popped_key;
+ size_t next_key;
+ };
+ std::vector< failed_pops > m_arrFailedPops;
};
protected:
template <class PQueue>
void test( PQueue& q )
{
- size_t const nThreadItemCount = s_nQueueSize / s_nThreadCount;
- s_nQueueSize = nThreadItemCount * s_nThreadCount;
-
cds_test::thread_pool& pool = get_pool();
- propout() << std::make_pair( "thread_count", s_nThreadCount )
- << std::make_pair( "push_count", s_nQueueSize );
-
// push
{
- pool.add( new Producer<PQueue>( pool, q ), s_nThreadCount );
+ std::vector< size_t > arr;
+ arr.reserve( s_nQueueSize );
+ for ( size_t i = 0; i < s_nQueueSize; ++i )
+ arr.push_back( i );
+ shuffle( arr.begin(), arr.end() );
- size_t nStart = 0;
- for ( size_t i = 0; i < pool.size(); ++i ) {
- static_cast<Producer<PQueue>&>( pool.get(i) ).prepare( nStart, nStart + nThreadItemCount );
- nStart += nThreadItemCount;
+ size_t nPushError = 0;
+ typedef typename PQueue::value_type value_type;
+ for ( auto it = arr.begin(); it != arr.end(); ++it ) {
+ if ( !q.push( value_type( *it ) ))
+ ++nPushError;
}
-
- std::chrono::milliseconds duration = pool.run();
- propout() << std::make_pair( "producer_duration", duration );
+ s_nQueueSize -= nPushError;
}
+ propout() << std::make_pair( "thread_count", s_nThreadCount )
+ << std::make_pair( "push_count", s_nQueueSize );
+
// pop
{
- pool.clear();
pool.add( new Consumer<PQueue>( pool, q ), s_nThreadCount );
std::chrono::milliseconds duration = pool.run();
nTotalError += cons.m_nPopError;
nTotalErrorEq += cons.m_nPopErrorEq;
nTotalFailed += cons.m_nPopFailed;
+
+ if ( !cons.m_arrFailedPops.empty() ) {
+ std::cerr << "Priority violations, thread " << i;
+ for ( size_t k = 0; k < cons.m_arrFailedPops.size(); ++k ) {
+ std::cerr << "\n " << "prev_key=" << cons.m_arrFailedPops[k].prev_key << " popped_key=" << cons.m_arrFailedPops[k].popped_key;
+ if ( cons.m_arrFailedPops[k].next_key != static_cast<size_t>(-1) )
+ std::cerr << " next_key=" << cons.m_arrFailedPops[k].next_key;
+ else
+ std::cerr << " next_key unspecified";
+ }
+ std::cerr << std::endl;
+ }
}
propout()
<< std::make_pair( "error_priority_violation", nTotalError );
EXPECT_EQ( nTotalPopped, s_nQueueSize );
- EXPECT_EQ( nTotalError, 0 );
- EXPECT_EQ( nTotalErrorEq, 0 );
+ EXPECT_EQ( nTotalError, 0 ) << "priority violations";
+ EXPECT_EQ( nTotalErrorEq, 0 ) << "double key";
}
propout() << q.statistics();
TEST_F( fixture_t, pqueue_t ) \
{ \
typedef pqueue::Types<pqueue::simple_value>::pqueue_t pqueue_type; \
- pqueue_type pq( s_nQueueSize ); \
+ pqueue_type pq( s_nQueueSize + 1 ); \
test( pq ); \
}
- CDSSTRESS_MSPriorityQueue( pqueue_pop, MSPriorityQueue_dyn_less )
- CDSSTRESS_MSPriorityQueue( pqueue_pop, MSPriorityQueue_dyn_less_stat )
+ CDSSTRESS_MSPriorityQueue( pqueue_pop, MSPriorityQueue_dyn_bitreverse_less )
+ CDSSTRESS_MSPriorityQueue( pqueue_pop, MSPriorityQueue_dyn_bitreverse_less_stat )
+ CDSSTRESS_MSPriorityQueue( pqueue_pop, MSPriorityQueue_dyn_monotonic_less )
+ CDSSTRESS_MSPriorityQueue( pqueue_pop, MSPriorityQueue_dyn_monotonic_less_stat )
CDSSTRESS_MSPriorityQueue( pqueue_pop, MSPriorityQueue_dyn_cmp )
//CDSSTRESS_MSPriorityQueue( pqueue_pop, MSPriorityQueue_dyn_mutex ) // too slow
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSSTRESS_PQUEUE_TYPES_H
{};
typedef cc::MSPriorityQueue< Value, traits_MSPriorityQueue_static_mutex > MSPriorityQueue_static_mutex;
- struct traits_MSPriorityQueue_dyn_less : public
- cc::mspriority_queue::make_traits<
- co::buffer< co::v::initialized_dynamic_buffer< char > >
- >::type
- {};
- typedef cc::MSPriorityQueue< Value, traits_MSPriorityQueue_dyn_less > MSPriorityQueue_dyn_less;
+ struct traits_MSPriorityQueue_dyn: public cc::mspriority_queue::traits
+ {
+ typedef co::v::initialized_dynamic_buffer< char > buffer;
+ };
+
+ struct traits_MSPriorityQueue_dyn_bitreverse_less : public traits_MSPriorityQueue_dyn
+ {
+ typedef cds::bitop::bit_reverse_counter<> item_counter;
+ };
+ typedef cc::MSPriorityQueue< Value, traits_MSPriorityQueue_dyn_bitreverse_less > MSPriorityQueue_dyn_bitreverse_less;
+
+ struct traits_MSPriorityQueue_dyn_bitreverse_less_stat: public traits_MSPriorityQueue_dyn_bitreverse_less
+ {
+ typedef cc::mspriority_queue::stat<> stat;
+ };
+ typedef cc::MSPriorityQueue< Value, traits_MSPriorityQueue_dyn_bitreverse_less_stat > MSPriorityQueue_dyn_bitreverse_less_stat;
+
+ struct traits_MSPriorityQueue_dyn_monotonic_less: public traits_MSPriorityQueue_dyn
+ {
+ typedef cds::intrusive::mspriority_queue::monotonic_counter item_counter;
+ };
+ typedef cc::MSPriorityQueue< Value, traits_MSPriorityQueue_dyn_monotonic_less > MSPriorityQueue_dyn_monotonic_less;
+
+ struct traits_MSPriorityQueue_dyn_monotonic_less_stat: public traits_MSPriorityQueue_dyn_monotonic_less
+ {
+ typedef cc::mspriority_queue::stat<> stat;
+ };
+ typedef cc::MSPriorityQueue< Value, traits_MSPriorityQueue_dyn_monotonic_less_stat > MSPriorityQueue_dyn_monotonic_less_stat;
- struct traits_MSPriorityQueue_dyn_less_stat : public
- cc::mspriority_queue::make_traits <
- co::buffer< co::v::initialized_dynamic_buffer< char > >
- , co::stat < cc::mspriority_queue::stat<> >
- > ::type
- {};
- typedef cc::MSPriorityQueue< Value, traits_MSPriorityQueue_dyn_less_stat > MSPriorityQueue_dyn_less_stat;
struct traits_MSPriorityQueue_dyn_cmp : public
cc::mspriority_queue::make_traits <
<< CDSSTRESS_STAT_OUT( s, m_nPushFailCount )
<< CDSSTRESS_STAT_OUT( s, m_nPopFailCount )
<< CDSSTRESS_STAT_OUT( s, m_nPushHeapifySwapCount )
- << CDSSTRESS_STAT_OUT( s, m_nPopHeapifySwapCount );
+ << CDSSTRESS_STAT_OUT( s, m_nPopHeapifySwapCount )
+ << CDSSTRESS_STAT_OUT( s, m_nItemMovedTop )
+ << CDSSTRESS_STAT_OUT( s, m_nItemMovedUp )
+ << CDSSTRESS_STAT_OUT( s, m_nPushEmptyPass );
}
} // namespace cds_test
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "pqueue_type.h"
pqueue_type pq( s_nQueueSize ); \
test( pq ); \
}
- CDSSTRESS_MSPriorityQueue( pqueue_push, MSPriorityQueue_dyn_less )
- CDSSTRESS_MSPriorityQueue( pqueue_push, MSPriorityQueue_dyn_less_stat )
+ CDSSTRESS_MSPriorityQueue( pqueue_push, MSPriorityQueue_dyn_bitreverse_less )
+ CDSSTRESS_MSPriorityQueue( pqueue_push, MSPriorityQueue_dyn_bitreverse_less_stat )
+ CDSSTRESS_MSPriorityQueue( pqueue_push, MSPriorityQueue_dyn_monotonic_less )
+ CDSSTRESS_MSPriorityQueue( pqueue_push, MSPriorityQueue_dyn_monotonic_less_stat )
CDSSTRESS_MSPriorityQueue( pqueue_push, MSPriorityQueue_dyn_cmp )
//CDSSTRESS_MSPriorityQueue( pqueue_push, MSPriorityQueue_dyn_mutex ) // too slow
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "pqueue_type.h"
pqueue_type pq( s_nQueueSize ); \
test( pq ); \
}
- CDSSTRESS_MSPriorityQueue( pqueue_push_pop, MSPriorityQueue_dyn_less )
- CDSSTRESS_MSPriorityQueue( pqueue_push_pop, MSPriorityQueue_dyn_less_stat )
+ CDSSTRESS_MSPriorityQueue( pqueue_push_pop, MSPriorityQueue_dyn_bitreverse_less )
+ CDSSTRESS_MSPriorityQueue( pqueue_push_pop, MSPriorityQueue_dyn_bitreverse_less_stat )
+ CDSSTRESS_MSPriorityQueue( pqueue_push_pop, MSPriorityQueue_dyn_monotonic_less )
+ CDSSTRESS_MSPriorityQueue( pqueue_push_pop, MSPriorityQueue_dyn_monotonic_less_stat )
CDSSTRESS_MSPriorityQueue( pqueue_push_pop, MSPriorityQueue_dyn_cmp )
- //CDSSTRESS_MSPriorityQueue( pqueue_push_pop, MSPriorityQueue_dyn_mutex ) too slow
+ //CDSSTRESS_MSPriorityQueue( pqueue_push_pop, MSPriorityQueue_dyn_mutex ) // too slow
#define CDSSTRESS_MSPriorityQueue_static( fixture_t, pqueue_t ) \
TEST_F( fixture_t, pqueue_t ) \
intrusive_feldman_hashset_rcu_gpt.cpp
intrusive_feldman_hashset_rcu_shb.cpp
intrusive_feldman_hashset_rcu_sht.cpp
+ intrusive_michael_iterable_dhp.cpp
+ intrusive_michael_iterable_hp.cpp
intrusive_michael_lazy_hp.cpp
intrusive_michael_lazy_dhp.cpp
intrusive_michael_lazy_nogc.cpp
--- /dev/null
+/*
+ This file is a part of libcds - Concurrent Data Structures library
+
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include "test_intrusive_michael_iterable_hp.h"
+
+#include <cds/intrusive/iterable_list_dhp.h>
+#include <cds/intrusive/michael_set.h>
+
+namespace {
+ namespace ci = cds::intrusive;
+ typedef cds::gc::DHP gc_type;
+
+ class IntrusiveMichaelIterableSet_DHP : public cds_test::intrusive_set_hp
+ {
+ protected:
+ typedef cds_test::intrusive_set_hp base_class;
+
+ protected:
+ void SetUp()
+ {
+ struct list_traits : public ci::iterable_list::traits
+ {};
+ typedef ci::IterableList< gc_type, item_type, list_traits > list_type;
+ typedef ci::MichaelHashSet< gc_type, list_type > set_type;
+
+ cds::gc::dhp::GarbageCollector::Construct( 16, set_type::c_nHazardPtrCount );
+ cds::threading::Manager::attachThread();
+ }
+
+ void TearDown()
+ {
+ cds::threading::Manager::detachThread();
+ cds::gc::dhp::GarbageCollector::Destruct();
+ }
+ };
+
+
+ TEST_F( IntrusiveMichaelIterableSet_DHP, cmp )
+ {
+ typedef ci::IterableList< gc_type
+ , item_type
+ ,ci::iterable_list::make_traits<
+ ci::opt::compare< cmp<item_type> >
+ ,ci::opt::disposer< mock_disposer >
+ >::type
+ > bucket_type;
+
+ typedef ci::MichaelHashSet< gc_type, bucket_type,
+ ci::michael_set::make_traits<
+ ci::opt::hash< hash_int >
+ >::type
+ > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
+ TEST_F( IntrusiveMichaelIterableSet_DHP, less )
+ {
+ typedef ci::IterableList< gc_type
+ , item_type
+ ,ci::iterable_list::make_traits<
+ ci::opt::less< less<item_type> >
+ ,ci::opt::disposer< mock_disposer >
+ >::type
+ > bucket_type;
+
+ typedef ci::MichaelHashSet< gc_type, bucket_type,
+ ci::michael_set::make_traits<
+ ci::opt::hash< hash_int >
+ >::type
+ > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
+ TEST_F( IntrusiveMichaelIterableSet_DHP, cmpmix )
+ {
+ struct list_traits : public ci::iterable_list::traits
+ {
+ typedef base_class::less<item_type> less;
+ typedef cmp<item_type> compare;
+ typedef mock_disposer disposer;
+ };
+ typedef ci::IterableList< gc_type, item_type, list_traits > bucket_type;
+
+ struct set_traits : public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
+ TEST_F( IntrusiveMichaelIterableSet_DHP, stat )
+ {
+ struct list_traits: public ci::iterable_list::traits
+ {
+ typedef base_class::less<item_type> less;
+ typedef mock_disposer disposer;
+ typedef ci::iterable_list::stat<> stat;
+ };
+ typedef ci::IterableList< gc_type, item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
+ TEST_F( IntrusiveMichaelIterableSet_DHP, wrapped_stat )
+ {
+ struct list_traits: public ci::iterable_list::traits
+ {
+ typedef base_class::less<item_type> less;
+ typedef mock_disposer disposer;
+ typedef ci::iterable_list::wrapped_stat<> stat;
+ };
+ typedef ci::IterableList< gc_type, item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
+} // namespace
--- /dev/null
+/*
+ This file is a part of libcds - Concurrent Data Structures library
+
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include "test_intrusive_michael_iterable_hp.h"
+
+#include <cds/intrusive/iterable_list_hp.h>
+#include <cds/intrusive/michael_set.h>
+
+namespace {
+ namespace ci = cds::intrusive;
+ typedef cds::gc::HP gc_type;
+
+ class IntrusiveMichaelIterableSet_HP : public cds_test::intrusive_set_hp
+ {
+ protected:
+ typedef cds_test::intrusive_set_hp base_class;
+
+ protected:
+ void SetUp()
+ {
+ struct list_traits : public ci::iterable_list::traits
+ {};
+ typedef ci::IterableList< gc_type, item_type, list_traits > list_type;
+ typedef ci::MichaelHashSet< gc_type, list_type > set_type;
+
+ // +3 - for iterators
+ cds::gc::hp::GarbageCollector::Construct( set_type::c_nHazardPtrCount + 3, 1, 16 );
+ cds::threading::Manager::attachThread();
+ }
+
+ void TearDown()
+ {
+ cds::threading::Manager::detachThread();
+ cds::gc::hp::GarbageCollector::Destruct( true );
+ }
+ };
+
+
+ TEST_F( IntrusiveMichaelIterableSet_HP, cmp )
+ {
+ typedef ci::IterableList< gc_type
+ , item_type
+ ,ci::iterable_list::make_traits<
+ ci::opt::compare< cmp<item_type> >
+ ,ci::opt::disposer< mock_disposer >
+ >::type
+ > bucket_type;
+
+ typedef ci::MichaelHashSet< gc_type, bucket_type,
+ ci::michael_set::make_traits<
+ ci::opt::hash< hash_int >
+ >::type
+ > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
+ TEST_F( IntrusiveMichaelIterableSet_HP, less )
+ {
+ typedef ci::IterableList< gc_type
+ , item_type
+ ,ci::iterable_list::make_traits<
+ ci::opt::less< less<item_type> >
+ ,ci::opt::disposer< mock_disposer >
+ >::type
+ > bucket_type;
+
+ typedef ci::MichaelHashSet< gc_type, bucket_type,
+ ci::michael_set::make_traits<
+ ci::opt::hash< hash_int >
+ >::type
+ > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
+ TEST_F( IntrusiveMichaelIterableSet_HP, cmpmix )
+ {
+ struct list_traits : public ci::iterable_list::traits
+ {
+ typedef base_class::less<item_type> less;
+ typedef cmp<item_type> compare;
+ typedef mock_disposer disposer;
+ };
+ typedef ci::IterableList< gc_type, item_type, list_traits > bucket_type;
+
+ struct set_traits : public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
+ TEST_F( IntrusiveMichaelIterableSet_HP, stat )
+ {
+ struct list_traits: public ci::iterable_list::traits
+ {
+ typedef base_class::less<item_type> less;
+ typedef cmp<item_type> compare;
+ typedef mock_disposer disposer;
+ typedef ci::iterable_list::stat<> stat;
+ };
+ typedef ci::IterableList< gc_type, item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
+ TEST_F( IntrusiveMichaelIterableSet_HP, wrapped_stat )
+ {
+ struct list_traits: public ci::iterable_list::traits
+ {
+ typedef base_class::less<item_type> less;
+ typedef cmp<item_type> compare;
+ typedef mock_disposer disposer;
+ typedef ci::iterable_list::wrapped_stat<> stat;
+ };
+ typedef ci::IterableList< gc_type, item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
+} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_intrusive_set_hp.h"
test( s );
}
+ TEST_F( IntrusiveMichaelLazySet_DHP, base_stat )
+ {
+ struct list_traits: public ci::lazy_list::traits
+ {
+ typedef ci::lazy_list::base_hook< ci::opt::gc<gc_type>> hook;
+ typedef base_class::less<base_item_type> less;
+ typedef mock_disposer disposer;
+ typedef ci::lazy_list::stat<> stat;
+ };
+ typedef ci::LazyList< gc_type, base_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
+ TEST_F( IntrusiveMichaelLazySet_DHP, base_wrapped_stat )
+ {
+ struct list_traits: public ci::lazy_list::traits
+ {
+ typedef ci::lazy_list::base_hook< ci::opt::gc<gc_type>> hook;
+ typedef base_class::less<base_item_type> less;
+ typedef mock_disposer disposer;
+ typedef ci::lazy_list::wrapped_stat<> stat;
+ };
+ typedef ci::LazyList< gc_type, base_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
TEST_F( IntrusiveMichaelLazySet_DHP, member_cmp )
{
test( s );
}
+ TEST_F( IntrusiveMichaelLazySet_DHP, member_stat )
+ {
+ struct list_traits: public ci::lazy_list::traits
+ {
+ typedef ci::lazy_list::member_hook< offsetof( member_item_type, hMember ), ci::opt::gc<gc_type>> hook;
+ typedef cmp<member_item_type> compare;
+ typedef mock_disposer disposer;
+ typedef ci::lazy_list::stat<> stat;
+ };
+ typedef ci::LazyList< gc_type, member_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
+ TEST_F( IntrusiveMichaelLazySet_DHP, member_wrapped_stat )
+ {
+ struct list_traits: public ci::lazy_list::traits
+ {
+ typedef ci::lazy_list::member_hook< offsetof( member_item_type, hMember ), ci::opt::gc<gc_type>> hook;
+ typedef cmp<member_item_type> compare;
+ typedef mock_disposer disposer;
+ typedef ci::lazy_list::wrapped_stat<> stat;
+ };
+ typedef ci::LazyList< gc_type, member_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_intrusive_set_hp.h"
test( s );
}
+ TEST_F( IntrusiveMichaelLazySet_HP, base_stat )
+ {
+ struct list_traits: public ci::lazy_list::traits
+ {
+ typedef ci::lazy_list::base_hook< ci::opt::gc<gc_type>, ci::opt::lock_type<std::mutex>> hook;
+ typedef cmp<base_mutex_item_type> compare;
+ typedef mock_disposer disposer;
+ typedef ci::lazy_list::stat<> stat;
+ };
+ typedef ci::LazyList< gc_type, base_mutex_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
+ TEST_F( IntrusiveMichaelLazySet_HP, base_wrapped_stat )
+ {
+ struct list_traits: public ci::lazy_list::traits
+ {
+ typedef ci::lazy_list::base_hook< ci::opt::gc<gc_type>> hook;
+ typedef cmp<base_item_type> compare;
+ typedef mock_disposer disposer;
+ typedef ci::lazy_list::wrapped_stat<> stat;
+ };
+ typedef ci::LazyList< gc_type, base_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
TEST_F( IntrusiveMichaelLazySet_HP, member_cmp )
{
test( s );
}
+ TEST_F( IntrusiveMichaelLazySet_HP, member_stat )
+ {
+ struct list_traits: public ci::lazy_list::traits
+ {
+ typedef ci::lazy_list::member_hook< offsetof( member_item_type, hMember ), ci::opt::gc<gc_type>> hook;
+ typedef base_class::less<member_item_type> less;
+ typedef mock_disposer disposer;
+ typedef ci::lazy_list::stat<> stat;
+ };
+ typedef ci::LazyList< gc_type, member_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
+ TEST_F( IntrusiveMichaelLazySet_HP, member_wrapped_stat )
+ {
+ struct list_traits: public ci::lazy_list::traits
+ {
+ typedef ci::lazy_list::member_hook< offsetof( member_item_type, hMember ), ci::opt::gc<gc_type>> hook;
+ typedef base_class::less<member_item_type> less;
+ typedef mock_disposer disposer;
+ typedef ci::lazy_list::wrapped_stat<> stat;
+ };
+ typedef ci::LazyList< gc_type, member_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_intrusive_set_nogc.h"
test( s );
}
+ TEST_F( IntrusiveMichaelLazySet_NoGC, base_stat )
+ {
+ struct list_traits: public ci::lazy_list::traits
+ {
+ typedef ci::lazy_list::base_hook< ci::opt::gc<gc_type>> hook;
+ typedef base_class::less<base_item_type> less;
+ typedef mock_disposer disposer;
+ typedef ci::lazy_list::stat<> stat;
+ };
+ typedef ci::LazyList< gc_type, base_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
+ TEST_F( IntrusiveMichaelLazySet_NoGC, base_wrapped_stat )
+ {
+ struct list_traits: public ci::lazy_list::traits
+ {
+ typedef ci::lazy_list::base_hook< ci::opt::gc<gc_type>> hook;
+ typedef base_class::less<base_item_type> less;
+ typedef mock_disposer disposer;
+ typedef ci::lazy_list::wrapped_stat<> stat;
+ };
+ typedef ci::LazyList< gc_type, base_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
TEST_F( IntrusiveMichaelLazySet_NoGC, member_cmp )
{
test( s );
}
+ TEST_F( IntrusiveMichaelLazySet_NoGC, member_stat )
+ {
+ struct list_traits: public ci::lazy_list::traits
+ {
+ typedef ci::lazy_list::member_hook< offsetof( member_item_type, hMember ), ci::opt::gc<gc_type>> hook;
+ typedef cmp<member_item_type> compare;
+ typedef mock_disposer disposer;
+ typedef ci::lazy_list::stat<> stat;
+ };
+ typedef ci::LazyList< gc_type, member_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
+ TEST_F( IntrusiveMichaelLazySet_NoGC, member_wrapped_stat )
+ {
+ struct list_traits: public ci::lazy_list::traits
+ {
+ typedef ci::lazy_list::member_hook< offsetof( member_item_type, hMember ), ci::opt::gc<gc_type>> hook;
+ typedef cmp<member_item_type> compare;
+ typedef mock_disposer disposer;
+ typedef ci::lazy_list::wrapped_stat<> stat;
+ };
+ typedef ci::LazyList< gc_type, member_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_intrusive_set_hp.h"
test( s );
}
+ TEST_F( IntrusiveMichaelSet_DHP, base_stat )
+ {
+ struct list_traits: public ci::michael_list::traits
+ {
+ typedef ci::michael_list::base_hook< ci::opt::gc<gc_type>> hook;
+ typedef base_class::less<base_item_type> less;
+ typedef cmp<base_item_type> compare;
+ typedef mock_disposer disposer;
+ typedef ci::michael_list::stat<> stat;
+ };
+ typedef ci::MichaelList< gc_type, base_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
+ TEST_F( IntrusiveMichaelSet_DHP, base_wrapped_stat )
+ {
+ struct list_traits: public ci::michael_list::traits
+ {
+ typedef ci::michael_list::base_hook< ci::opt::gc<gc_type>> hook;
+ typedef base_class::less<base_item_type> less;
+ typedef cmp<base_item_type> compare;
+ typedef mock_disposer disposer;
+ typedef ci::michael_list::wrapped_stat<> stat;
+ };
+ typedef ci::MichaelList< gc_type, base_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
TEST_F( IntrusiveMichaelSet_DHP, member_cmp )
{
test( s );
}
+ TEST_F( IntrusiveMichaelSet_DHP, member_stat )
+ {
+ struct list_traits: public ci::michael_list::traits
+ {
+ typedef ci::michael_list::member_hook< offsetof( member_item_type, hMember ), ci::opt::gc<gc_type>> hook;
+ typedef base_class::less<member_item_type> less;
+ typedef cmp<member_item_type> compare;
+ typedef mock_disposer disposer;
+ typedef ci::michael_list::stat<> stat;
+ };
+ typedef ci::MichaelList< gc_type, member_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
+ TEST_F( IntrusiveMichaelSet_DHP, member_wrapped_stat )
+ {
+ struct list_traits: public ci::michael_list::traits
+ {
+ typedef ci::michael_list::member_hook< offsetof( member_item_type, hMember ), ci::opt::gc<gc_type>> hook;
+ typedef base_class::less<member_item_type> less;
+ typedef cmp<member_item_type> compare;
+ typedef mock_disposer disposer;
+ typedef ci::michael_list::wrapped_stat<> stat;
+ };
+ typedef ci::MichaelList< gc_type, member_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_intrusive_set_hp.h"
}
+ // Base-hook variant with internal statistics (ci::michael_list::stat<>)
+ // enabled in the bucket list traits; ordering via comparator only.
+ TEST_F( IntrusiveMichaelSet_HP, base_stat )
+ {
+ struct list_traits: public ci::michael_list::traits
+ {
+ typedef ci::michael_list::base_hook< ci::opt::gc<gc_type>> hook;
+ typedef cmp<base_item_type> compare;
+ typedef mock_disposer disposer;
+ typedef ci::michael_list::stat<> stat;
+ };
+ typedef ci::MichaelList< gc_type, base_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
+ // Base-hook variant with the wrapped_stat adapter; ordering via less only
+ // (unlike base_stat above, which uses a comparator).
+ TEST_F( IntrusiveMichaelSet_HP, base_wrapped_stat )
+ {
+ struct list_traits: public ci::michael_list::traits
+ {
+ typedef ci::michael_list::base_hook< ci::opt::gc<gc_type>> hook;
+ typedef base_class::less<base_item_type> less;
+ typedef mock_disposer disposer;
+ typedef ci::michael_list::wrapped_stat<> stat;
+ };
+ typedef ci::MichaelList< gc_type, base_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
TEST_F( IntrusiveMichaelSet_HP, member_cmp )
{
typedef ci::MichaelList< gc_type
test( s );
}
+ // Member-hook variant with internal statistics (ci::michael_list::stat<>)
+ // enabled in the bucket list traits.
+ TEST_F( IntrusiveMichaelSet_HP, member_stat )
+ {
+ struct list_traits: public ci::michael_list::traits
+ {
+ typedef ci::michael_list::member_hook< offsetof( member_item_type, hMember ), ci::opt::gc<gc_type>> hook;
+ // compare takes precedence when both less and compare are defined
+ typedef base_class::less<member_item_type> less;
+ typedef cmp<member_item_type> compare;
+ typedef mock_disposer disposer;
+ typedef ci::michael_list::stat<> stat;
+ };
+ typedef ci::MichaelList< gc_type, member_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
+ // Member-hook variant exercising the wrapped_stat adapter.
+ TEST_F( IntrusiveMichaelSet_HP, member_wrapped_stat )
+ {
+ struct list_traits: public ci::michael_list::traits
+ {
+ typedef ci::michael_list::member_hook< offsetof( member_item_type, hMember ), ci::opt::gc<gc_type>> hook;
+ // compare takes precedence when both less and compare are defined
+ typedef base_class::less<member_item_type> less;
+ typedef cmp<member_item_type> compare;
+ typedef mock_disposer disposer;
+ typedef ci::michael_list::wrapped_stat<> stat;
+ };
+ typedef ci::MichaelList< gc_type, member_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_intrusive_set_nogc.h"
test( s );
}
+ // NoGC base-hook variant with internal statistics enabled.
+ TEST_F( IntrusiveMichaelSet_NoGC, base_stat )
+ {
+ struct list_traits: public ci::michael_list::traits
+ {
+ typedef ci::michael_list::base_hook< ci::opt::gc<gc_type>> hook;
+ typedef base_class::less<base_item_type> less;
+ typedef mock_disposer disposer;
+ typedef ci::michael_list::stat<> stat;
+ };
+ typedef ci::MichaelList< gc_type, base_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
+ // NoGC base-hook variant exercising the wrapped_stat adapter.
+ TEST_F( IntrusiveMichaelSet_NoGC, base_wrapped_stat )
+ {
+ struct list_traits: public ci::michael_list::traits
+ {
+ typedef ci::michael_list::base_hook< ci::opt::gc<gc_type>> hook;
+ typedef base_class::less<base_item_type> less;
+ typedef mock_disposer disposer;
+ typedef ci::michael_list::wrapped_stat<> stat;
+ };
+ typedef ci::MichaelList< gc_type, base_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
TEST_F( IntrusiveMichaelSet_NoGC, member_cmp )
{
test( s );
}
+ // NoGC member-hook variant with internal statistics enabled;
+ // ordering via comparator only.
+ TEST_F( IntrusiveMichaelSet_NoGC, member_stat )
+ {
+ struct list_traits: public ci::michael_list::traits
+ {
+ typedef ci::michael_list::member_hook< offsetof( member_item_type, hMember ), ci::opt::gc<gc_type>> hook;
+ typedef cmp<member_item_type> compare;
+ typedef mock_disposer disposer;
+ typedef ci::michael_list::stat<> stat;
+ };
+ typedef ci::MichaelList< gc_type, member_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
+ // NoGC member-hook variant exercising the wrapped_stat adapter.
+ TEST_F( IntrusiveMichaelSet_NoGC, member_wrapped_stat )
+ {
+ struct list_traits: public ci::michael_list::traits
+ {
+ typedef ci::michael_list::member_hook< offsetof( member_item_type, hMember ), ci::opt::gc<gc_type>> hook;
+ typedef cmp<member_item_type> compare;
+ typedef mock_disposer disposer;
+ typedef ci::michael_list::wrapped_stat<> stat;
+ };
+ typedef ci::MichaelList< gc_type, member_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< gc_type, bucket_type, set_traits > set_type;
+
+ set_type s( kSize, 2 );
+ test( s );
+ }
+
} // namespace
--- /dev/null
+/*
+ This file is a part of libcds - Concurrent Data Structures library
+
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef CDSUNIT_SET_TEST_INTRUSIVE_MICHAEL_ITERABLE_H
+#define CDSUNIT_SET_TEST_INTRUSIVE_MICHAEL_ITERABLE_H
+
+#include <cds_test/check_size.h>
+#include <cds_test/fixture.h>
+
+#include <cds/opt/hash.h>
+#include <functional> // ref
+
+// forward declaration
+namespace cds { namespace intrusive {}}
+
+namespace cds_test {
+
+ namespace ci = cds::intrusive;
+ namespace co = cds::opt;
+
+ /// Common fixture for intrusive Michael-set (iterable-list based) unit tests.
+ /// Provides item/counter/functor helpers and a generic test() routine that
+ /// exercises insert/update/upsert, find/contains, erase, iterators and clear.
+ class intrusive_set: public fixture
+ {
+ public:
+ static size_t const kSize = 100;
+
+ /// Per-item counters mutated by disposer/find/update/erase functors.
+ struct stat
+ {
+ unsigned int nDisposeCount ; // count of disposer calling
+ unsigned int nFindCount ; // count of find-functor calling
+ unsigned int nUpdateNewCount;
+ unsigned int nUpdateCount;
+ mutable unsigned int nEraseCount;
+
+ stat()
+ {
+ clear_stat();
+ }
+
+ // Zero all counters. NOTE(review): memset needs <cstring>, which is not
+ // in this header's visible include list — presumably pulled in
+ // transitively via fixture.h; confirm. Safe here: struct has only
+ // unsigned int members.
+ void clear_stat()
+ {
+ memset( this, 0, sizeof( *this ) );
+ }
+ };
+
+ /// Test item: an int key plus the stat counters (via inheritance).
+ struct item_type: public stat
+ {
+ int nKey;
+ int nVal;
+
+ item_type( int k )
+ : nKey(k)
+ , nVal(0)
+ {}
+
+ int key() const
+ {
+ return nKey;
+ }
+ };
+
+ /// Hash functor: accepts either a bare int key or anything with key().
+ struct hash_int {
+ size_t operator()( int i ) const
+ {
+ return co::v::hash<int>()( i );
+ }
+ template <typename Q>
+ size_t operator()( Q const& i ) const
+ {
+ return (*this)( i.key());
+ }
+ };
+
+ /// Minimal single-threaded item counter (not atomic; tests are serial).
+ struct simple_item_counter {
+ size_t m_nCount;
+
+ simple_item_counter()
+ : m_nCount(0)
+ {}
+
+ size_t operator ++()
+ {
+ return ++m_nCount;
+ }
+
+ size_t operator --()
+ {
+ return --m_nCount;
+ }
+
+ void reset()
+ {
+ m_nCount = 0;
+ }
+
+ operator size_t() const
+ {
+ return m_nCount;
+ }
+
+ };
+
+
+ /// less-predicate comparing items by key(); also item-vs-key overloads.
+ template <typename T>
+ struct less
+ {
+ bool operator ()(const T& v1, const T& v2 ) const
+ {
+ return v1.key() < v2.key();
+ }
+
+ template <typename Q>
+ bool operator ()(const T& v1, const Q& v2 ) const
+ {
+ return v1.key() < v2;
+ }
+
+ template <typename Q>
+ bool operator ()(const Q& v1, const T& v2 ) const
+ {
+ return v1 < v2.key();
+ }
+ };
+
+ /// Three-way comparator (-1/0/1) by key(); also item-vs-key overloads.
+ template <typename T>
+ struct cmp {
+ int operator ()(const T& v1, const T& v2 ) const
+ {
+ if ( v1.key() < v2.key() )
+ return -1;
+ return v1.key() > v2.key() ? 1 : 0;
+ }
+
+ template <typename Q>
+ int operator ()(const T& v1, const Q& v2 ) const
+ {
+ if ( v1.key() < v2 )
+ return -1;
+ return v1.key() > v2 ? 1 : 0;
+ }
+
+ template <typename Q>
+ int operator ()(const Q& v1, const T& v2 ) const
+ {
+ if ( v1 < v2.key() )
+ return -1;
+ return v1 > v2.key() ? 1 : 0;
+ }
+ };
+
+ /// Key-only surrogate type for the *_with() heterogeneous-lookup APIs.
+ struct other_item {
+ int nKey;
+
+ explicit other_item( int k )
+ : nKey( k )
+ {}
+
+ int key() const
+ {
+ return nKey;
+ }
+ };
+
+ /// less-predicate for other_item vs item_type (both expose key()).
+ struct other_less {
+ template <typename Q, typename T>
+ bool operator()( Q const& lhs, T const& rhs ) const
+ {
+ return lhs.key() < rhs.key();
+ }
+ };
+
+ /// Disposer that only counts invocations; items are owned by the test.
+ struct mock_disposer
+ {
+ template <typename T>
+ void operator ()( T * p )
+ {
+ ++p->nDisposeCount;
+ }
+ };
+
+ protected:
+ /// Generic workout of a set type: insert/update/upsert, find/contains
+ /// (including heterogeneous *_with lookups), erase variants, disposer
+ /// accounting after GC, iteration and clear().
+ template <class Set>
+ void test( Set& s )
+ {
+ // Precondition: set is empty
+ // Postcondition: set is empty
+
+ ASSERT_TRUE( s.empty() );
+ ASSERT_CONTAINER_SIZE( s, 0 );
+ size_t const nSetSize = kSize;
+
+ typedef typename Set::value_type value_type;
+
+ std::vector< value_type > data;
+ std::vector< size_t> indices;
+ data.reserve( kSize );
+ indices.reserve( kSize );
+ for ( size_t key = 0; key < kSize; ++key ) {
+ data.push_back( value_type( static_cast<int>( key )));
+ indices.push_back( key );
+ }
+ shuffle( indices.begin(), indices.end() );
+
+ // insert/find
+ for ( auto idx : indices ) {
+ auto& i = data[ idx ];
+
+ // item not in the set yet: every lookup flavor must miss
+ ASSERT_FALSE( s.contains( i.nKey ));
+ ASSERT_FALSE( s.contains( i ));
+ ASSERT_FALSE( s.contains( other_item( i.key()), other_less()));
+ ASSERT_FALSE( s.find( i.nKey, []( value_type&, int ) {} ));
+ ASSERT_FALSE( s.find_with( other_item( i.key()), other_less(), []( value_type&, other_item const& ) {} ));
+ ASSERT_TRUE( s.find( i.nKey ) == s.end());
+ ASSERT_TRUE( s.find_with( other_item( i.key() ), other_less()) == s.end());
+
+ std::pair<bool, bool> updResult;
+
+ // update/upsert with bAllowInsert=false on a missing key must fail
+ updResult = s.update( i, []( value_type&, value_type* )
+ {
+ ASSERT_TRUE( false );
+ }, false );
+ EXPECT_FALSE( updResult.first );
+ EXPECT_FALSE( updResult.second );
+
+ updResult = s.upsert( i, false );
+ EXPECT_FALSE( updResult.first );
+ EXPECT_FALSE( updResult.second );
+
+ // rotate the insertion flavor per key to cover all entry points
+ switch ( i.key() % 4 ) {
+ case 0:
+ ASSERT_TRUE( s.insert( i ));
+ ASSERT_FALSE( s.insert( i ));
+ updResult = s.update( i, []( value_type& val, value_type* arg)
+ {
+ ASSERT_TRUE( arg != nullptr );
+ EXPECT_EQ( val.key(), arg->key() );
+ }, false );
+ EXPECT_TRUE( updResult.first );
+ EXPECT_FALSE( updResult.second );
+ break;
+ case 1:
+ EXPECT_EQ( i.nUpdateNewCount, 0 );
+ ASSERT_TRUE( s.insert( i, []( value_type& v ) { ++v.nUpdateNewCount;} ));
+ EXPECT_EQ( i.nUpdateNewCount, 1 );
+ ASSERT_FALSE( s.insert( i, []( value_type& v ) { ++v.nUpdateNewCount;} ) );
+ EXPECT_EQ( i.nUpdateNewCount, 1 );
+ i.nUpdateNewCount = 0;
+ break;
+ case 2:
+ updResult = s.update( i, []( value_type& /*val*/, value_type* arg )
+ {
+ EXPECT_TRUE( arg == nullptr );
+ });
+ EXPECT_TRUE( updResult.first );
+ EXPECT_TRUE( updResult.second );
+ break;
+ case 3:
+ updResult = s.upsert( i );
+ EXPECT_TRUE( updResult.first );
+ EXPECT_TRUE( updResult.second );
+ break;
+ }
+
+ // item is now present: every lookup flavor must hit
+ ASSERT_TRUE( s.contains( i.nKey ) );
+ ASSERT_TRUE( s.contains( i ) );
+ ASSERT_TRUE( s.contains( other_item( i.key() ), other_less()));
+ EXPECT_EQ( i.nFindCount, 0 );
+ ASSERT_TRUE( s.find( i.nKey, []( value_type& v, int ) { ++v.nFindCount; } ));
+ EXPECT_EQ( i.nFindCount, 1 );
+ ASSERT_TRUE( s.find_with( other_item( i.key() ), other_less(), []( value_type& v, other_item const& ) { ++v.nFindCount; } ));
+ EXPECT_EQ( i.nFindCount, 2 );
+ ASSERT_TRUE( s.find( i.nKey ) != s.end() );
+ ASSERT_TRUE( s.find_with( other_item( i.key() ), other_less() ) != s.end() );
+ EXPECT_EQ( s.find( i.nKey )->nKey, i.key() );
+ EXPECT_EQ( s.find_with( other_item( i.key() ), other_less())->nKey, i.key() );
+ }
+ ASSERT_FALSE( s.empty() );
+ ASSERT_CONTAINER_SIZE( s, nSetSize );
+
+ std::for_each( data.begin(), data.end(), []( value_type& v ) { v.clear_stat(); });
+
+ // erase
+ shuffle( indices.begin(), indices.end() );
+ for ( auto idx : indices ) {
+ auto& i = data[ idx ];
+
+ ASSERT_TRUE( s.contains( i.nKey ) );
+ ASSERT_TRUE( s.contains( i ) );
+ ASSERT_TRUE( s.contains( other_item( i.key() ), other_less() ) );
+ EXPECT_EQ( i.nFindCount, 0 );
+ ASSERT_TRUE( s.find( i.nKey, []( value_type& v, int ) { ++v.nFindCount; } ) );
+ EXPECT_EQ( i.nFindCount, 1 );
+ ASSERT_TRUE( s.find_with( other_item( i.key() ), other_less(), []( value_type& v, other_item const& ) { ++v.nFindCount; } ) );
+ EXPECT_EQ( i.nFindCount, 2 );
+ ASSERT_TRUE( s.find( i.nKey ) != s.end() );
+ ASSERT_TRUE( s.find_with( other_item( i.key() ), other_less()) != s.end() );
+ EXPECT_EQ( s.find( i.nKey )->nKey, i.key() );
+ EXPECT_EQ( s.find_with( other_item( i.key() ), other_less())->nKey, i.key() );
+
+
+ // v is a copy with the same key: unlink(v) must fail (different node),
+ // unlink(i) succeeds; other cases rotate through the erase flavors
+ value_type v( i );
+ switch ( i.key() % 6 ) {
+ case 0:
+ ASSERT_FALSE( s.unlink( v ));
+ ASSERT_TRUE( s.unlink( i ));
+ ASSERT_FALSE( s.unlink( i ) );
+ break;
+ case 1:
+ ASSERT_TRUE( s.erase( i.key()));
+ ASSERT_FALSE( s.erase( i.key() ) );
+ break;
+ case 2:
+ ASSERT_TRUE( s.erase( v ));
+ ASSERT_FALSE( s.erase( v ) );
+ break;
+ case 3:
+ ASSERT_TRUE( s.erase_with( other_item( i.key()), other_less()));
+ ASSERT_FALSE( s.erase_with( other_item( i.key() ), other_less() ) );
+ break;
+ case 4:
+ EXPECT_EQ( i.nEraseCount, 0 );
+ ASSERT_TRUE( s.erase( v, []( value_type& val ) { ++val.nEraseCount; } ));
+ EXPECT_EQ( i.nEraseCount, 1 );
+ ASSERT_FALSE( s.erase( v, []( value_type& val ) { ++val.nEraseCount; } ));
+ EXPECT_EQ( i.nEraseCount, 1 );
+ break;
+ case 5:
+ EXPECT_EQ( i.nEraseCount, 0 );
+ ASSERT_TRUE( s.erase_with( other_item( i.key() ), other_less(), []( value_type& val ) { ++val.nEraseCount; } ));
+ EXPECT_EQ( i.nEraseCount, 1 );
+ ASSERT_FALSE( s.erase_with( other_item( i.key() ), other_less(), []( value_type& val ) { ++val.nEraseCount; } ));
+ EXPECT_EQ( i.nEraseCount, 1 );
+ break;
+ }
+
+ // erased item: all lookup flavors must miss again
+ ASSERT_FALSE( s.contains( i.nKey ));
+ ASSERT_FALSE( s.contains( i ));
+ ASSERT_FALSE( s.contains( other_item( i.key()), other_less()));
+ ASSERT_FALSE( s.find( i.nKey, []( value_type&, int ) {} ));
+ ASSERT_FALSE( s.find_with( other_item( i.key()), other_less(), []( value_type&, other_item const& ) {} ));
+ ASSERT_TRUE( s.find( i.nKey ) == s.end() );
+ ASSERT_TRUE( s.find_with( other_item( i.key() ), other_less() ) == s.end() );
+ }
+ ASSERT_TRUE( s.empty() );
+ ASSERT_CONTAINER_SIZE( s, 0 );
+
+ // Force retiring cycle
+ Set::gc::force_dispose();
+ for ( auto& i : data ) {
+ EXPECT_EQ( i.nDisposeCount, 1 );
+ }
+
+ // clear
+ for ( auto& i : data ) {
+ i.clear_stat();
+ ASSERT_TRUE( s.insert( i ));
+ }
+ ASSERT_FALSE( s.empty() );
+ ASSERT_CONTAINER_SIZE( s, nSetSize );
+
+ // Iterator test
+ for ( auto it = s.begin(); it != s.end(); ++it ) {
+ ++it->nFindCount;
+ }
+ for ( auto it = s.cbegin(); it != s.cend(); ++it ) {
+ EXPECT_EQ( it->nFindCount, 1 );
+ }
+ for ( auto& i : data ) {
+ EXPECT_EQ( i.nFindCount, 1 );
+ }
+
+ // clear test
+ s.clear();
+
+ ASSERT_TRUE( s.empty());
+ ASSERT_CONTAINER_SIZE( s, 0 );
+ ASSERT_TRUE( s.begin() == s.end() );
+ ASSERT_TRUE( s.cbegin() == s.cend() );
+
+ // Force retiring cycle
+ Set::gc::force_dispose();
+ for ( auto& i : data ) {
+ EXPECT_EQ( i.nDisposeCount, 1 );
+ }
+
+ }
+ };
+
+} // namespace cds_test
+
+#endif // #ifndef CDSUNIT_SET_TEST_INTRUSIVE_MICHAEL_ITERABLE_H
--- /dev/null
+/*
+ This file is a part of libcds - Concurrent Data Structures library
+
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef CDSUNIT_SET_TEST_INTRUSIVE_MICHAEL_ITERABLE_HP_H
+#define CDSUNIT_SET_TEST_INTRUSIVE_MICHAEL_ITERABLE_HP_H
+
+#include "test_intrusive_michael_iterable.h"
+
+// forward declaration
+namespace cds { namespace intrusive {}}
+
+namespace cds_test {
+
+ namespace ci = cds::intrusive;
+ namespace co = cds::opt;
+
+ /// Extension of intrusive_set for HP-like GCs: additionally exercises
+ /// guarded-pointer get/get_with and extract/extract_with operations.
+ class intrusive_set_hp: public intrusive_set
+ {
+ typedef intrusive_set base_class;
+
+ protected:
+
+ /// Runs the base workout first, then the guarded-pointer scenarios.
+ template <class Set>
+ void test( Set& s )
+ {
+ // Precondition: set is empty
+ // Postcondition: set is empty
+
+ base_class::test( s );
+
+ ASSERT_TRUE( s.empty() );
+ ASSERT_CONTAINER_SIZE( s, 0 );
+
+ typedef typename Set::value_type value_type;
+
+ std::vector< value_type > data;
+ std::vector< size_t> indices;
+ data.reserve( kSize );
+ indices.reserve( kSize );
+ for ( size_t key = 0; key < kSize; ++key ) {
+ data.push_back( value_type( static_cast<int>(key) ) );
+ indices.push_back( key );
+ }
+ shuffle( indices.begin(), indices.end() );
+
+ typename Set::guarded_ptr gp;
+
+ // get/extract from empty set
+ for ( auto idx : indices ) {
+ auto& i = data[idx];
+
+ gp = s.get( i );
+ ASSERT_TRUE( !gp );
+ gp = s.get( i.key() );
+ ASSERT_TRUE( !gp );
+ gp = s.get_with( other_item( i.key()), other_less());
+ ASSERT_TRUE( !gp );
+
+ gp = s.extract( i );
+ ASSERT_TRUE( !gp );
+ gp = s.extract( i.key());
+ ASSERT_TRUE( !gp );
+ gp = s.extract_with( other_item( i.key()), other_less());
+ ASSERT_TRUE( !gp );
+ }
+
+ // fill set
+ for ( auto& i : data ) {
+ i.nDisposeCount = 0;
+ ASSERT_TRUE( s.insert( i ) );
+ }
+
+ // get/extract
+ for ( auto idx : indices ) {
+ auto& i = data[idx];
+
+ // each get flavor must return a guard on the same node
+ EXPECT_EQ( i.nFindCount, 0 );
+ gp = s.get( i );
+ ASSERT_FALSE( !gp );
+ ++gp->nFindCount;
+ EXPECT_EQ( i.nFindCount, 1 );
+
+ gp = s.get( i.key() );
+ ASSERT_FALSE( !gp );
+ ++gp->nFindCount;
+ EXPECT_EQ( i.nFindCount, 2 );
+
+ gp = s.get_with( other_item( i.key()), other_less());
+ ASSERT_FALSE( !gp );
+ ++gp->nFindCount;
+ EXPECT_EQ( i.nFindCount, 3 );
+
+ // rotate extract flavor per key; a second extract must miss
+ EXPECT_EQ( i.nEraseCount, 0 );
+ switch ( i.key() % 3 ) {
+ case 0:
+ gp = s.extract( i.key());
+ break;
+ case 1:
+ gp = s.extract( i );
+ break;
+ case 2:
+ gp = s.extract_with( other_item( i.key() ), other_less() );
+ break;
+ }
+ ASSERT_FALSE( !gp );
+ ++gp->nEraseCount;
+ EXPECT_EQ( i.nEraseCount, 1 );
+
+ gp = s.extract( i );
+ ASSERT_TRUE( !gp );
+ gp = s.extract( i.key() );
+ ASSERT_TRUE( !gp );
+ gp = s.extract_with( other_item( i.key() ), other_less() );
+ ASSERT_TRUE( !gp );
+ }
+
+ // release the last guard so the final node can be retired
+ gp.release();
+
+ ASSERT_TRUE( s.empty() );
+ ASSERT_CONTAINER_SIZE( s, 0 );
+
+ // Force retiring cycle
+ Set::gc::force_dispose();
+ for ( auto& i : data ) {
+ EXPECT_EQ( i.nDisposeCount, 1 );
+ }
+
+ }
+ };
+
+} // namespace cds_test
+
+#endif // #ifndef CDSUNIT_SET_TEST_INTRUSIVE_MICHAEL_ITERABLE_HP_H
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSUNIT_SET_TEST_INTRUSIVE_MICHAEL_LAZY_RCU_H
#define CDSUNIT_SET_TEST_INTRUSIVE_MICHAEL_LAZY_RCU_H
this->test( s );
}
+// RCU typed test: base-hook LazyList buckets with internal statistics
+// (ci::lazy_list::stat<>) enabled.
+TYPED_TEST_P( IntrusiveMichaelLazySet, base_stat )
+{
+ typedef typename TestFixture::rcu_type rcu_type;
+ typedef typename TestFixture::base_item_type base_item_type;
+ typedef typename TestFixture::mock_disposer mock_disposer;
+ typedef typename TestFixture::hash_int hash_int;
+
+ struct list_traits: public ci::lazy_list::traits
+ {
+ typedef ci::lazy_list::base_hook< ci::opt::gc<rcu_type>> hook;
+ typedef typename TestFixture::template less<base_item_type> less;
+ typedef mock_disposer disposer;
+ typedef ci::lazy_list::stat<> stat;
+ };
+ typedef ci::LazyList< rcu_type, base_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef typename TestFixture::simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< rcu_type, bucket_type, set_traits > set_type;
+
+ set_type s( TestFixture::kSize, 2 );
+ this->test( s );
+}
+
+// RCU typed test: as base_stat, but with the wrapped_stat adapter.
+TYPED_TEST_P( IntrusiveMichaelLazySet, base_wrapped_stat )
+{
+ typedef typename TestFixture::rcu_type rcu_type;
+ typedef typename TestFixture::base_item_type base_item_type;
+ typedef typename TestFixture::mock_disposer mock_disposer;
+ typedef typename TestFixture::hash_int hash_int;
+
+ struct list_traits: public ci::lazy_list::traits
+ {
+ typedef ci::lazy_list::base_hook< ci::opt::gc<rcu_type>> hook;
+ typedef typename TestFixture::template less<base_item_type> less;
+ typedef mock_disposer disposer;
+ typedef ci::lazy_list::wrapped_stat<> stat;
+ };
+ typedef ci::LazyList< rcu_type, base_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef typename TestFixture::simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< rcu_type, bucket_type, set_traits > set_type;
+
+ set_type s( TestFixture::kSize, 2 );
+ this->test( s );
+}
TYPED_TEST_P( IntrusiveMichaelLazySet, member_cmp )
{
this->test( s );
}
+// RCU typed test: member-hook LazyList buckets with internal statistics
+// enabled; ordering via comparator.
+TYPED_TEST_P( IntrusiveMichaelLazySet, member_stat )
+{
+ typedef typename TestFixture::rcu_type rcu_type;
+ typedef typename TestFixture::member_item_type member_item_type;
+ typedef typename TestFixture::mock_disposer mock_disposer;
+ typedef typename TestFixture::hash_int hash_int;
+
+ struct list_traits: public ci::lazy_list::traits
+ {
+ typedef ci::lazy_list::member_hook< offsetof( member_item_type, hMember ), ci::opt::gc<rcu_type>> hook;
+ typedef typename TestFixture::template cmp<member_item_type> compare;
+ typedef mock_disposer disposer;
+ typedef ci::lazy_list::stat<> stat;
+ };
+ typedef ci::LazyList< rcu_type, member_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef typename TestFixture::simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< rcu_type, bucket_type, set_traits > set_type;
+
+ set_type s( TestFixture::kSize, 2 );
+ this->test( s );
+}
+
+// RCU typed test: as member_stat, but with the wrapped_stat adapter.
+TYPED_TEST_P( IntrusiveMichaelLazySet, member_wrapped_stat )
+{
+ typedef typename TestFixture::rcu_type rcu_type;
+ typedef typename TestFixture::member_item_type member_item_type;
+ typedef typename TestFixture::mock_disposer mock_disposer;
+ typedef typename TestFixture::hash_int hash_int;
+
+ struct list_traits: public ci::lazy_list::traits
+ {
+ typedef ci::lazy_list::member_hook< offsetof( member_item_type, hMember ), ci::opt::gc<rcu_type>> hook;
+ typedef typename TestFixture::template cmp<member_item_type> compare;
+ typedef mock_disposer disposer;
+ typedef ci::lazy_list::wrapped_stat<> stat;
+ };
+ typedef ci::LazyList< rcu_type, member_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef typename TestFixture::simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< rcu_type, bucket_type, set_traits > set_type;
+
+ set_type s( TestFixture::kSize, 2 );
+ this->test( s );
+}
// GCC 5: All test names should be written on single line, otherwise a runtime error will be encountered like as
// "No test named <test_name> can be found in this test case"
REGISTER_TYPED_TEST_CASE_P( IntrusiveMichaelLazySet,
- base_cmp, base_less, base_cmpmix, base_mutex, member_cmp, member_less, member_cmpmix, member_mutex
+ base_cmp, base_less, base_cmpmix, base_mutex, base_stat, base_wrapped_stat, member_cmp, member_less, member_cmpmix, member_mutex, member_stat, member_wrapped_stat
);
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSUNIT_SET_TEST_INTRUSIVE_MICHAEL_MICHAEL_RCU_H
#define CDSUNIT_SET_TEST_INTRUSIVE_MICHAEL_MICHAEL_RCU_H
this->test( s );
}
+// RCU typed test: base-hook MichaelList buckets with internal statistics
+// (ci::michael_list::stat<>) enabled.
+TYPED_TEST_P( IntrusiveMichaelSet, base_stat )
+{
+ typedef typename TestFixture::rcu_type rcu_type;
+ typedef typename TestFixture::base_item_type base_item_type;
+ typedef typename TestFixture::mock_disposer mock_disposer;
+ typedef typename TestFixture::hash_int hash_int;
+
+ struct list_traits: public ci::michael_list::traits
+ {
+ typedef ci::michael_list::base_hook< ci::opt::gc<rcu_type>> hook;
+ // compare takes precedence when both less and compare are defined
+ typedef typename TestFixture::template less<base_item_type> less;
+ typedef typename TestFixture::template cmp<base_item_type> compare;
+ typedef mock_disposer disposer;
+ typedef ci::michael_list::stat<> stat;
+ };
+ typedef ci::MichaelList< rcu_type, base_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef typename TestFixture::simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< rcu_type, bucket_type, set_traits > set_type;
+
+ set_type s( TestFixture::kSize, 2 );
+ this->test( s );
+}
+
+// RCU typed test: as base_stat, but with the wrapped_stat adapter.
+TYPED_TEST_P( IntrusiveMichaelSet, base_wrapped_stat )
+{
+ typedef typename TestFixture::rcu_type rcu_type;
+ typedef typename TestFixture::base_item_type base_item_type;
+ typedef typename TestFixture::mock_disposer mock_disposer;
+ typedef typename TestFixture::hash_int hash_int;
+
+ struct list_traits: public ci::michael_list::traits
+ {
+ typedef ci::michael_list::base_hook< ci::opt::gc<rcu_type>> hook;
+ // compare takes precedence when both less and compare are defined
+ typedef typename TestFixture::template less<base_item_type> less;
+ typedef typename TestFixture::template cmp<base_item_type> compare;
+ typedef mock_disposer disposer;
+ typedef ci::michael_list::wrapped_stat<> stat;
+ };
+ typedef ci::MichaelList< rcu_type, base_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef typename TestFixture::simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< rcu_type, bucket_type, set_traits > set_type;
+
+ set_type s( TestFixture::kSize, 2 );
+ this->test( s );
+}
TYPED_TEST_P( IntrusiveMichaelSet, member_cmp )
{
this->test( s );
}
+// RCU typed test: member-hook MichaelList buckets with internal statistics
+// enabled; ordering via comparator.
+TYPED_TEST_P( IntrusiveMichaelSet, member_stat )
+{
+ typedef typename TestFixture::rcu_type rcu_type;
+ typedef typename TestFixture::member_item_type member_item_type;
+ typedef typename TestFixture::mock_disposer mock_disposer;
+ typedef typename TestFixture::hash_int hash_int;
+
+ struct list_traits: public ci::michael_list::traits
+ {
+ typedef ci::michael_list::member_hook< offsetof( member_item_type, hMember ), ci::opt::gc<rcu_type>> hook;
+ typedef typename TestFixture::template cmp<member_item_type> compare;
+ typedef mock_disposer disposer;
+ typedef ci::michael_list::stat<> stat;
+ };
+ typedef ci::MichaelList< rcu_type, member_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef typename TestFixture::simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< rcu_type, bucket_type, set_traits > set_type;
+
+ set_type s( TestFixture::kSize, 2 );
+ this->test( s );
+}
+// RCU typed test: as member_stat, but with the wrapped_stat adapter.
+TYPED_TEST_P( IntrusiveMichaelSet, member_wrapped_stat )
+{
+ typedef typename TestFixture::rcu_type rcu_type;
+ typedef typename TestFixture::member_item_type member_item_type;
+ typedef typename TestFixture::mock_disposer mock_disposer;
+ typedef typename TestFixture::hash_int hash_int;
+
+ struct list_traits: public ci::michael_list::traits
+ {
+ typedef ci::michael_list::member_hook< offsetof( member_item_type, hMember ), ci::opt::gc<rcu_type>> hook;
+ typedef typename TestFixture::template cmp<member_item_type> compare;
+ typedef mock_disposer disposer;
+ typedef ci::michael_list::wrapped_stat<> stat;
+ };
+ typedef ci::MichaelList< rcu_type, member_item_type, list_traits > bucket_type;
+
+ struct set_traits: public ci::michael_set::traits
+ {
+ typedef hash_int hash;
+ typedef typename TestFixture::simple_item_counter item_counter;
+ };
+ typedef ci::MichaelHashSet< rcu_type, bucket_type, set_traits > set_type;
+
+ set_type s( TestFixture::kSize, 2 );
+ this->test( s );
+}
// GCC 5: All test names should be written on single line, otherwise a runtime error will be encountered like as
// "No test named <test_name> can be found in this test case"
REGISTER_TYPED_TEST_CASE_P( IntrusiveMichaelSet,
- base_cmp, base_less, base_cmpmix, member_cmp, member_less, member_cmpmix
+ base_cmp, base_less, base_cmpmix, base_stat, base_wrapped_stat, member_cmp, member_less, member_cmpmix, member_stat, member_wrapped_stat
);
set(CDSGTEST_LIST_SOURCES
../main.cpp
+ intrusive_iterable_dhp.cpp
+ intrusive_iterable_hp.cpp
intrusive_lazy_hp.cpp
intrusive_lazy_dhp.cpp
intrusive_lazy_nogc.cpp
--- /dev/null
+/*
+ This file is a part of libcds - Concurrent Data Structures library
+
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include "test_intrusive_iterable_list_hp.h"
+#include <cds/intrusive/iterable_list_dhp.h>
+
+namespace {
+ namespace ci = cds::intrusive;
+ typedef cds::gc::DHP gc_type;
+
+ class IntrusiveIterableList_DHP : public cds_test::intrusive_iterable_list_hp
+ {
+ protected:
+ void SetUp()
+ {
+ typedef ci::IterableList< gc_type, item_type > list_type;
+
+        // NOTE(review): comment copy-pasted from the HP variant. Unlike HP
+        // (which passes c_nHazardPtrCount + 3 for the iterator test), this DHP
+        // call passes c_nHazardPtrCount unchanged — DHP guards are allocated
+        // dynamically, so no extra static guards are reserved here. Confirm intended.
+ cds::gc::dhp::GarbageCollector::Construct( 16, list_type::c_nHazardPtrCount );
+ cds::threading::Manager::attachThread();
+ }
+
+ void TearDown()
+ {
+ cds::threading::Manager::detachThread();
+ cds::gc::dhp::GarbageCollector::Destruct();
+ }
+ };
+
+ TEST_F( IntrusiveIterableList_DHP, less )
+ {
+ typedef ci::IterableList< gc_type, item_type,
+ typename ci::iterable_list::make_traits<
+ ci::opt::disposer< mock_disposer >
+ ,cds::opt::less< less< item_type >>
+ , cds::opt::item_counter< cds::atomicity::item_counter >
+ >::type
+ > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ TEST_F( IntrusiveIterableList_DHP, compare )
+ {
+ typedef ci::IterableList< gc_type, item_type,
+ typename ci::iterable_list::make_traits<
+ ci::opt::disposer< mock_disposer >
+ , cds::opt::compare< cmp< item_type >>
+ , cds::opt::item_counter< cds::atomicity::item_counter >
+ >::type
+ > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ TEST_F( IntrusiveIterableList_DHP, item_counting )
+ {
+ struct traits : public ci::iterable_list::traits {
+ typedef mock_disposer disposer;
+ typedef cmp< item_type > compare;
+ typedef intrusive_iterable_list::less< item_type > less;
+ typedef cds::atomicity::item_counter item_counter;
+ };
+ typedef ci::IterableList< gc_type, item_type, traits > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ TEST_F( IntrusiveIterableList_DHP, backoff )
+ {
+ struct traits : public ci::iterable_list::traits {
+ typedef mock_disposer disposer;
+ typedef cmp< item_type > compare;
+ typedef intrusive_iterable_list::less< item_type > less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::backoff::pause back_off;
+ };
+ typedef ci::IterableList< gc_type, item_type, traits > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ TEST_F( IntrusiveIterableList_DHP, seqcst )
+ {
+ struct traits : public ci::iterable_list::traits {
+ typedef mock_disposer disposer;
+ typedef intrusive_iterable_list::less< item_type > less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::opt::v::sequential_consistent memory_model;
+ };
+ typedef ci::IterableList< gc_type, item_type, traits > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ TEST_F( IntrusiveIterableList_DHP, stat )
+ {
+ struct traits: public ci::iterable_list::traits {
+ typedef mock_disposer disposer;
+ typedef intrusive_iterable_list::less< item_type > less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::intrusive::iterable_list::stat<> stat;
+ };
+ typedef ci::IterableList< gc_type, item_type, traits > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ TEST_F( IntrusiveIterableList_DHP, wrapped_stat )
+ {
+ struct traits: public ci::iterable_list::traits {
+ typedef mock_disposer disposer;
+ typedef intrusive_iterable_list::less< item_type > less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::intrusive::iterable_list::wrapped_stat<> stat;
+ };
+ typedef ci::IterableList< gc_type, item_type, traits > list_type;
+
+ traits::stat::stat_type st;
+ list_type l( st );
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+} // namespace
--- /dev/null
+/*
+ This file is a part of libcds - Concurrent Data Structures library
+
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include "test_intrusive_iterable_list_hp.h"
+#include <cds/intrusive/iterable_list_hp.h>
+
+namespace {
+ namespace ci = cds::intrusive;
+ typedef cds::gc::HP gc_type;
+
+ class IntrusiveIterableList_HP : public cds_test::intrusive_iterable_list_hp
+ {
+ protected:
+ void SetUp()
+ {
+ typedef ci::IterableList< gc_type, item_type > list_type;
+
+            // The guard for guarded_ptr is presumably already included in
+            // list_type::c_nHazardPtrCount; +3 extra hazard pointers are
+            // reserved for the iterator test below.
+ cds::gc::hp::GarbageCollector::Construct( list_type::c_nHazardPtrCount + 3, 1, 16 );
+ cds::threading::Manager::attachThread();
+ }
+
+ void TearDown()
+ {
+ cds::threading::Manager::detachThread();
+ cds::gc::hp::GarbageCollector::Destruct( true );
+ }
+ };
+
+ TEST_F( IntrusiveIterableList_HP, less )
+ {
+ typedef ci::IterableList< gc_type, item_type,
+ typename ci::iterable_list::make_traits<
+ ci::opt::disposer< mock_disposer >
+ ,cds::opt::less< less< item_type >>
+ ,cds::opt::item_counter< cds::atomicity::item_counter >
+
+ >::type
+ > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ TEST_F( IntrusiveIterableList_HP, compare )
+ {
+ typedef ci::IterableList< gc_type, item_type,
+ typename ci::iterable_list::make_traits<
+ ci::opt::disposer< mock_disposer >
+ , cds::opt::compare< cmp< item_type >>
+ , cds::opt::item_counter< cds::atomicity::item_counter >
+ >::type
+ > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ TEST_F( IntrusiveIterableList_HP, item_counting )
+ {
+ struct traits : public ci::iterable_list::traits {
+ typedef mock_disposer disposer;
+ typedef cmp< item_type > compare;
+ typedef intrusive_iterable_list::less< item_type > less;
+ typedef cds::atomicity::item_counter item_counter;
+ };
+ typedef ci::IterableList< gc_type, item_type, traits > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ TEST_F( IntrusiveIterableList_HP, backoff )
+ {
+ struct traits : public ci::iterable_list::traits {
+ typedef mock_disposer disposer;
+ typedef cmp< item_type > compare;
+ typedef intrusive_iterable_list::less< item_type > less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::backoff::pause back_off;
+ };
+ typedef ci::IterableList< gc_type, item_type, traits > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ TEST_F( IntrusiveIterableList_HP, seqcst )
+ {
+ struct traits : public ci::iterable_list::traits {
+ typedef mock_disposer disposer;
+ typedef intrusive_iterable_list::less< item_type > less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::opt::v::sequential_consistent memory_model;
+ };
+ typedef ci::IterableList< gc_type, item_type, traits > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ TEST_F( IntrusiveIterableList_HP, stat )
+ {
+ struct traits: public ci::iterable_list::traits {
+ typedef mock_disposer disposer;
+ typedef intrusive_iterable_list::less< item_type > less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::intrusive::iterable_list::stat<> stat;
+ };
+ typedef ci::IterableList< gc_type, item_type, traits > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ TEST_F( IntrusiveIterableList_HP, wrapped_stat )
+ {
+ struct traits: public ci::iterable_list::traits {
+ typedef mock_disposer disposer;
+ typedef intrusive_iterable_list::less< item_type > less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::intrusive::iterable_list::wrapped_stat<> stat;
+ };
+ typedef ci::IterableList< gc_type, item_type, traits > list_type;
+
+ traits::stat::stat_type st;
+
+ list_type l( st );
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_intrusive_list_hp.h"
void TearDown()
{
cds::threading::Manager::detachThread();
- cds::gc::hp::GarbageCollector::Destruct();
+ cds::gc::dhp::GarbageCollector::Destruct();
}
};
test_hp( l );
}
+ TEST_F( IntrusiveLazyList_DHP, base_hook_stat )
+ {
+ struct traits: public ci::lazy_list::traits {
+ typedef ci::lazy_list::base_hook< cds::opt::gc< gc_type >> hook;
+ typedef mock_disposer disposer;
+ typedef cmp< base_item > compare;
+ typedef intrusive_list_common::less< base_item > less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::intrusive::lazy_list::stat<> stat;
+ };
+ typedef ci::LazyList< gc_type, base_item, traits > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ TEST_F( IntrusiveLazyList_DHP, base_hook_wrapped_stat )
+ {
+ struct traits: public ci::lazy_list::traits {
+ typedef ci::lazy_list::base_hook< cds::opt::gc< gc_type >> hook;
+ typedef mock_disposer disposer;
+ typedef intrusive_list_common::less< base_item > less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::intrusive::lazy_list::wrapped_stat<> stat;
+ };
+ typedef ci::LazyList< gc_type, base_item, traits > list_type;
+
+ cds::intrusive::lazy_list::stat<> st;
+ list_type l( st );
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
TEST_F( IntrusiveLazyList_DHP, member_hook )
{
typedef ci::LazyList< gc_type, member_item,
test_hp( l );
}
+ TEST_F( IntrusiveLazyList_DHP, member_hook_stat )
+ {
+ struct traits: public ci::lazy_list::traits {
+ typedef ci::lazy_list::member_hook< offsetof( member_item, hMember ), cds::opt::gc< gc_type >> hook;
+ typedef mock_disposer disposer;
+ typedef cmp< member_item > compare;
+ typedef intrusive_list_common::less< member_item > less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::intrusive::lazy_list::stat<> stat;
+ };
+ typedef ci::LazyList< gc_type, member_item, traits > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ TEST_F( IntrusiveLazyList_DHP, member_hook_wrapped_stat )
+ {
+ struct traits: public ci::lazy_list::traits {
+ typedef ci::lazy_list::member_hook< offsetof( member_item, hMember ), cds::opt::gc< gc_type >> hook;
+ typedef mock_disposer disposer;
+ typedef cmp< member_item > compare;
+ typedef intrusive_list_common::less< member_item > less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::intrusive::lazy_list::wrapped_stat<> stat;
+ };
+ typedef ci::LazyList< gc_type, member_item, traits > list_type;
+
+ cds::intrusive::lazy_list::stat<> st;
+ list_type l( st );
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_intrusive_list_hp.h"
test_hp( l );
}
+ TEST_F( IntrusiveLazyList_HP, base_hook_stat )
+ {
+ struct traits: public ci::lazy_list::traits {
+ typedef ci::lazy_list::base_hook< cds::opt::gc< gc_type >> hook;
+ typedef mock_disposer disposer;
+ typedef cmp< base_item > compare;
+ typedef intrusive_list_common::less< base_item > less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::intrusive::lazy_list::stat<> stat;
+ };
+ typedef ci::LazyList< gc_type, base_item, traits > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ TEST_F( IntrusiveLazyList_HP, base_hook_wrapped_stat )
+ {
+ struct traits: public ci::lazy_list::traits {
+ typedef ci::lazy_list::base_hook< cds::opt::gc< gc_type >> hook;
+ typedef mock_disposer disposer;
+ typedef cmp< base_item > compare;
+ typedef cds::intrusive::lazy_list::wrapped_stat<> stat;
+ };
+ typedef ci::LazyList< gc_type, base_item, traits > list_type;
+
+ cds::intrusive::lazy_list::stat<> st;
+ list_type l( st );
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
TEST_F( IntrusiveLazyList_HP, member_hook )
{
typedef ci::LazyList< gc_type, member_item,
test_hp( l );
}
+ TEST_F( IntrusiveLazyList_HP, member_hook_stat )
+ {
+ struct traits: public ci::lazy_list::traits {
+ typedef ci::lazy_list::member_hook< offsetof( member_item, hMember ), cds::opt::gc< gc_type >> hook;
+ typedef mock_disposer disposer;
+ typedef cmp< member_item > compare;
+ typedef intrusive_list_common::less< member_item > less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::intrusive::lazy_list::stat<> stat;
+ };
+ typedef ci::LazyList< gc_type, member_item, traits > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ TEST_F( IntrusiveLazyList_HP, member_hook_wrapped_stat )
+ {
+ struct traits: public ci::lazy_list::traits {
+ typedef ci::lazy_list::member_hook< offsetof( member_item, hMember ), cds::opt::gc< gc_type >> hook;
+ typedef mock_disposer disposer;
+ typedef cmp< member_item > compare;
+ typedef cds::intrusive::lazy_list::wrapped_stat<> stat;
+ };
+ typedef ci::LazyList< gc_type, member_item, traits > list_type;
+
+ cds::intrusive::lazy_list::stat<> st;
+ list_type l( st );
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_intrusive_list_nogc.h"
test_ordered_iterator( l );
}
+ TEST_F( IntrusiveLazyList_NOGC, base_hook_stat )
+ {
+ struct traits: public ci::lazy_list::traits {
+ typedef ci::lazy_list::base_hook< cds::opt::gc< gc_type >> hook;
+ typedef mock_disposer disposer;
+ typedef cmp< base_item > compare;
+ typedef intrusive_list_nogc::less< base_item > less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::intrusive::lazy_list::stat<> stat;
+ };
+ typedef ci::LazyList< gc_type, base_item, traits > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ }
+
+ TEST_F( IntrusiveLazyList_NOGC, base_hook_wrapped_stat )
+ {
+ struct traits: public ci::lazy_list::traits {
+ typedef ci::lazy_list::base_hook< cds::opt::gc< gc_type >> hook;
+ typedef mock_disposer disposer;
+ typedef cmp< base_item > compare;
+ typedef intrusive_list_nogc::less< base_item > less;
+ typedef cds::intrusive::lazy_list::wrapped_stat<> stat;
+ };
+ typedef ci::LazyList< gc_type, base_item, traits > list_type;
+
+ cds::intrusive::lazy_list::stat<> st;
+ list_type l( st );
+ test_common( l );
+ test_ordered_iterator( l );
+ }
+
TEST_F( IntrusiveLazyList_NOGC, member_hook )
{
typedef ci::LazyList< gc_type, member_item,
test_ordered_iterator( l );
}
+ TEST_F( IntrusiveLazyList_NOGC, member_hook_stat )
+ {
+ struct traits: public ci::lazy_list::traits {
+ typedef ci::lazy_list::member_hook< offsetof( member_item, hMember ), cds::opt::gc< gc_type >> hook;
+ typedef mock_disposer disposer;
+ typedef cmp< member_item > compare;
+ typedef intrusive_list_nogc::less< member_item > less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::intrusive::lazy_list::stat<> stat;
+ };
+ typedef ci::LazyList< gc_type, member_item, traits > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ }
+
+ TEST_F( IntrusiveLazyList_NOGC, member_hook_wrapped_stat )
+ {
+ struct traits: public ci::lazy_list::traits {
+ typedef ci::lazy_list::member_hook< offsetof( member_item, hMember ), cds::opt::gc< gc_type >> hook;
+ typedef mock_disposer disposer;
+ typedef cmp< member_item > compare;
+ typedef intrusive_list_nogc::less< member_item > less;
+ typedef cds::intrusive::lazy_list::wrapped_stat<> stat;
+ };
+ typedef ci::LazyList< gc_type, member_item, traits > list_type;
+
+ cds::intrusive::lazy_list::stat<> st;
+ list_type l( st );
+ test_common( l );
+ test_ordered_iterator( l );
+ }
+
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_intrusive_list_hp.h"
void TearDown()
{
cds::threading::Manager::detachThread();
- cds::gc::hp::GarbageCollector::Destruct();
+ cds::gc::dhp::GarbageCollector::Destruct();
}
};
test_ordered_iterator( l );
test_hp( l );
}
+ TEST_F( IntrusiveMichaelList_DHP, base_hook_stat )
+ {
+ struct traits: public ci::michael_list::traits {
+ typedef ci::michael_list::base_hook< cds::opt::gc< gc_type >> hook;
+ typedef mock_disposer disposer;
+ typedef cmp< base_item > compare;
+ typedef intrusive_list_common::less< base_item > less;
+ typedef cds::intrusive::michael_list::stat<> stat;
+ };
+ typedef ci::MichaelList< gc_type, base_item, traits > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ TEST_F( IntrusiveMichaelList_DHP, base_hook_wrapped_stat )
+ {
+ struct traits: public ci::michael_list::traits {
+ typedef ci::michael_list::base_hook< cds::opt::gc< gc_type >> hook;
+ typedef mock_disposer disposer;
+ typedef cmp< base_item > compare;
+ typedef cds::intrusive::michael_list::wrapped_stat<> stat;
+ };
+ typedef ci::MichaelList< gc_type, base_item, traits > list_type;
+
+ cds::intrusive::michael_list::stat<> st;
+ list_type l( st );
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
TEST_F( IntrusiveMichaelList_DHP, member_hook )
{
test_hp( l );
}
+ TEST_F( IntrusiveMichaelList_DHP, member_hook_stat )
+ {
+ struct traits: public ci::michael_list::traits {
+ typedef ci::michael_list::member_hook< offsetof( member_item, hMember ), cds::opt::gc< gc_type >> hook;
+ typedef mock_disposer disposer;
+ typedef cmp< member_item > compare;
+ typedef intrusive_list_common::less< member_item > less;
+ typedef cds::intrusive::michael_list::stat<> stat;
+ };
+ typedef ci::MichaelList< gc_type, member_item, traits > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ TEST_F( IntrusiveMichaelList_DHP, member_hook_wrapped_stat )
+ {
+ struct traits: public ci::michael_list::traits {
+ typedef ci::michael_list::member_hook< offsetof( member_item, hMember ), cds::opt::gc< gc_type >> hook;
+ typedef mock_disposer disposer;
+ typedef cmp< member_item > compare;
+ typedef cds::intrusive::michael_list::wrapped_stat<> stat;
+ };
+ typedef ci::MichaelList< gc_type, member_item, traits > list_type;
+
+ cds::intrusive::michael_list::stat<> st;
+ list_type l( st );
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_intrusive_list_hp.h"
test_hp( l );
}
+ TEST_F( IntrusiveMichaelList_HP, base_hook_stat )
+ {
+ struct traits: public ci::michael_list::traits {
+ typedef ci::michael_list::base_hook< cds::opt::gc< gc_type >> hook;
+ typedef mock_disposer disposer;
+ typedef cmp< base_item > compare;
+ typedef intrusive_list_common::less< base_item > less;
+ typedef cds::intrusive::michael_list::stat<> stat;
+ };
+ typedef ci::MichaelList< gc_type, base_item, traits > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ TEST_F( IntrusiveMichaelList_HP, base_hook_wrapped_stat )
+ {
+ struct traits: public ci::michael_list::traits {
+ typedef ci::michael_list::base_hook< cds::opt::gc< gc_type >> hook;
+ typedef mock_disposer disposer;
+ typedef cmp< base_item > compare;
+ typedef cds::intrusive::michael_list::wrapped_stat<> stat;
+ };
+ typedef ci::MichaelList< gc_type, base_item, traits > list_type;
+
+ cds::intrusive::michael_list::stat<> st;
+ list_type l( st );
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
TEST_F( IntrusiveMichaelList_HP, member_hook )
{
typedef ci::MichaelList< gc_type, member_item,
test_hp( l );
}
+ TEST_F( IntrusiveMichaelList_HP, member_hook_stat )
+ {
+ struct traits: public ci::michael_list::traits {
+ typedef ci::michael_list::member_hook< offsetof( member_item, hMember ), cds::opt::gc< gc_type >> hook;
+ typedef mock_disposer disposer;
+ typedef cmp< member_item > compare;
+ typedef intrusive_list_common::less< member_item > less;
+ typedef cds::intrusive::michael_list::stat<> stat;
+ };
+ typedef ci::MichaelList< gc_type, member_item, traits > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ TEST_F( IntrusiveMichaelList_HP, member_hook_wrapped_stat )
+ {
+ struct traits: public ci::michael_list::traits {
+ typedef ci::michael_list::member_hook< offsetof( member_item, hMember ), cds::opt::gc< gc_type >> hook;
+ typedef mock_disposer disposer;
+ typedef cmp< member_item > compare;
+ typedef cds::intrusive::michael_list::wrapped_stat<> stat;
+ };
+ typedef ci::MichaelList< gc_type, member_item, traits > list_type;
+
+ cds::intrusive::michael_list::stat<> st;
+ list_type l( st );
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_kv_list_hp.h"
void TearDown()
{
cds::threading::Manager::detachThread();
- cds::gc::hp::GarbageCollector::Destruct();
+ cds::gc::dhp::GarbageCollector::Destruct();
}
};
test_hp( l );
}
+ TEST_F( LazyKVList_DHP, stat )
+ {
+ struct traits: public cc::lazy_list::traits
+ {
+ typedef lt less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::lazy_list::stat<> stat;
+ };
+ typedef cc::LazyKVList<gc_type, key_type, value_type, traits > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ TEST_F( LazyKVList_DHP, wrapped_stat )
+ {
+ struct traits: public cc::lazy_list::traits
+ {
+ typedef lt less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::lazy_list::wrapped_stat<> stat;
+ };
+ typedef cc::LazyKVList<gc_type, key_type, value_type, traits > list_type;
+
+ cds::container::lazy_list::stat<> st;
+ list_type l( st );
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_kv_list_hp.h"
test_hp( l );
}
+ TEST_F( LazyKVList_HP, stat )
+ {
+ struct traits: public cc::lazy_list::traits
+ {
+ typedef lt less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::lazy_list::stat<> stat;
+ };
+ typedef cc::LazyKVList<gc_type, key_type, value_type, traits > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ TEST_F( LazyKVList_HP, wrapped_stat )
+ {
+ struct traits: public cc::lazy_list::traits
+ {
+ typedef lt less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::lazy_list::wrapped_stat<> stat;
+ };
+ typedef cc::LazyKVList<gc_type, key_type, value_type, traits > list_type;
+
+ cds::container::lazy_list::stat<> st;
+ list_type l( st );
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_kv_list_nogc.h"
test_ordered_iterator( l );
}
+ TEST_F( LazyKVList_NOGC, stat )
+ {
+ struct traits: public cc::lazy_list::traits
+ {
+ typedef lt less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::lazy_list::stat<> stat;
+ };
+ typedef cc::LazyKVList<gc_type, key_type, value_type, traits > list_type;
+
+ list_type l;
+ test( l );
+ test_ordered_iterator( l );
+ }
+
+ TEST_F( LazyKVList_NOGC, wrapped_stat )
+ {
+ struct traits: public cc::lazy_list::traits
+ {
+ typedef lt less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::lazy_list::wrapped_stat<> stat;
+ };
+ typedef cc::LazyKVList<gc_type, key_type, value_type, traits > list_type;
+
+ cds::container::lazy_list::stat<> st;
+ list_type l( st );
+ test( l );
+ test_ordered_iterator( l );
+ }
+
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_kv_list_hp.h"
void TearDown()
{
cds::threading::Manager::detachThread();
- cds::gc::hp::GarbageCollector::Destruct();
+ cds::gc::dhp::GarbageCollector::Destruct();
}
};
test_hp( l );
}
+ TEST_F( MichaelKVList_DHP, stat )
+ {
+ struct traits: public cc::michael_list::traits
+ {
+ typedef lt less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::michael_list::stat<> stat;
+ };
+ typedef cc::MichaelKVList<gc_type, key_type, value_type, traits > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ TEST_F( MichaelKVList_DHP, wrapped_stat )
+ {
+ struct traits: public cc::michael_list::traits
+ {
+ typedef lt less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::michael_list::wrapped_stat<> stat;
+ };
+ typedef cc::MichaelKVList<gc_type, key_type, value_type, traits > list_type;
+
+ cds::container::michael_list::stat<> st;
+ list_type l( st );
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_kv_list_hp.h"
test_hp( l );
}
+ TEST_F( MichaelKVList_HP, stat )
+ {
+ struct traits: public cc::michael_list::traits
+ {
+ typedef lt less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::michael_list::stat<> stat;
+ };
+ typedef cc::MichaelKVList<gc_type, key_type, value_type, traits > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ TEST_F( MichaelKVList_HP, wrapped_stat )
+ {
+ struct traits: public cc::michael_list::traits
+ {
+ typedef lt less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::michael_list::wrapped_stat<> stat;
+ };
+ typedef cc::MichaelKVList<gc_type, key_type, value_type, traits > list_type;
+
+ cds::container::michael_list::stat<> st;
+ list_type l( st );
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_kv_list_nogc.h"
test_ordered_iterator( l );
}
+ // MichaelKVList/NOGC with internal statistics enabled.
+ TEST_F( MichaelKVList_NOGC, stat )
+ {
+ struct traits: public cc::michael_list::traits
+ {
+ typedef lt less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::michael_list::stat<> stat;
+ };
+ typedef cc::MichaelKVList<gc_type, key_type, value_type, traits > list_type;
+
+ list_type l;
+ test( l );
+ test_ordered_iterator( l );
+ }
+
+ // MichaelKVList/NOGC with an external stat object shared via wrapped_stat.
+ TEST_F( MichaelKVList_NOGC, wrapped_stat )
+ {
+ struct traits: public cc::michael_list::traits
+ {
+ typedef lt less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::michael_list::wrapped_stat<> stat;
+ };
+ typedef cc::MichaelKVList<gc_type, key_type, value_type, traits > list_type;
+
+ cds::container::michael_list::stat<> st;
+ list_type l( st );
+ test( l );
+ test_ordered_iterator( l );
+ }
+
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_list_hp.h"
void TearDown()
{
cds::threading::Manager::detachThread();
- cds::gc::hp::GarbageCollector::Destruct();
+ cds::gc::dhp::GarbageCollector::Destruct();
}
};
test_hp( l );
}
+ // LazyList/DHP with internal statistics enabled.
+ TEST_F( LazyList_DHP, stat )
+ {
+ struct traits: public cc::lazy_list::traits
+ {
+ typedef lt<item> less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::lazy_list::stat<> stat;
+ };
+ typedef cc::LazyList<gc_type, item, traits > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ // LazyList/DHP with an external stat object shared via wrapped_stat.
+ TEST_F( LazyList_DHP, wrapped_stat )
+ {
+ struct traits: public cc::lazy_list::traits
+ {
+ typedef lt<item> less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::lazy_list::wrapped_stat<> stat;
+ };
+ typedef cc::LazyList<gc_type, item, traits > list_type;
+
+ cds::container::lazy_list::stat<> st;
+ list_type l( st );
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_list_hp.h"
test_hp( l );
}
+ // LazyList/HP with internal statistics enabled.
+ TEST_F( LazyList_HP, stat )
+ {
+ struct traits: public cc::lazy_list::traits
+ {
+ typedef lt<item> less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::lazy_list::stat<> stat;
+ };
+ typedef cc::LazyList<gc_type, item, traits > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ // LazyList/HP with an external stat object shared via wrapped_stat.
+ TEST_F( LazyList_HP, wrapped_stat )
+ {
+ struct traits: public cc::lazy_list::traits
+ {
+ typedef lt<item> less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::lazy_list::wrapped_stat<> stat;
+ };
+ typedef cc::LazyList<gc_type, item, traits > list_type;
+
+ cds::container::lazy_list::stat<> st;
+ list_type l( st );
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_list_nogc.h"
test_ordered_iterator( l );
}
+ // LazyList/NOGC with internal statistics enabled.
+ TEST_F( LazyList_NOGC, stat )
+ {
+ struct traits: public cc::lazy_list::traits
+ {
+ typedef lt<item> less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::lazy_list::stat<> stat;
+ };
+ typedef cc::LazyList<gc_type, item, traits > list_type;
+
+ list_type l;
+ test( l );
+ test_ordered_iterator( l );
+ }
+
+ // LazyList/NOGC with an external stat object shared via wrapped_stat.
+ TEST_F( LazyList_NOGC, wrapped_stat )
+ {
+ struct traits: public cc::lazy_list::traits
+ {
+ typedef lt<item> less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::lazy_list::wrapped_stat<> stat;
+ };
+ typedef cc::LazyList<gc_type, item, traits > list_type;
+
+ cds::container::lazy_list::stat<> st;
+ list_type l( st );
+ test( l );
+ test_ordered_iterator( l );
+ }
+
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_list_hp.h"
void TearDown()
{
cds::threading::Manager::detachThread();
- cds::gc::hp::GarbageCollector::Destruct();
+ cds::gc::dhp::GarbageCollector::Destruct();
}
};
test_hp( l );
}
+ // MichaelList/DHP with internal statistics enabled.
+ TEST_F( MichaelList_DHP, stat )
+ {
+ struct traits: public cc::michael_list::traits
+ {
+ typedef lt<item> less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::michael_list::stat<> stat;
+
+ };
+ typedef cc::MichaelList<gc_type, item, traits > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ // MichaelList/DHP with an external stat object shared via wrapped_stat.
+ TEST_F( MichaelList_DHP, wrapped_stat )
+ {
+ struct traits: public cc::michael_list::traits
+ {
+ typedef lt<item> less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::michael_list::wrapped_stat<> stat;
+
+ };
+ typedef cc::MichaelList<gc_type, item, traits > list_type;
+
+ cds::container::michael_list::stat<> st;
+ list_type l( st );
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_list_hp.h"
test_hp( l );
}
+ // MichaelList/HP with internal statistics enabled.
+ TEST_F( MichaelList_HP, stat )
+ {
+ struct traits: public cc::michael_list::traits
+ {
+ typedef lt<item> less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::michael_list::stat<> stat;
+
+ };
+ typedef cc::MichaelList<gc_type, item, traits > list_type;
+
+ list_type l;
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
+ // MichaelList/HP with an external stat object shared via wrapped_stat.
+ TEST_F( MichaelList_HP, wrapped_stat )
+ {
+ struct traits: public cc::michael_list::traits
+ {
+ typedef lt<item> less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::michael_list::wrapped_stat<> stat;
+
+ };
+ typedef cc::MichaelList<gc_type, item, traits > list_type;
+
+ cds::container::michael_list::stat<> st;
+ list_type l( st );
+ test_common( l );
+ test_ordered_iterator( l );
+ test_hp( l );
+ }
+
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_list_nogc.h"
test_ordered_iterator( l );
}
+ // MichaelList/NOGC with internal statistics enabled.
+ TEST_F( MichaelList_NOGC, stat )
+ {
+ struct traits: public cc::michael_list::traits
+ {
+ typedef lt<item> less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::michael_list::stat<> stat;
+ };
+ typedef cc::MichaelList<gc_type, item, traits > list_type;
+
+ list_type l;
+ test( l );
+ test_ordered_iterator( l );
+ }
+
+ // MichaelList/NOGC with an external stat object shared via wrapped_stat.
+ TEST_F( MichaelList_NOGC, wrapped_stat )
+ {
+ struct traits: public cc::michael_list::traits
+ {
+ typedef lt<item> less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::michael_list::wrapped_stat<> stat;
+ };
+ typedef cc::MichaelList<gc_type, item, traits > list_type;
+
+ cds::container::michael_list::stat<> st;
+ // Parenthesized construction for consistency: every other wrapped_stat
+ // test in this change-set constructs the list as "l( st )", not "l{ st }".
+ list_type l( st );
+ test( l );
+ test_ordered_iterator( l );
+ }
+
} // namespace
--- /dev/null
+/*
+ This file is a part of libcds - Concurrent Data Structures library
+
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef CDSUNIT_LIST_TEST_INTRUSIVE_ITERABLE_LIST_H
+#define CDSUNIT_LIST_TEST_INTRUSIVE_ITERABLE_LIST_H
+
+#include <cds_test/check_size.h>
+#include <cds_test/fixture.h>
+
+namespace cds_test {
+
+ class intrusive_iterable_list : public fixture
+ {
+ public:
+ // Per-item call counters: the tests use them to verify that functors
+ // (find/update/insert/erase) and the disposer run the expected number of times.
+ struct stat {
+ int nDisposeCount;
+ int nUpdateExistsCall;
+ int nUpdateNewCall;
+ int nFindCall;
+ int nEraseCall;
+ int nInsertCall;
+
+ stat()
+ : nDisposeCount( 0 )
+ , nUpdateExistsCall( 0 )
+ , nUpdateNewCall( 0 )
+ , nFindCall( 0 )
+ , nEraseCall( 0 )
+ , nInsertCall( 0 )
+ {}
+
+ stat( const stat& s )
+ {
+ *this = s;
+ }
+
+ // Byte-wise copy is safe here: stat holds only ints (trivially copyable).
+ stat& operator =( const stat& s )
+ {
+ memcpy( this, &s, sizeof( s ) );
+ return *this;
+ }
+ };
+
+ // Intrusive test item: an int key/value pair plus mutable per-item counters.
+ // Note: copy ctor and both assignment operators deliberately do NOT copy `s`,
+ // so counters always describe the individual object, not its source.
+ struct item_type
+ {
+ int nKey;
+ int nVal;
+
+ mutable stat s;
+
+ // NOTE(review): default ctor leaves nKey/nVal uninitialized;
+ // the tests assign both fields before any use.
+ item_type()
+ {}
+
+ item_type( int key, int val )
+ : nKey( key )
+ , nVal( val )
+ , s()
+ {}
+
+ item_type( const item_type& v )
+ : nKey( v.nKey )
+ , nVal( v.nVal )
+ , s()
+ {}
+
+ const int& key() const
+ {
+ return nKey;
+ }
+
+ item_type& operator=( item_type const& src )
+ {
+ nKey = src.nKey;
+ nVal = src.nVal;
+ return *this;
+ }
+
+ item_type& operator=( item_type&& src )
+ {
+ nKey = src.nKey;
+ nVal = src.nVal;
+ return *this;
+ }
+ };
+
+ // "less" ordering by key; the templated overloads allow comparing an item
+ // against a raw key value from either side.
+ template <typename T>
+ struct less
+ {
+ bool operator ()( const T& v1, const T& v2 ) const
+ {
+ return v1.key() < v2.key();
+ }
+
+ template <typename Q>
+ bool operator ()( const T& v1, const Q& v2 ) const
+ {
+ return v1.key() < v2;
+ }
+
+ template <typename Q>
+ bool operator ()( const Q& v1, const T& v2 ) const
+ {
+ return v1 < v2.key();
+ }
+ };
+
+ // Key-only surrogate used to exercise the *_with( key, pred ) overloads.
+ struct other_item {
+ int nKey;
+
+ other_item( int n )
+ : nKey( n )
+ {}
+ };
+
+ // Predicate comparing any two key-bearing types by their nKey member.
+ struct other_less {
+ template <typename T, typename Q>
+ bool operator()( T const& i1, Q const& i2 ) const
+ {
+ return i1.nKey < i2.nKey;
+ }
+ };
+
+ // Three-way comparator (-1 / 0 / 1) by key; heterogeneous overloads
+ // support comparing an item against a raw key from either side.
+ template <typename T>
+ struct cmp {
+ int operator ()( const T& v1, const T& v2 ) const
+ {
+ if ( v1.key() < v2.key() )
+ return -1;
+ return v1.key() > v2.key() ? 1 : 0;
+ }
+
+ template <typename Q>
+ int operator ()( const T& v1, const Q& v2 ) const
+ {
+ if ( v1.key() < v2 )
+ return -1;
+ return v1.key() > v2 ? 1 : 0;
+ }
+
+ template <typename Q>
+ int operator ()( const Q& v1, const T& v2 ) const
+ {
+ if ( v1 < v2.key() )
+ return -1;
+ return v1 > v2.key() ? 1 : 0;
+ }
+ };
+
+ // Disposer stub: counts invocations instead of freeing the item
+ // (items live in stack arrays inside the tests).
+ struct mock_disposer
+ {
+ template <typename T>
+ void operator ()( T * p )
+ {
+ ++p->s.nDisposeCount;
+ }
+ };
+
+ // update() functor: distinguishes "inserted as new" (old == nullptr)
+ // from "updated an existing element" via separate counters.
+ struct update_functor
+ {
+ template <typename T>
+ void operator()( T& item, T * old )
+ {
+ if ( !old )
+ ++item.s.nUpdateNewCall;
+ else
+ ++item.s.nUpdateExistsCall;
+ }
+ };
+
+ // find() functor: records that the item was visited.
+ struct find_functor
+ {
+ template <typename T, typename Q>
+ void operator ()( T& item, Q& /*val*/ )
+ {
+ ++item.s.nFindCall;
+ }
+ };
+
+ // erase() functor: records that the item was erased through the functor path.
+ struct erase_functor
+ {
+ template <typename T>
+ void operator()( T const& item )
+ {
+ item.s.nEraseCall++;
+ }
+ };
+
+ protected:
+ // Core functional test shared by all iterable-list GC flavors: exercises
+ // insert / find(_with) / update / upsert / erase(_with) / unlink / clear,
+ // iterators, item counting, and disposer invocation counts.
+ template <typename List>
+ void test_common( List& l )
+ {
+ // Precondition: list is empty
+ // Postcondition: list is empty
+
+ static const size_t nSize = 20;
+ typedef typename List::value_type value_type;
+ value_type arr[ nSize ];
+ value_type arr2[ nSize ];
+
+ for ( size_t i = 0; i < nSize; ++i ) {
+ arr[i].nKey = static_cast<int>( i );
+ arr[i].nVal = arr[i].nKey * 10;
+
+ arr2[i] = arr[i];
+ }
+ shuffle( arr, arr + nSize );
+ shuffle( arr2, arr2 + nSize );
+
+ ASSERT_TRUE( l.empty() );
+ ASSERT_CONTAINER_SIZE( l, 0 );
+
+ typedef typename List::iterator iterator;
+
+ // insert / find
+ for ( auto& i : arr ) {
+ EXPECT_FALSE( l.contains( i.nKey ));
+ EXPECT_FALSE( l.contains( other_item( i.nKey ), other_less()));
+ EXPECT_FALSE( l.find( i.nKey, []( value_type& item, int ) { ++item.s.nFindCall; } ));
+ EXPECT_EQ( i.s.nFindCall, 0 );
+ EXPECT_FALSE( l.find_with( other_item( i.nKey ), other_less(), []( value_type& item, other_item const& ) { ++item.s.nFindCall; } ));
+ EXPECT_EQ( i.s.nFindCall, 0 );
+
+ // Rotate through the four insertion APIs so each is covered.
+ switch ( i.nKey % 4 ) {
+ case 0:
+ EXPECT_TRUE( l.insert( i ));
+ break;
+ case 1:
+ EXPECT_EQ( i.s.nInsertCall, 0 );
+ EXPECT_TRUE( l.insert( i, []( value_type& i ) { ++i.s.nInsertCall; } ));
+ EXPECT_EQ( i.s.nInsertCall, 1 );
+ break;
+ case 2:
+ {
+ // update with bAllowInsert=false on a missing key must do nothing
+ std::pair<bool, bool> ret = l.update( i, []( value_type& i, value_type * old ) {
+ EXPECT_TRUE( old == nullptr );
+ EXPECT_EQ( i.s.nUpdateNewCall, 0 );
+ ++i.s.nUpdateNewCall;
+ }, false );
+ EXPECT_EQ( i.s.nUpdateNewCall, 0 );
+ EXPECT_EQ( ret.first, false );
+ EXPECT_EQ( ret.second, false );
+
+ ret = l.update( i, []( value_type& i, value_type * old ) {
+ EXPECT_TRUE( old == nullptr );
+ EXPECT_EQ( i.s.nUpdateNewCall, 0 );
+ ++i.s.nUpdateNewCall;
+ }, true );
+ EXPECT_EQ( i.s.nUpdateNewCall, 1 );
+ EXPECT_EQ( ret.first, true );
+ EXPECT_EQ( ret.second, true );
+ }
+ break;
+ case 3:
+ {
+ std::pair<bool, bool> ret = l.upsert( i, false );
+ EXPECT_EQ( ret.first, false );
+ EXPECT_EQ( ret.second, false );
+
+ ret = l.upsert( i );
+ EXPECT_EQ( ret.first, true );
+ EXPECT_EQ( ret.second, true );
+ }
+ break;
+ }
+
+ EXPECT_TRUE( l.contains( i.nKey ));
+ EXPECT_TRUE( l.contains( i ));
+ EXPECT_TRUE( l.contains( other_item( i.nKey ), other_less()));
+ EXPECT_TRUE( l.find( i.nKey, []( value_type& item, int ) { ++item.s.nFindCall; } ));
+ EXPECT_EQ( i.s.nFindCall, 1 );
+ EXPECT_TRUE( l.find( i, []( value_type& item, value_type const& ) { ++item.s.nFindCall; } ));
+ EXPECT_EQ( i.s.nFindCall, 2 );
+ EXPECT_TRUE( l.find_with( other_item( i.nKey ), other_less(), []( value_type& item, other_item const& ) { ++item.s.nFindCall; } ));
+ EXPECT_EQ( i.s.nFindCall, 3 );
+
+ EXPECT_FALSE( l.insert( i ) );
+ ASSERT_FALSE( l.empty() );
+
+ int const ckey = i.nKey;
+ iterator it = l.find( ckey );
+ ASSERT_FALSE( it == l.end() );
+ EXPECT_EQ( it->nKey, i.nKey );
+ EXPECT_EQ( (*it).nVal, i.nVal );
+ check_ordered( it, l.end() );
+
+ it = l.find( i.nKey );
+ ASSERT_FALSE( it == l.end() );
+ EXPECT_EQ( it->nKey, i.nKey );
+ EXPECT_EQ( (*it).nVal, i.nVal );
+ check_ordered( it, l.end() );
+
+ it = l.find_with( other_item( i.nKey ), other_less() );
+ ASSERT_FALSE( it == l.end() );
+ EXPECT_EQ( it->nKey, i.nKey );
+ EXPECT_EQ( it->nVal, i.nVal );
+ check_ordered( it, l.end() );
+
+ }
+ ASSERT_CONTAINER_SIZE( l, nSize );
+
+ // check all items
+ for ( auto const& i : arr ) {
+ EXPECT_TRUE( l.contains( i.nKey ));
+ EXPECT_TRUE( l.contains( i ));
+ EXPECT_TRUE( l.contains( other_item( i.nKey ), other_less()));
+ EXPECT_TRUE( l.find( i.nKey, []( value_type& item, int ) { ++item.s.nFindCall; } ));
+ EXPECT_EQ( i.s.nFindCall, 4 );
+ EXPECT_TRUE( l.find( i, []( value_type& item, value_type const& ) { ++item.s.nFindCall; } ));
+ EXPECT_EQ( i.s.nFindCall, 5 );
+ EXPECT_TRUE( l.find_with( other_item( i.nKey ), other_less(), []( value_type& item, other_item const& ) { ++item.s.nFindCall; } ));
+ EXPECT_EQ( i.s.nFindCall, 6 );
+ }
+ ASSERT_FALSE( l.empty() );
+ ASSERT_CONTAINER_SIZE( l, nSize );
+
+ // update existing test
+ for ( auto& i : arr2 ) {
+ EXPECT_EQ( i.s.nUpdateExistsCall, 0 );
+ std::pair<bool, bool> ret = l.update( i, update_functor() );
+ EXPECT_TRUE( ret.first );
+ EXPECT_FALSE( ret.second );
+ EXPECT_EQ( i.s.nUpdateExistsCall, 1 );
+ }
+
+ // updating again with an equal value is a no-op: the functor is not called,
+ // so each counter stays at 1
+ for ( auto& i : arr2 ) {
+ EXPECT_EQ( i.s.nUpdateExistsCall, 1 );
+ std::pair<bool, bool> ret = l.update( i, update_functor() );
+ EXPECT_TRUE( ret.first );
+ EXPECT_FALSE( ret.second );
+ EXPECT_EQ( i.s.nUpdateExistsCall, 1 );
+ }
+
+ for ( auto& i : arr ) {
+ EXPECT_EQ( i.s.nUpdateExistsCall, 0 );
+ std::pair<bool, bool> ret = l.update( i, []( value_type& i, value_type * old ) {
+ EXPECT_FALSE( old == nullptr );
+ EXPECT_EQ( i.s.nUpdateExistsCall, 0 );
+ ++i.s.nUpdateExistsCall;
+ });
+ EXPECT_TRUE( ret.first );
+ EXPECT_FALSE( ret.second );
+ EXPECT_EQ( i.s.nUpdateExistsCall, 1 );
+ }
+
+ // erase test
+ for ( auto const& i : arr ) {
+ if ( i.nKey & 1 )
+ EXPECT_TRUE( l.erase( i.nKey ));
+ else
+ EXPECT_TRUE( l.erase_with( other_item( i.nKey ), other_less() ));
+
+ EXPECT_FALSE( l.contains( i ));
+ }
+ EXPECT_TRUE( l.empty() );
+ EXPECT_CONTAINER_SIZE( l, 0 );
+
+ // Apply retired pointer to clean links
+ List::gc::force_dispose();
+
+ for ( auto const& i : arr )
+ EXPECT_EQ( i.s.nDisposeCount, 2 );
+ for ( auto const& i : arr2 )
+ EXPECT_EQ( i.s.nDisposeCount, 1 );
+
+ // re-insert the arr items via update( ..., bAllowInsert=true )
+ // (the list is empty at this point, so each update inserts)
+ for ( auto& i : arr ) {
+ int const updateNewCall = i.s.nUpdateNewCall;
+ std::pair<bool, bool> ret = l.update( i, update_functor(), false );
+ EXPECT_FALSE( ret.first );
+ EXPECT_FALSE( ret.second );
+ EXPECT_EQ( i.s.nUpdateNewCall, updateNewCall );
+
+ ret = l.update( i, update_functor(), true );
+ EXPECT_TRUE( ret.first );
+ EXPECT_TRUE( ret.second );
+ EXPECT_EQ( i.s.nUpdateNewCall, updateNewCall + 1 );
+ }
+ EXPECT_FALSE( l.empty() );
+ EXPECT_CONTAINER_SIZE( l, nSize );
+
+ // erase with functor
+ for ( auto const& i : arr ) {
+ EXPECT_EQ( i.s.nEraseCall, 0 );
+ if ( i.nKey & 1 ) {
+ EXPECT_TRUE( l.erase_with( other_item( i.nKey ), other_less(), erase_functor()));
+ EXPECT_FALSE( l.erase_with( other_item( i.nKey ), other_less(), erase_functor()));
+ }
+ else {
+ EXPECT_TRUE( l.erase( i.nKey, []( value_type& item) { ++item.s.nEraseCall; } ));
+ EXPECT_FALSE( l.erase( i.nKey, []( value_type& item) { ++item.s.nEraseCall; } ));
+ }
+ EXPECT_EQ( i.s.nEraseCall, 1 );
+ EXPECT_FALSE( l.contains( i.nKey ));
+ }
+ EXPECT_TRUE( l.empty() );
+ EXPECT_CONTAINER_SIZE( l, 0 );
+
+ // Apply retired pointer to clean links
+ List::gc::force_dispose();
+
+ for ( auto const& i : arr )
+ EXPECT_EQ( i.s.nDisposeCount, 3 );
+
+ // clear test
+ for ( auto& i : arr )
+ EXPECT_TRUE( l.insert( i ));
+
+ EXPECT_FALSE( l.empty() );
+ EXPECT_CONTAINER_SIZE( l, nSize );
+
+ l.clear();
+
+ EXPECT_TRUE( l.empty() );
+ EXPECT_CONTAINER_SIZE( l, 0 );
+
+ // Apply retired pointer to clean links
+ List::gc::force_dispose();
+ for ( auto const& i : arr ) {
+ EXPECT_EQ( i.s.nDisposeCount, 4 );
+ EXPECT_FALSE( l.contains( i ));
+ }
+
+ // unlink test: unlink() succeeds only for the very same object,
+ // not for an equal copy
+ for ( auto& i : arr )
+ EXPECT_TRUE( l.insert( i ) );
+ for ( auto& i : arr ) {
+ value_type val( i );
+ EXPECT_TRUE( l.contains( val ));
+ EXPECT_FALSE( l.unlink( val ));
+ EXPECT_TRUE( l.contains( val ) );
+ EXPECT_TRUE( l.unlink( i ));
+ EXPECT_FALSE( l.unlink( i ));
+ EXPECT_FALSE( l.contains( i ) );
+ }
+ EXPECT_TRUE( l.empty() );
+ EXPECT_CONTAINER_SIZE( l, 0 );
+
+ // Apply retired pointer to clean links
+ List::gc::force_dispose();
+ for ( auto const& i : arr ) {
+ EXPECT_EQ( i.s.nDisposeCount, 5 );
+ EXPECT_FALSE( l.contains( i ) );
+ }
+
+ // Iterators on empty list
+ {
+ auto it = l.begin();
+ auto itEnd = l.end();
+ auto cit = l.cbegin();
+ auto citEnd = l.cend();
+
+ EXPECT_TRUE( it == itEnd );
+ EXPECT_TRUE( it == cit );
+ EXPECT_TRUE( cit == citEnd );
+
+ ++it;
+ ++cit;
+
+ EXPECT_TRUE( it == itEnd );
+ EXPECT_TRUE( it == cit );
+ EXPECT_TRUE( cit == citEnd );
+ }
+ }
+
+ // Verifies that forward iteration visits items in ascending key order
+ // regardless of (shuffled) insertion order, for both iterator and
+ // const_iterator, then clears the list and checks disposal.
+ template <typename List>
+ void test_ordered_iterator( List& l )
+ {
+ // Precondition: list is empty
+ // Postcondition: list is empty
+
+ static const size_t nSize = 20;
+ typedef typename List::value_type value_type;
+ value_type arr[nSize];
+
+ for ( size_t i = 0; i < nSize; ++i ) {
+ arr[i].nKey = static_cast<int>(i);
+ arr[i].nVal = arr[i].nKey * 10;
+ }
+ shuffle( arr, arr + nSize );
+
+ ASSERT_TRUE( l.empty() );
+ ASSERT_CONTAINER_SIZE( l, 0 );
+
+ for ( auto& i : arr )
+ EXPECT_TRUE( l.insert( i ) );
+
+ int key = 0;
+ for ( auto it = l.begin(); it != l.end(); ++it ) {
+ EXPECT_EQ( it->nKey, key );
+ EXPECT_EQ( (*it).nKey, key );
+ ++key;
+ }
+
+ key = 0;
+ for ( auto it = l.cbegin(); it != l.cend(); ++it ) {
+ EXPECT_EQ( it->nKey, key );
+ EXPECT_EQ( (*it).nKey, key );
+ ++key;
+ }
+
+ l.clear();
+ List::gc::force_dispose();
+ for ( auto const& i : arr ) {
+ EXPECT_EQ( i.s.nDisposeCount, 1 );
+ EXPECT_FALSE( l.contains( i ) );
+ }
+ }
+
+ // Asserts that keys are strictly increasing over [first, last).
+ template <typename Iterator>
+ void check_ordered( Iterator first, Iterator last )
+ {
+ while ( first != last ) {
+ Iterator it = first;
+ if ( ++it != last ) {
+ EXPECT_LT( first->nKey, it->nKey );
+ }
+ first = it;
+ }
+ }
+
+ };
+
+} // namespace cds_test
+
+#endif // CDSUNIT_LIST_TEST_INTRUSIVE_ITERABLE_LIST_H
--- /dev/null
+/*
+ This file is a part of libcds - Concurrent Data Structures library
+
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef CDSUNIT_LIST_TEST_INTRUSIVE_ITERABLE_LIST_HP_H
+#define CDSUNIT_LIST_TEST_INTRUSIVE_ITERABLE_LIST_HP_H
+
+#include "test_intrusive_iterable_list.h"
+
+namespace cds_test {
+
+ // Extends the base fixture with guarded_ptr-based get()/get_with() and
+ // extract()/extract_with() tests for GCs that provide guarded pointers.
+ class intrusive_iterable_list_hp : public intrusive_iterable_list
+ {
+ protected:
+ template <typename List>
+ void test_hp( List& l )
+ {
+ // Precondition: list is empty
+ // Postcondition: list is empty
+
+ static const size_t nSize = 20;
+ typedef typename List::value_type value_type;
+ value_type arr[nSize];
+
+ for ( size_t i = 0; i < nSize; ++i ) {
+ arr[i].nKey = static_cast<int>(i);
+ arr[i].nVal = arr[i].nKey * 10;
+ }
+ shuffle( arr, arr + nSize );
+
+ typedef typename List::guarded_ptr guarded_ptr;
+
+ ASSERT_TRUE( l.empty() );
+ ASSERT_CONTAINER_SIZE( l, 0 );
+
+ guarded_ptr gp;
+
+ // get() test: empty before insert, guarded access after
+ for ( auto& i : arr ) {
+ gp = l.get( i.nKey );
+ EXPECT_TRUE( !gp );
+ gp = l.get_with( other_item( i.nKey ), other_less() );
+ EXPECT_TRUE( !gp );
+
+ EXPECT_TRUE( l.insert( i ) );
+
+ gp = l.get( i.nKey );
+ ASSERT_FALSE( !gp );
+ EXPECT_EQ( gp->nKey, i.nKey );
+ EXPECT_EQ( gp->nVal, i.nVal );
+ gp = l.get_with( other_item( i.nKey ), other_less() );
+ ASSERT_FALSE( !gp );
+ EXPECT_EQ( gp->nKey, i.nKey );
+ EXPECT_EQ( gp->nVal, i.nVal );
+ }
+
+ // extract() test: first extraction succeeds, a repeat returns empty
+ for ( int i = 0; i < static_cast<int>(nSize); ++i ) {
+ if ( i & 1 )
+ gp = l.extract( i );
+ else
+ gp = l.extract_with( other_item( i ), other_less() );
+ ASSERT_FALSE( !gp );
+ EXPECT_EQ( gp->nKey, i );
+
+ gp = l.extract( i );
+ EXPECT_TRUE( !gp );
+ gp = l.extract_with( other_item( i ), other_less() );
+ EXPECT_TRUE( !gp );
+ }
+
+ ASSERT_TRUE( l.empty() );
+ ASSERT_CONTAINER_SIZE( l, 0 );
+
+ List::gc::force_dispose();
+ for ( auto const& i : arr ) {
+ EXPECT_EQ( i.s.nDisposeCount, 1 );
+ EXPECT_FALSE( l.contains( i ) );
+ }
+ }
+ };
+
+} // namespace cds_test
+
+#endif // CDSUNIT_LIST_TEST_INTRUSIVE_ITERABLE_LIST_HP_H
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSUNIT_LIST_TEST_INTRUSIVE_LAZY_LIST_RCU_H
#define CDSUNIT_LIST_TEST_INTRUSIVE_LAZY_LIST_RCU_H
this->test_rcu( l );
}
+// Intrusive LazyList/RCU, base hook, internal statistics enabled.
+TYPED_TEST_P( IntrusiveLazyList, base_hook_stat )
+{
+ struct traits: public ci::lazy_list::traits {
+ typedef ci::lazy_list::base_hook< cds::opt::gc< typename TestFixture::rcu_type >> hook;
+ typedef typename TestFixture::mock_disposer disposer;
+ typedef typename TestFixture::template cmp< typename TestFixture::base_item > compare;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::intrusive::lazy_list::stat<> stat;
+ };
+ typedef ci::LazyList< typename TestFixture::rcu_type, typename TestFixture::base_item, traits > list_type;
+
+ list_type l;
+ this->test_common( l );
+ this->test_ordered_iterator( l );
+ this->test_rcu( l );
+}
+
+// Intrusive LazyList/RCU, base hook, external stat object shared via wrapped_stat.
+TYPED_TEST_P( IntrusiveLazyList, base_hook_wrapped_stat )
+{
+ struct traits: public ci::lazy_list::traits {
+ typedef ci::lazy_list::base_hook< cds::opt::gc< typename TestFixture::rcu_type >> hook;
+ typedef typename TestFixture::mock_disposer disposer;
+ typedef typename TestFixture::template cmp< typename TestFixture::base_item > compare;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::intrusive::lazy_list::wrapped_stat<> stat;
+ };
+ typedef ci::LazyList< typename TestFixture::rcu_type, typename TestFixture::base_item, traits > list_type;
+
+ cds::intrusive::lazy_list::stat<> st;
+ list_type l( st );
+ this->test_common( l );
+ this->test_ordered_iterator( l );
+ this->test_rcu( l );
+}
+
TYPED_TEST_P( IntrusiveLazyList, member_hook )
{
typedef ci::LazyList< typename TestFixture::rcu_type, typename TestFixture::member_item,
this->test_rcu( l );
}
+// Intrusive LazyList/RCU, member hook, internal statistics enabled.
+TYPED_TEST_P( IntrusiveLazyList, member_hook_stat )
+{
+ struct traits: public ci::lazy_list::traits {
+ typedef ci::lazy_list::member_hook< offsetof( typename TestFixture::member_item, hMember ), cds::opt::gc< typename TestFixture::rcu_type >> hook;
+ typedef typename TestFixture::mock_disposer disposer;
+ typedef typename TestFixture::template cmp< typename TestFixture::member_item > compare;
+ typedef typename TestFixture::template less< typename TestFixture::member_item > less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::intrusive::lazy_list::stat<> stat;
+ };
+ typedef ci::LazyList< typename TestFixture::rcu_type, typename TestFixture::member_item, traits > list_type;
+
+ list_type l;
+ this->test_common( l );
+ this->test_ordered_iterator( l );
+ this->test_rcu( l );
+}
+
+// Intrusive LazyList/RCU, member hook, external stat object shared via wrapped_stat.
+// NOTE(review): unlike member_hook_stat above, no `less` typedef is set here -
+// presumably `compare` alone suffices; confirm this asymmetry is intended.
+TYPED_TEST_P( IntrusiveLazyList, member_hook_wrapped_stat )
+{
+ struct traits: public ci::lazy_list::traits {
+ typedef ci::lazy_list::member_hook< offsetof( typename TestFixture::member_item, hMember ), cds::opt::gc< typename TestFixture::rcu_type >> hook;
+ typedef typename TestFixture::mock_disposer disposer;
+ typedef typename TestFixture::template cmp< typename TestFixture::member_item > compare;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::intrusive::lazy_list::wrapped_stat<> stat;
+ };
+ typedef ci::LazyList< typename TestFixture::rcu_type, typename TestFixture::member_item, traits > list_type;
+
+ cds::intrusive::lazy_list::stat<> st;
+ list_type l( st );
+ this->test_common( l );
+ this->test_ordered_iterator( l );
+ this->test_rcu( l );
+}
// GCC 5: All test names should be written on single line, otherwise a runtime error will be encountered like as
// "No test named <test_name> can be found in this test case"
REGISTER_TYPED_TEST_CASE_P( IntrusiveLazyList,
- base_hook, base_hook_cmp, base_hook_item_counting, base_hook_backoff, base_hook_seqcst, member_hook, member_hook_cmp, member_hook_item_counting, member_hook_seqcst, member_hook_back_off
+ base_hook, base_hook_cmp, base_hook_item_counting, base_hook_backoff, base_hook_seqcst, base_hook_stat, base_hook_wrapped_stat, member_hook, member_hook_cmp, member_hook_item_counting, member_hook_seqcst, member_hook_back_off, member_hook_stat, member_hook_wrapped_stat
);
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSUNIT_LIST_TEST_INTRUSIVE_MICHAEL_LIST_RCU_H
#define CDSUNIT_LIST_TEST_INTRUSIVE_MICHAEL_LIST_RCU_H
this->test_rcu( l );
}
+// Intrusive MichaelList/RCU, base hook, internal statistics enabled.
+// NOTE(review): the LazyList counterpart also sets item_counter; here it is
+// omitted - confirm this difference is intended.
+TYPED_TEST_P( IntrusiveMichaelList, base_hook_stat )
+{
+ struct traits: public ci::michael_list::traits {
+ typedef ci::michael_list::base_hook< cds::opt::gc< typename TestFixture::rcu_type >> hook;
+ typedef typename TestFixture::mock_disposer disposer;
+ typedef typename TestFixture::template cmp< typename TestFixture::base_item > compare;
+ typedef cds::intrusive::michael_list::stat<> stat;
+ };
+ typedef ci::MichaelList< typename TestFixture::rcu_type, typename TestFixture::base_item, traits > list_type;
+
+ list_type l;
+ this->test_common( l );
+ this->test_ordered_iterator( l );
+ this->test_rcu( l );
+}
+
+// Intrusive MichaelList/RCU, base hook, external stat object shared via wrapped_stat.
+TYPED_TEST_P( IntrusiveMichaelList, base_hook_wrapped_stat )
+{
+ struct traits: public ci::michael_list::traits {
+ typedef ci::michael_list::base_hook< cds::opt::gc< typename TestFixture::rcu_type >> hook;
+ typedef typename TestFixture::mock_disposer disposer;
+ typedef typename TestFixture::template cmp< typename TestFixture::base_item > compare;
+ typedef cds::intrusive::michael_list::wrapped_stat<> stat;
+ };
+ typedef ci::MichaelList< typename TestFixture::rcu_type, typename TestFixture::base_item, traits > list_type;
+
+ cds::intrusive::michael_list::stat<> st;
+ list_type l( st );
+ this->test_common( l );
+ this->test_ordered_iterator( l );
+ this->test_rcu( l );
+}
+
TYPED_TEST_P( IntrusiveMichaelList, member_hook )
{
typedef ci::MichaelList< typename TestFixture::rcu_type, typename TestFixture::member_item,
this->test_rcu( l );
}
+// Intrusive MichaelList/RCU, member hook, internal statistics enabled.
+TYPED_TEST_P( IntrusiveMichaelList, member_hook_stat )
+{
+ struct traits: public ci::michael_list::traits {
+ typedef ci::michael_list::member_hook< offsetof( typename TestFixture::member_item, hMember ), cds::opt::gc< typename TestFixture::rcu_type >> hook;
+ typedef typename TestFixture::mock_disposer disposer;
+ typedef typename TestFixture::template cmp< typename TestFixture::member_item > compare;
+ typedef cds::intrusive::michael_list::stat<> stat;
+ };
+ typedef ci::MichaelList< typename TestFixture::rcu_type, typename TestFixture::member_item, traits > list_type;
+
+ list_type l;
+ this->test_common( l );
+ this->test_ordered_iterator( l );
+ this->test_rcu( l );
+}
+
+// Intrusive MichaelList/RCU, member hook, external stat object shared via wrapped_stat.
+TYPED_TEST_P( IntrusiveMichaelList, member_hook_wrapped_stat )
+{
+ struct traits: public ci::michael_list::traits {
+ typedef ci::michael_list::member_hook< offsetof( typename TestFixture::member_item, hMember ), cds::opt::gc< typename TestFixture::rcu_type >> hook;
+ typedef typename TestFixture::mock_disposer disposer;
+ typedef typename TestFixture::template cmp< typename TestFixture::member_item > compare;
+ typedef cds::intrusive::michael_list::wrapped_stat<> stat;
+ };
+ typedef ci::MichaelList< typename TestFixture::rcu_type, typename TestFixture::member_item, traits > list_type;
+
+ cds::intrusive::michael_list::stat<> st;
+ list_type l( st );
+ this->test_common( l );
+ this->test_ordered_iterator( l );
+ this->test_rcu( l );
+}
+
// GCC 5: All test names should be written on single line, otherwise a runtime error will be encountered like as
// "No test named <test_name> can be found in this test case"
REGISTER_TYPED_TEST_CASE_P( IntrusiveMichaelList,
- base_hook, base_hook_cmp, base_hook_item_counting, base_hook_backoff, base_hook_seqcst, member_hook, member_hook_cmp, member_hook_item_counting, member_hook_seqcst, member_hook_back_off
+ base_hook, base_hook_cmp, base_hook_item_counting, base_hook_backoff, base_hook_seqcst, base_hook_stat, base_hook_wrapped_stat, member_hook, member_hook_cmp, member_hook_item_counting, member_hook_seqcst, member_hook_back_off, member_hook_stat, member_hook_wrapped_stat
);
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSUNIT_LIST_TEST_KV_LAZY_LIST_RCU_H
this->test_rcu( l );
}
+// Container LazyKVList (RCU) with internal statistics enabled via
+// container::lazy_list::stat<>; the list owns its stat object.
+TYPED_TEST_P( LazyKVList, stat )
+{
+ struct traits: public cc::lazy_list::traits
+ {
+ typedef typename TestFixture::lt less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::lazy_list::stat<> stat;
+ };
+ typedef cc::LazyKVList<typename TestFixture::rcu_type, typename TestFixture::key_type, typename TestFixture::value_type, traits > list_type;
+
+ list_type l;
+ this->test_common( l );
+ this->test_ordered_iterator( l );
+ this->test_rcu( l );
+}
+
+// LazyKVList with wrapped_stat<>: events are recorded into an external,
+// caller-owned lazy_list::stat<> passed to the constructor.
+TYPED_TEST_P( LazyKVList, wrapped_stat )
+{
+ struct traits: public cc::lazy_list::traits
+ {
+ typedef typename TestFixture::lt less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::lazy_list::wrapped_stat<> stat;
+ };
+ typedef cc::LazyKVList<typename TestFixture::rcu_type, typename TestFixture::key_type, typename TestFixture::value_type, traits > list_type;
+
+ cds::container::lazy_list::stat<> st;
+ list_type l( st );
+ this->test_common( l );
+ this->test_ordered_iterator( l );
+ this->test_rcu( l );
+}
+
// GCC 5: All test names should be written on single line, otherwise a runtime error will be encountered like as
// "No test named <test_name> can be found in this test case"
REGISTER_TYPED_TEST_CASE_P( LazyKVList,
- less_ordered, compare_ordered, mix_ordered, item_counting, backoff, seq_cst, mutex
+ less_ordered, compare_ordered, mix_ordered, item_counting, backoff, seq_cst, mutex, stat, wrapped_stat
);
#endif // CDSUNIT_LIST_TEST_KV_LAZY_LIST_RCU_H
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSUNIT_LIST_TEST_MICHAEL_KV_LIST_RCU_H
#define CDSUNIT_LIST_TEST_MICHAEL_KV_LIST_RCU_H
this->test_rcu( l );
}
+// Container MichaelKVList (RCU) with internal statistics enabled via
+// container::michael_list::stat<>; the list owns its stat object.
+TYPED_TEST_P( MichaelKVList, stat )
+{
+ struct traits: public cc::michael_list::traits
+ {
+ typedef typename TestFixture::lt less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::michael_list::stat<> stat;
+ };
+ typedef cc::MichaelKVList<typename TestFixture::rcu_type, typename TestFixture::key_type, typename TestFixture::value_type, traits > list_type;
+
+ list_type l;
+ this->test_common( l );
+ this->test_ordered_iterator( l );
+ this->test_rcu( l );
+}
+
+// MichaelKVList with wrapped_stat<>: events are recorded into an external,
+// caller-owned michael_list::stat<> passed to the constructor.
+TYPED_TEST_P( MichaelKVList, wrapped_stat )
+{
+ struct traits: public cc::michael_list::traits
+ {
+ typedef typename TestFixture::lt less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::michael_list::wrapped_stat<> stat;
+ };
+ typedef cc::MichaelKVList<typename TestFixture::rcu_type, typename TestFixture::key_type, typename TestFixture::value_type, traits > list_type;
+
+ cds::container::michael_list::stat<> st;
+ list_type l( st );
+ this->test_common( l );
+ this->test_ordered_iterator( l );
+ this->test_rcu( l );
+}
+
// GCC 5: All test names should be written on single line, otherwise a runtime error will be encountered like as
// "No test named <test_name> can be found in this test case"
REGISTER_TYPED_TEST_CASE_P( MichaelKVList,
- less_ordered, compare_ordered, mix_ordered, item_counting, backoff, seq_cst
+ less_ordered, compare_ordered, mix_ordered, item_counting, backoff, seq_cst, stat, wrapped_stat
);
#endif // CDSUNIT_LIST_TEST_MICHAEL_KV_LIST_RCU_H
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSUNIT_LIST_TEST_LAZY_LIST_RCU_H
this->test_rcu( l );
}
+// Container LazyList (RCU) with internal statistics enabled via
+// container::lazy_list::stat<>; the list owns its stat object.
+TYPED_TEST_P( LazyList, stat )
+{
+ struct traits: public cc::lazy_list::traits
+ {
+ typedef typename TestFixture::template lt< typename TestFixture::item> less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::lazy_list::stat<> stat;
+ };
+ typedef cc::LazyList<typename TestFixture::rcu_type, typename TestFixture::item, traits > list_type;
+
+ list_type l;
+ this->test_common( l );
+ this->test_ordered_iterator( l );
+ this->test_rcu( l );
+}
+
+// LazyList with wrapped_stat<>: events are recorded into an external,
+// caller-owned lazy_list::stat<> passed to the constructor.
+TYPED_TEST_P( LazyList, wrapped_stat )
+{
+ struct traits: public cc::lazy_list::traits
+ {
+ typedef typename TestFixture::template lt< typename TestFixture::item> less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::lazy_list::wrapped_stat<> stat;
+ };
+ typedef cc::LazyList<typename TestFixture::rcu_type, typename TestFixture::item, traits > list_type;
+
+ cds::container::lazy_list::stat<> st;
+ list_type l( st );
+ this->test_common( l );
+ this->test_ordered_iterator( l );
+ this->test_rcu( l );
+}
+
TYPED_TEST_P( LazyList, mutex )
{
struct traits : public cc::lazy_list::traits
// GCC 5: All test names should be written on single line, otherwise a runtime error will be encountered like as
// "No test named <test_name> can be found in this test case"
REGISTER_TYPED_TEST_CASE_P( LazyList,
- less_ordered, compare_ordered, mix_ordered, item_counting, backoff, seq_cst, mutex
+ less_ordered, compare_ordered, mix_ordered, item_counting, backoff, seq_cst, mutex, stat, wrapped_stat
);
#endif // CDSUNIT_LIST_TEST_LAZY_LIST_RCU_H
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSUNIT_LIST_TEST_MICHAEL_LIST_RCU_H
#define CDSUNIT_LIST_TEST_MICHAEL_LIST_RCU_H
this->test_rcu( l );
}
+// Container MichaelList (RCU) with internal statistics enabled via
+// container::michael_list::stat<>; the list owns its stat object.
+TYPED_TEST_P( MichaelList, stat )
+{
+ struct traits: public cc::michael_list::traits
+ {
+ typedef typename TestFixture::template lt< typename TestFixture::item> less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::michael_list::stat<> stat;
+ };
+ typedef cc::MichaelList<typename TestFixture::rcu_type, typename TestFixture::item, traits > list_type;
+
+ list_type l;
+ this->test_common( l );
+ this->test_ordered_iterator( l );
+ this->test_rcu( l );
+}
+
+// MichaelList with wrapped_stat<>: events are recorded into an external,
+// caller-owned michael_list::stat<> passed to the constructor.
+TYPED_TEST_P( MichaelList, wrapped_stat )
+{
+ struct traits: public cc::michael_list::traits
+ {
+ typedef typename TestFixture::template lt< typename TestFixture::item> less;
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::container::michael_list::wrapped_stat<> stat;
+ };
+ typedef cc::MichaelList<typename TestFixture::rcu_type, typename TestFixture::item, traits > list_type;
+
+ cds::container::michael_list::stat<> st;
+ list_type l( st );
+ this->test_common( l );
+ this->test_ordered_iterator( l );
+ this->test_rcu( l );
+}
+
// GCC 5: All test names should be written on single line, otherwise a runtime error will be encountered like as
// "No test named <test_name> can be found in this test case"
REGISTER_TYPED_TEST_CASE_P( MichaelList,
- less_ordered, compare_ordered, mix_ordered, item_counting, backoff, seq_cst
+ less_ordered, compare_ordered, mix_ordered, item_counting, backoff, seq_cst, stat, wrapped_stat
);
#endif // CDSUNIT_LIST_TEST_MICHAEL_LIST_RCU_H
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <gtest/gtest.h>
#include <cds/algo/int_algo.h>
-#include <cds/os/timer.h>
+//#include <cds/details/bit_reverse_counter.h>
namespace {
class bitop : public ::testing::Test
}
}
+ /*
+ TEST_F( bitop, bit_reverse_counter )
+ {
+ cds::bitop::bit_reverse_counter<> c;
+
+ while ( c.value() < 8 ) {
+ size_t res = c.inc();
+ std::cout << "inc result: " << res
+ << " value: " << c.value()
+ << " reversed: " << c.reversed_value()
+ << " high_bit: " << c.high_bit() << "\n";
+ }
+
+ while ( c.value() > 0 ) {
+ size_t res = c.dec();
+ std::cout << "dec result: " << res
+ << " value: " << c.value()
+ << " reversed: " << c.reversed_value()
+ << " high_bit: " << c.high_bit() << "\n";
+ }
+ }
+ */
+
} // namespace
test( *pq );
}
+ // Intrusive MSPriorityQueue with cds::bitop::bit_reverse_counter<> as the
+ // item counter option; runs the common fixture test over a dynamic buffer.
+ TEST_F( IntrusiveMSPQueue, bit_reverse_counter )
+ {
+ typedef cds::intrusive::MSPriorityQueue< value_type,
+ cds::intrusive::mspriority_queue::make_traits<
+ cds::opt::buffer< dyn_buffer_type >
+ , cds::opt::less< less >
+ , cds::opt::item_counter< cds::bitop::bit_reverse_counter<>>
+ >::type
+ > pqueue;
+
+ pqueue pq( c_nCapacity );
+ test( pq );
+ }
+
+ // Intrusive MSPriorityQueue with mspriority_queue::monotonic_counter as the
+ // item counter option; same fixture test as the bit_reverse_counter variant.
+ TEST_F( IntrusiveMSPQueue, monotonic_counter )
+ {
+ typedef cds::intrusive::MSPriorityQueue< value_type,
+ cds::intrusive::mspriority_queue::make_traits<
+ cds::opt::buffer< dyn_buffer_type >
+ , cds::opt::less< less >
+ , cds::opt::item_counter< cds::intrusive::mspriority_queue::monotonic_counter >
+ >::type
+ > pqueue;
+
+ pqueue pq( c_nCapacity );
+ test( pq );
+ }
+
} // namespace
test( *pq );
}
+ // Container MSPriorityQueue with cds::bitop::bit_reverse_counter<> as the
+ // item counter option; runs the common fixture test over a dynamic buffer.
+ TEST_F( MSPQueue, bit_reverse_counter )
+ {
+ typedef cds::container::MSPriorityQueue< value_type,
+ cds::container::mspriority_queue::make_traits<
+ cds::opt::buffer< dyn_buffer_type >
+ ,cds::opt::less< less >
+ ,cds::opt::item_counter< cds::bitop::bit_reverse_counter<>>
+ >::type
+ > pqueue;
+
+ pqueue pq( c_nCapacity );
+ test( pq );
+ }
+
+ // Container MSPriorityQueue with mspriority_queue::monotonic_counter as the
+ // item counter option; same fixture test as the bit_reverse_counter variant.
+ TEST_F( MSPQueue, monotonic_counter )
+ {
+ typedef cds::container::MSPriorityQueue< value_type,
+ cds::container::mspriority_queue::make_traits<
+ cds::opt::buffer< dyn_buffer_type >
+ , cds::opt::less< less >
+ , cds::opt::item_counter< cds::container::mspriority_queue::monotonic_counter>
+ >::type
+ > pqueue;
+
+ pqueue pq( c_nCapacity );
+ test( pq );
+ }
+
} // namespace