/*
- * Copyright 2016 Facebook, Inc.
+ * Copyright 2017 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* limitations under the License.
*/
-#ifndef FOLLY_INDEXEDMEMPOOL_H
-#define FOLLY_INDEXEDMEMPOOL_H
+#pragma once
#include <assert.h>
#include <errno.h>
#include <stdint.h>

#include <algorithm>
#include <limits>
#include <type_traits>

#include <boost/noncopyable.hpp>

#include <folly/AtomicStruct.h>
#include <folly/detail/CacheLocality.h>
#include <folly/portability/SysMman.h>
#include <folly/portability/Unistd.h>
// Ignore shadowing warnings within this file, so includers can use -Wshadow.
#pragma GCC diagnostic push
/// constructed, but delays element construction. This means that only
/// elements that are actually returned to the caller get paged into the
/// process's resident set (RSS).
-template <typename T,
- int NumLocalLists_ = 32,
- int LocalListLimit_ = 200,
- template<typename> class Atom = std::atomic,
- bool EagerRecycleWhenTrivial = false,
- bool EagerRecycleWhenNotTrivial = true>
+template <
+ typename T,
+ uint32_t NumLocalLists_ = 32,
+ uint32_t LocalListLimit_ = 200,
+ template <typename> class Atom = std::atomic,
+ bool EagerRecycleWhenTrivial = false,
+ bool EagerRecycleWhenNotTrivial = true>
struct IndexedMemPool : boost::noncopyable {
typedef T value_type;
// of bits required to hold indices from a pool, given its capacity
static constexpr uint32_t maxIndexForCapacity(uint32_t capacity) {
- // index of uint32_t(-1) == UINT32_MAX is reserved for isAllocated tracking
- return std::min(uint64_t(capacity) + (NumLocalLists - 1) * LocalListLimit,
- uint64_t(uint32_t(-1) - 1));
+ // index of std::numeric_limits<uint32_t>::max() is reserved for isAllocated
+ // tracking
+ return uint32_t(std::min(
+ uint64_t(capacity) + (NumLocalLists - 1) * LocalListLimit,
+ uint64_t(std::numeric_limits<uint32_t>::max() - 1)));
}
static constexpr uint32_t capacityForMaxIndex(uint32_t maxIndex) {
, globalHead_(TaggedPtr{})
{
const size_t needed = sizeof(Slot) * (actualCapacity_ + 1);
- size_t pagesize = sysconf(_SC_PAGESIZE);
+ size_t pagesize = size_t(sysconf(_SC_PAGESIZE));
mmapLength_ = ((needed - 1) & ~(pagesize - 1)) + pagesize;
assert(needed <= mmapLength_ && mmapLength_ < needed + pagesize);
assert((mmapLength_ % pagesize) == 0);
/// Destroys all of the contained elements
~IndexedMemPool() {
if (!eagerRecycle()) {
- for (size_t i = size_; i > 0; --i) {
+ for (uint32_t i = size_; i > 0; --i) {
slots_[i].~Slot();
}
}
/// Returns the maximum number of elements that may be simultaneously
/// allocated and not yet recycled.  Because of the local lists it is
/// possible that more elements than this are returned successfully.
uint32_t capacity() {
  return capacityForMaxIndex(actualCapacity_);
}
auto slot = reinterpret_cast<const Slot*>(
reinterpret_cast<const char*>(elem) - offsetof(Slot, elem));
- auto rv = slot - slots_;
+ auto rv = uint32_t(slot - slots_);
// this assert also tests that rv is in range
assert(elem == &(*this)[rv]);
////////// fields
/// the number of bytes allocated from mmap, which is a multiple of
/// the page size of the machine
size_t mmapLength_;

/// the actual number of slots that we will allocate, to guarantee
/// that we will satisfy the capacity requested at construction time.
/// They will be numbered 1..actualCapacity_ (note the 1-based counting),
/// and occupy slots_[1..actualCapacity_].  Index 0 is never handed out,
/// and the all-ones uint32_t index is reserved (see maxIndexForCapacity).
uint32_t actualCapacity_;
/// this records the number of slots that have actually been constructed.
/// To allow use of atomic ++ instead of CAS, we let this overflow.
///////////// private methods
- size_t slotIndex(uint32_t idx) const {
+ uint32_t slotIndex(uint32_t idx) const {
assert(0 < idx &&
idx <= actualCapacity_ &&
idx <= size_.load(std::memory_order_acquire));
} // namespace folly
# pragma GCC diagnostic pop
-#endif