DIRECTORY io/async/ssl/test/
TEST ssl_errors_test SOURCES SSLErrorsTest.cpp
+ DIRECTORY memory/test/
+ TEST thread_cached_arena_test SOURCES ThreadCachedArenaTest.cpp
+ TEST arena_test SOURCES ArenaTest.cpp
+
DIRECTORY portability/test/
TEST constexpr_test SOURCES ConstexprTest.cpp
TEST libgen-test SOURCES LibgenTest.cpp
DIRECTORY test/
TEST ahm_int_stress_test SOURCES AHMIntStressTest.cpp
- TEST arena_test SOURCES ArenaTest.cpp
TEST arena_smartptr_test SOURCES ArenaSmartPtrTest.cpp
TEST array_test SOURCES ArrayTest.cpp
TEST ascii_check_test SOURCES AsciiCaseInsensitiveTest.cpp
TEST sparse_byte_set_test SOURCES SparseByteSetTest.cpp
TEST string_test SOURCES StringTest.cpp
TEST synchronized_test SOURCES SynchronizedTest.cpp
- TEST thread_cached_arena_test SOURCES ThreadCachedArenaTest.cpp
TEST thread_cached_int_test SOURCES ThreadCachedIntTest.cpp
TEST thread_local_test SOURCES ThreadLocalTest.cpp
TEST timeout_queue_test SOURCES TimeoutQueueTest.cpp
+++ /dev/null
-/*
- * Copyright 2017 Facebook, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef FOLLY_ARENA_H_
-#error This file may only be included from Arena.h
-#endif
-
-// Implementation of Arena.h functions
-
-namespace folly {
-
-template <class Alloc>
-std::pair<typename Arena<Alloc>::Block*, size_t>
-Arena<Alloc>::Block::allocate(Alloc& alloc, size_t size, bool allowSlack) {
- size_t allocSize = sizeof(Block) + size;
- if (allowSlack) {
- allocSize = ArenaAllocatorTraits<Alloc>::goodSize(alloc, allocSize);
- }
-
- void* mem = alloc.allocate(allocSize);
- return std::make_pair(new (mem) Block(), allocSize - sizeof(Block));
-}
-
-template <class Alloc>
-void Arena<Alloc>::Block::deallocate(Alloc& alloc) {
- this->~Block();
- alloc.deallocate(this);
-}
-
-template <class Alloc>
-void* Arena<Alloc>::allocateSlow(size_t size) {
- std::pair<Block*, size_t> p;
- char* start;
-
- size_t allocSize = std::max(size, minBlockSize()) + sizeof(Block);
- if (sizeLimit_ != kNoSizeLimit &&
- allocSize > sizeLimit_ - totalAllocatedSize_) {
- throw std::bad_alloc();
- }
-
- if (size > minBlockSize()) {
- // Allocate a large block for this chunk only, put it at the back of the
- // list so it doesn't get used for small allocations; don't change ptr_
- // and end_, let them point into a normal block (or none, if they're
- // null)
- p = Block::allocate(alloc(), size, false);
- start = p.first->start();
- blocks_.push_back(*p.first);
- } else {
- // Allocate a normal sized block and carve out size bytes from it
- p = Block::allocate(alloc(), minBlockSize(), true);
- start = p.first->start();
- blocks_.push_front(*p.first);
- ptr_ = start + size;
- end_ = start + p.second;
- }
-
- assert(p.second >= size);
- totalAllocatedSize_ += p.second + sizeof(Block);
- return start;
-}
-
-template <class Alloc>
-void Arena<Alloc>::merge(Arena<Alloc>&& other) {
- blocks_.splice_after(blocks_.before_begin(), other.blocks_);
- other.blocks_.clear();
- other.ptr_ = other.end_ = nullptr;
- totalAllocatedSize_ += other.totalAllocatedSize_;
- other.totalAllocatedSize_ = 0;
-}
-
-template <class Alloc>
-Arena<Alloc>::~Arena() {
- auto disposer = [this] (Block* b) { b->deallocate(this->alloc()); };
- while (!blocks_.empty()) {
- blocks_.pop_front_and_dispose(disposer);
- }
-}
-
-} // namespace folly
+++ /dev/null
-/*
- * Copyright 2017 Facebook, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-#define FOLLY_ARENA_H_
-
-#include <cassert>
-#include <limits>
-#include <stdexcept>
-#include <utility>
-
-#include <boost/intrusive/slist.hpp>
-
-#include <folly/Conv.h>
-#include <folly/Likely.h>
-#include <folly/Malloc.h>
-#include <folly/Memory.h>
-
-namespace folly {
-
-/**
- * Simple arena: allocate memory which gets freed when the arena gets
- * destroyed.
- *
- * The arena itself allocates memory using a custom allocator which provides
- * the following interface (same as required by StlAllocator in StlAllocator.h)
- *
- * void* allocate(size_t size);
- * Allocate a block of size bytes, properly aligned to the maximum
- * alignment required on your system; throw std::bad_alloc if the
- * allocation can't be satisfied.
- *
- * void deallocate(void* ptr);
- * Deallocate a previously allocated block.
- *
- * You may also specialize ArenaAllocatorTraits for your allocator type to
- * provide:
- *
- * size_t goodSize(const Allocator& alloc, size_t size) const;
- * Return a size (>= the provided size) that is considered "good" for your
- * allocator (for example, if your allocator allocates memory in 4MB
- * chunks, size should be rounded up to 4MB). The provided value is
- * guaranteed to be rounded up to a multiple of the maximum alignment
- * required on your system; the returned value must be also.
- *
- * An implementation that uses malloc() / free() is defined below, see SysArena.
- */
-template <class Alloc> struct ArenaAllocatorTraits;
-template <class Alloc>
-class Arena {
- public:
- explicit Arena(const Alloc& alloc,
- size_t minBlockSize = kDefaultMinBlockSize,
- size_t sizeLimit = kNoSizeLimit,
- size_t maxAlign = kDefaultMaxAlign)
- : allocAndSize_(alloc, minBlockSize)
- , ptr_(nullptr)
- , end_(nullptr)
- , totalAllocatedSize_(0)
- , bytesUsed_(0)
- , sizeLimit_(sizeLimit)
- , maxAlign_(maxAlign) {
- if ((maxAlign_ & (maxAlign_ - 1)) || maxAlign_ > alignof(Block)) {
- throw std::invalid_argument(
- folly::to<std::string>("Invalid maxAlign: ", maxAlign_));
- }
- }
-
- ~Arena();
-
- void* allocate(size_t size) {
- size = roundUp(size);
- bytesUsed_ += size;
-
- assert(ptr_ <= end_);
- if (LIKELY((size_t)(end_ - ptr_) >= size)) {
- // Fast path: there's enough room in the current block
- char* r = ptr_;
- ptr_ += size;
- assert(isAligned(r));
- return r;
- }
-
- // Not enough room in the current block
- void* r = allocateSlow(size);
- assert(isAligned(r));
- return r;
- }
-
- void deallocate(void* /* p */) {
- // Deallocate? Never!
- }
-
- // Transfer ownership of all memory allocated from "other" to "this".
- void merge(Arena&& other);
-
- // Gets the total memory used by the arena
- size_t totalSize() const {
- return totalAllocatedSize_ + sizeof(Arena);
- }
-
- // Gets the total number of "used" bytes, i.e. bytes that the arena users
- // allocated via the calls to `allocate`. Doesn't include fragmentation, e.g.
- // if block size is 4KB and you allocate 2 objects of 3KB in size,
- // `bytesUsed()` will be 6KB, while `totalSize()` will be 8KB+.
- size_t bytesUsed() const {
- return bytesUsed_;
- }
-
- // not copyable
- Arena(const Arena&) = delete;
- Arena& operator=(const Arena&) = delete;
-
- // movable
- Arena(Arena&&) = default;
- Arena& operator=(Arena&&) = default;
-
- private:
- struct Block;
- typedef boost::intrusive::slist_member_hook<
- boost::intrusive::tag<Arena>> BlockLink;
-
- struct FOLLY_ALIGNED_MAX Block {
- BlockLink link;
-
- // Allocate a block with at least size bytes of storage.
- // If allowSlack is true, allocate more than size bytes if convenient
- // (via ArenaAllocatorTraits::goodSize()) as we'll try to pack small
- // allocations in this block.
- static std::pair<Block*, size_t> allocate(
- Alloc& alloc, size_t size, bool allowSlack);
- void deallocate(Alloc& alloc);
-
- char* start() {
- return reinterpret_cast<char*>(this + 1);
- }
-
- private:
- Block() = default;
- ~Block() = default;
- };
-
- public:
- static constexpr size_t kDefaultMinBlockSize = 4096 - sizeof(Block);
- static constexpr size_t kNoSizeLimit = 0;
- static constexpr size_t kDefaultMaxAlign = alignof(Block);
- static constexpr size_t kBlockOverhead = sizeof(Block);
-
- private:
- bool isAligned(uintptr_t address) const {
- return (address & (maxAlign_ - 1)) == 0;
- }
- bool isAligned(void* p) const {
- return isAligned(reinterpret_cast<uintptr_t>(p));
- }
-
- // Round up size so it's properly aligned
- size_t roundUp(size_t size) const {
- return (size + maxAlign_ - 1) & ~(maxAlign_ - 1);
- }
-
- // cache_last<true> makes the list keep a pointer to the last element, so we
- // have push_back() and constant time splice_after()
- typedef boost::intrusive::slist<
- Block,
- boost::intrusive::member_hook<Block, BlockLink, &Block::link>,
- boost::intrusive::constant_time_size<false>,
- boost::intrusive::cache_last<true>> BlockList;
-
- void* allocateSlow(size_t size);
-
- // Empty member optimization: package Alloc with a non-empty member
- // in case Alloc is empty (as it is in the case of SysAlloc).
- struct AllocAndSize : public Alloc {
- explicit AllocAndSize(const Alloc& a, size_t s)
- : Alloc(a), minBlockSize(s) {
- }
-
- size_t minBlockSize;
- };
-
- size_t minBlockSize() const {
- return allocAndSize_.minBlockSize;
- }
- Alloc& alloc() { return allocAndSize_; }
- const Alloc& alloc() const { return allocAndSize_; }
-
- AllocAndSize allocAndSize_;
- BlockList blocks_;
- char* ptr_;
- char* end_;
- size_t totalAllocatedSize_;
- size_t bytesUsed_;
- const size_t sizeLimit_;
- const size_t maxAlign_;
-};
-
-template <class Alloc>
-struct IsArenaAllocator<Arena<Alloc>> : std::true_type { };
-
-/**
- * By default, don't pad the given size.
- */
-template <class Alloc>
-struct ArenaAllocatorTraits {
- static size_t goodSize(const Alloc& /* alloc */, size_t size) { return size; }
-};
-
-template <>
-struct ArenaAllocatorTraits<SysAlloc> {
- static size_t goodSize(const SysAlloc& /* alloc */, size_t size) {
- return goodMallocSize(size);
- }
-};
-
-/**
- * Arena that uses the system allocator (malloc / free)
- */
-class SysArena : public Arena<SysAlloc> {
- public:
- explicit SysArena(size_t minBlockSize = kDefaultMinBlockSize,
- size_t sizeLimit = kNoSizeLimit,
- size_t maxAlign = kDefaultMaxAlign)
- : Arena<SysAlloc>(SysAlloc(), minBlockSize, sizeLimit, maxAlign) {
- }
-};
-
-template <>
-struct IsArenaAllocator<SysArena> : std::true_type { };
-
-} // namespace folly
-
-#include <folly/Arena-inl.h>
#include <algorithm>
#include <cstring>
-#include <folly/Malloc.h>
+#include <folly/memory/Malloc.h>
#include <folly/portability/Config.h>
#if FOLLY_HAVE_CPLUS_DEMANGLE_V3_CALLBACK
#include <utility>
#include <folly/Hash.h>
-#include <folly/Malloc.h>
#include <folly/Traits.h>
+#include <folly/memory/Malloc.h>
#include <folly/portability/BitsFunctexcept.h>
// When used in folly, assertions are not disabled.
#include <folly/FormatTraits.h>
#include <folly/Likely.h>
-#include <folly/Malloc.h>
#include <folly/Traits.h>
+#include <folly/memory/Malloc.h>
#include <folly/portability/BitsFunctexcept.h>
//=============================================================================
follyincludedir = $(includedir)/folly
nobase_follyinclude_HEADERS = \
- Arena.h \
- Arena-inl.h \
Array.h \
Assume.h \
AtomicBitSet.h \
detail/GroupVarintDetail.h \
detail/IPAddress.h \
detail/IPAddressSource.h \
- detail/MallocImpl.h \
detail/MemoryIdler.h \
detail/MPMCPipelineDetail.h \
detail/RangeCommon.h \
LockTraitsBoost.h \
Logging.h \
MacAddress.h \
- Malloc.h \
MapUtil.h \
Math.h \
Memory.h \
+ memory/Arena.h \
+ memory/Arena-inl.h \
memory/MallctlHelper.h \
+ memory/Malloc.h \
+ memory/ThreadCachedArena.h \
memory/UninitializedMemoryHacks.h \
+ memory/detail/MallocImpl.h \
MicroSpinLock.h \
MicroLock.h \
MoveWrapper.h \
test/SynchronizedTestLib.h \
test/SynchronizedTestLib-inl.h \
test/TestUtils.h \
- ThreadCachedArena.h \
ThreadCachedInt.h \
ThreadLocal.h \
TimeoutQueue.h \
detail/MemoryIdler.cpp \
detail/SocketFastOpen.cpp \
MacAddress.cpp \
+ memory/ThreadCachedArena.cpp \
portability/Dirent.cpp \
portability/Fcntl.cpp \
portability/Libgen.cpp \
system/ThreadName.cpp \
system/VersionCheck.cpp \
Subprocess.cpp \
- ThreadCachedArena.cpp \
TimeoutQueue.cpp \
Try.cpp \
Uri.cpp \
endif
if !HAVE_WEAK_SYMBOLS
-libfollybase_la_SOURCES += detail/MallocImpl.cpp
+libfollybase_la_SOURCES += memory/detail/MallocImpl.cpp
endif
if HAVE_BOOST_CONTEXT
+++ /dev/null
-/*
- * Copyright 2017 Facebook, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Functions to provide smarter use of jemalloc, if jemalloc is being used.
-// http://www.canonware.com/download/jemalloc/jemalloc-latest/doc/jemalloc.html
-
-#pragma once
-
-#include <folly/portability/Config.h>
-
-/**
- * Define various MALLOCX_* macros normally provided by jemalloc. We define
- * them so that we don't have to include jemalloc.h, in case the program is
- * built without jemalloc support.
- */
-#if defined(USE_JEMALLOC) || defined(FOLLY_USE_JEMALLOC)
-// We have JEMalloc, so use it.
-# include <jemalloc/jemalloc.h>
-#else
-# ifndef MALLOCX_LG_ALIGN
-# define MALLOCX_LG_ALIGN(la) (la)
-# endif
-# ifndef MALLOCX_ZERO
-# define MALLOCX_ZERO (static_cast<int>(0x40))
-# endif
-#endif
-
-// If using fbstring from libstdc++ (see comment in FBString.h), then
-// just define stub code here to typedef the fbstring type into the
-// folly namespace.
-// This provides backwards compatibility for code that explicitly
-// includes and uses fbstring.
-#if defined(_GLIBCXX_USE_FB) && !defined(_LIBSTDCXX_FBSTRING)
-
-#include <folly/detail/MallocImpl.h>
-#include <folly/portability/BitsFunctexcept.h>
-
-#include <string>
-
-namespace folly {
- using std::goodMallocSize;
- using std::jemallocMinInPlaceExpandable;
- using std::usingJEMalloc;
- using std::smartRealloc;
- using std::checkedMalloc;
- using std::checkedCalloc;
- using std::checkedRealloc;
-}
-
-#else // !defined(_GLIBCXX_USE_FB) || defined(_LIBSTDCXX_FBSTRING)
-
-#ifdef _LIBSTDCXX_FBSTRING
-#pragma GCC system_header
-
-/**
- * Declare *allocx() and mallctl*() as weak symbols. These will be provided by
- * jemalloc if we are using jemalloc, or will be nullptr if we are using another
- * malloc implementation.
- */
-extern "C" void* mallocx(size_t, int)
-__attribute__((__weak__));
-extern "C" void* rallocx(void*, size_t, int)
-__attribute__((__weak__));
-extern "C" size_t xallocx(void*, size_t, size_t, int)
-__attribute__((__weak__));
-extern "C" size_t sallocx(const void*, int)
-__attribute__((__weak__));
-extern "C" void dallocx(void*, int)
-__attribute__((__weak__));
-extern "C" void sdallocx(void*, size_t, int)
-__attribute__((__weak__));
-extern "C" size_t nallocx(size_t, int)
-__attribute__((__weak__));
-extern "C" int mallctl(const char*, void*, size_t*, void*, size_t)
-__attribute__((__weak__));
-extern "C" int mallctlnametomib(const char*, size_t*, size_t*)
-__attribute__((__weak__));
-extern "C" int mallctlbymib(const size_t*, size_t, void*, size_t*, void*,
- size_t)
-__attribute__((__weak__));
-
-#include <bits/functexcept.h>
-
-#define FOLLY_HAVE_MALLOC_H 1
-
-#else // !defined(_LIBSTDCXX_FBSTRING)
-
-#include <folly/detail/MallocImpl.h> /* nolint */
-#include <folly/portability/BitsFunctexcept.h> /* nolint */
-
-#endif
-
-// for malloc_usable_size
-// NOTE: FreeBSD 9 doesn't have malloc.h. Its definitions
-// are found in stdlib.h.
-#if FOLLY_HAVE_MALLOC_H
-#include <malloc.h>
-#else
-#include <stdlib.h>
-#endif
-
-#include <cassert>
-#include <cstddef>
-#include <cstdint>
-#include <cstdlib>
-#include <cstring>
-
-#include <atomic>
-#include <new>
-
-#ifdef _LIBSTDCXX_FBSTRING
-namespace std _GLIBCXX_VISIBILITY(default) {
-_GLIBCXX_BEGIN_NAMESPACE_VERSION
-#else
-namespace folly {
-#endif
-
-// Cannot depend on Portability.h when _LIBSTDCXX_FBSTRING.
-#if defined(__GNUC__)
-#define FOLLY_MALLOC_NOINLINE __attribute__((__noinline__))
-#if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) >= 40900
-// This is for checked malloc-like functions (returns non-null pointer
-// which cannot alias any outstanding pointer).
-#define FOLLY_MALLOC_CHECKED_MALLOC \
- __attribute__((__returns_nonnull__, __malloc__))
-#else
-#define FOLLY_MALLOC_CHECKED_MALLOC __attribute__((__malloc__))
-#endif
-#else
-#define FOLLY_MALLOC_NOINLINE
-#define FOLLY_MALLOC_CHECKED_MALLOC
-#endif
-
-/**
- * Determine if we are using jemalloc or not.
- */
-FOLLY_MALLOC_NOINLINE inline bool usingJEMalloc() noexcept {
- // Checking for rallocx != nullptr is not sufficient; we may be in a
- // dlopen()ed module that depends on libjemalloc, so rallocx is resolved, but
- // the main program might be using a different memory allocator.
- // How do we determine that we're using jemalloc? In the hackiest
- // way possible. We allocate memory using malloc() and see if the
- // per-thread counter of allocated memory increases. This makes me
- // feel dirty inside. Also note that this requires jemalloc to have
- // been compiled with --enable-stats.
- static const bool result = [] () noexcept {
- // Some platforms (*cough* OSX *cough*) require weak symbol checks to be
- // in the form if (mallctl != nullptr). Not if (mallctl) or if (!mallctl)
- // (!!). http://goo.gl/xpmctm
- if (mallocx == nullptr || rallocx == nullptr || xallocx == nullptr
- || sallocx == nullptr || dallocx == nullptr || sdallocx == nullptr
- || nallocx == nullptr || mallctl == nullptr
- || mallctlnametomib == nullptr || mallctlbymib == nullptr) {
- return false;
- }
-
- // "volatile" because gcc optimizes out the reads from *counter, because
- // it "knows" malloc doesn't modify global state...
- /* nolint */ volatile uint64_t* counter;
- size_t counterLen = sizeof(uint64_t*);
-
- if (mallctl("thread.allocatedp", static_cast<void*>(&counter), &counterLen,
- nullptr, 0) != 0) {
- return false;
- }
-
- if (counterLen != sizeof(uint64_t*)) {
- return false;
- }
-
- uint64_t origAllocated = *counter;
-
- const void* ptr = malloc(1);
- if (!ptr) {
- // wtf, failing to allocate 1 byte
- return false;
- }
-
- /* Avoid optimizing away the malloc. */
- asm volatile("" ::"m"(ptr) : "memory");
-
- return (origAllocated != *counter);
- }();
-
- return result;
-}
-
-inline size_t goodMallocSize(size_t minSize) noexcept {
- if (minSize == 0) {
- return 0;
- }
-
- if (!usingJEMalloc()) {
- // Not using jemalloc - no smarts
- return minSize;
- }
-
- return nallocx(minSize, 0);
-}
-
-// We always request "good" sizes for allocation, so jemalloc can
-// never grow in place small blocks; they're already occupied to the
-// brim. Blocks larger than or equal to 4096 bytes can in fact be
-// expanded in place, and this constant reflects that.
-static const size_t jemallocMinInPlaceExpandable = 4096;
-
-/**
- * Trivial wrappers around malloc, calloc, realloc that check for allocation
- * failure and throw std::bad_alloc in that case.
- */
-inline void* checkedMalloc(size_t size) {
- void* p = malloc(size);
- if (!p) {
- std::__throw_bad_alloc();
- }
- return p;
-}
-
-inline void* checkedCalloc(size_t n, size_t size) {
- void* p = calloc(n, size);
- if (!p) {
- std::__throw_bad_alloc();
- }
- return p;
-}
-
-inline void* checkedRealloc(void* ptr, size_t size) {
- void* p = realloc(ptr, size);
- if (!p) {
- std::__throw_bad_alloc();
- }
- return p;
-}
-
-/**
- * This function tries to reallocate a buffer of which only the first
- * currentSize bytes are used. The problem with using realloc is that
- * if currentSize is relatively small _and_ if realloc decides it
- * needs to move the memory chunk to a new buffer, then realloc ends
- * up copying data that is not used. It's generally not a win to try
- * to hook in to realloc() behavior to avoid copies - at least in
- * jemalloc, realloc() almost always ends up doing a copy, because
- * there is little fragmentation / slack space to take advantage of.
- */
-FOLLY_MALLOC_CHECKED_MALLOC FOLLY_MALLOC_NOINLINE inline void* smartRealloc(
- void* p,
- const size_t currentSize,
- const size_t currentCapacity,
- const size_t newCapacity) {
- assert(p);
- assert(currentSize <= currentCapacity &&
- currentCapacity < newCapacity);
-
- auto const slack = currentCapacity - currentSize;
- if (slack * 2 > currentSize) {
- // Too much slack, malloc-copy-free cycle:
- auto const result = checkedMalloc(newCapacity);
- std::memcpy(result, p, currentSize);
- free(p);
- return result;
- }
- // If there's not too much slack, we realloc in hope of coalescing
- return checkedRealloc(p, newCapacity);
-}
-
-#ifdef _LIBSTDCXX_FBSTRING
-_GLIBCXX_END_NAMESPACE_VERSION
-#endif
-
-} // namespace folly
-
-#endif // !defined(_GLIBCXX_USE_FB) || defined(_LIBSTDCXX_FBSTRING)
+++ /dev/null
-/*
- * Copyright 2017 Facebook, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <folly/ThreadCachedArena.h>
-
-#include <memory>
-
-namespace folly {
-
-ThreadCachedArena::ThreadCachedArena(size_t minBlockSize, size_t maxAlign)
- : minBlockSize_(minBlockSize), maxAlign_(maxAlign) {
-}
-
-SysArena* ThreadCachedArena::allocateThreadLocalArena() {
- SysArena* arena =
- new SysArena(minBlockSize_, SysArena::kNoSizeLimit, maxAlign_);
- auto disposer = [this] (SysArena* t, TLPDestructionMode mode) {
- std::unique_ptr<SysArena> tp(t); // ensure it gets deleted
- if (mode == TLPDestructionMode::THIS_THREAD) {
- zombify(std::move(*t));
- }
- };
- arena_.reset(arena, disposer);
- return arena;
-}
-
-void ThreadCachedArena::zombify(SysArena&& arena) {
- zombies_->merge(std::move(arena));
-}
-
-size_t ThreadCachedArena::totalSize() const {
- size_t result = sizeof(ThreadCachedArena);
- for (const auto& arena : arena_.accessAllThreads()) {
- result += arena.totalSize();
- }
- result += zombies_->totalSize() - sizeof(SysArena);
- return result;
-}
-
-} // namespace folly
+++ /dev/null
-/*
- * Copyright 2017 Facebook, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <type_traits>
-
-#include <folly/Arena.h>
-#include <folly/Likely.h>
-#include <folly/Synchronized.h>
-#include <folly/ThreadLocal.h>
-
-namespace folly {
-
-/**
- * Thread-caching arena: allocate memory which gets freed when the arena gets
- * destroyed.
- *
- * The arena itself allocates memory using malloc() in blocks of
- * at least minBlockSize bytes.
- *
- * For speed, each thread gets its own Arena (see Arena.h); when threads
- * exit, the Arena gets merged into a "zombie" Arena, which will be deallocated
- * when the ThreadCachedArena object is destroyed.
- */
-class ThreadCachedArena {
- public:
- explicit ThreadCachedArena(
- size_t minBlockSize = SysArena::kDefaultMinBlockSize,
- size_t maxAlign = SysArena::kDefaultMaxAlign);
-
- void* allocate(size_t size) {
- SysArena* arena = arena_.get();
- if (UNLIKELY(!arena)) {
- arena = allocateThreadLocalArena();
- }
-
- return arena->allocate(size);
- }
-
- void deallocate(void* /* p */) {
- // Deallocate? Never!
- }
-
- // Gets the total memory used by the arena
- size_t totalSize() const;
-
- private:
- struct ThreadLocalPtrTag {};
-
- ThreadCachedArena(const ThreadCachedArena&) = delete;
- ThreadCachedArena(ThreadCachedArena&&) = delete;
- ThreadCachedArena& operator=(const ThreadCachedArena&) = delete;
- ThreadCachedArena& operator=(ThreadCachedArena&&) = delete;
-
- SysArena* allocateThreadLocalArena();
-
- // Zombify the blocks in arena, saving them for deallocation until
- // the ThreadCachedArena is destroyed.
- void zombify(SysArena&& arena);
-
- const size_t minBlockSize_;
- const size_t maxAlign_;
-
- ThreadLocalPtr<SysArena, ThreadLocalPtrTag> arena_; // Per-thread arena.
-
- // Allocations from threads that are now dead.
- Synchronized<SysArena> zombies_;
-};
-
-template <>
-struct IsArenaAllocator<ThreadCachedArena> : std::true_type { };
-
-} // namespace folly
+++ /dev/null
-/*
- * Copyright 2017 Facebook, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <folly/detail/MallocImpl.h>
-
-extern "C" {
-
-#ifdef _MSC_VER
-// MSVC doesn't have weak symbols, so do some linker magic
-// to emulate them. (the magic is in the header)
-const char* mallocxWeak = nullptr;
-const char* rallocxWeak = nullptr;
-const char* xallocxWeak = nullptr;
-const char* sallocxWeak = nullptr;
-const char* dallocxWeak = nullptr;
-const char* sdallocxWeak = nullptr;
-const char* nallocxWeak = nullptr;
-const char* mallctlWeak = nullptr;
-const char* mallctlnametomibWeak = nullptr;
-const char* mallctlbymibWeak = nullptr;
-#elif !FOLLY_HAVE_WEAK_SYMBOLS
-void* (*mallocx)(size_t, int) = nullptr;
-void* (*rallocx)(void*, size_t, int) = nullptr;
-size_t (*xallocx)(void*, size_t, size_t, int) = nullptr;
-size_t (*sallocx)(const void*, int) = nullptr;
-void (*dallocx)(void*, int) = nullptr;
-void (*sdallocx)(void*, size_t, int) = nullptr;
-size_t (*nallocx)(size_t, int) = nullptr;
-int (*mallctl)(const char*, void*, size_t*, void*, size_t) = nullptr;
-int (*mallctlnametomib)(const char*, size_t*, size_t*) = nullptr;
-int (*mallctlbymib)(const size_t*, size_t, void*, size_t*, void*, size_t) =
- nullptr;
-#endif
-
-}
+++ /dev/null
-/*
- * Copyright 2017 Facebook, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <stdlib.h>
-
-#include <folly/Portability.h>
-
-extern "C" {
-
-#if FOLLY_HAVE_WEAK_SYMBOLS
-void* mallocx(size_t, int) __attribute__((__weak__));
-void* rallocx(void*, size_t, int) __attribute__((__weak__));
-size_t xallocx(void*, size_t, size_t, int) __attribute__((__weak__));
-size_t sallocx(const void*, int) __attribute__((__weak__));
-void dallocx(void*, int) __attribute__((__weak__));
-void sdallocx(void*, size_t, int) __attribute__((__weak__));
-size_t nallocx(size_t, int) __attribute__((__weak__));
-int mallctl(const char*, void*, size_t*, void*, size_t)
- __attribute__((__weak__));
-int mallctlnametomib(const char*, size_t*, size_t*) __attribute__((__weak__));
-int mallctlbymib(const size_t*, size_t, void*, size_t*, void*, size_t)
- __attribute__((__weak__));
-#else
-extern void* (*mallocx)(size_t, int);
-extern void* (*rallocx)(void*, size_t, int);
-extern size_t (*xallocx)(void*, size_t, size_t, int);
-extern size_t (*sallocx)(const void*, int);
-extern void (*dallocx)(void*, int);
-extern void (*sdallocx)(void*, size_t, int);
-extern size_t (*nallocx)(size_t, int);
-extern int (*mallctl)(const char*, void*, size_t*, void*, size_t);
-extern int (*mallctlnametomib)(const char*, size_t*, size_t*);
-extern int (*mallctlbymib)(const size_t*, size_t, void*, size_t*, void*,
- size_t);
-#ifdef _MSC_VER
-// We emulate weak linkage for MSVC. The symbols we're
-// aliasing to are hiding in MallocImpl.cpp
-#pragma comment(linker, "/alternatename:mallocx=mallocxWeak")
-#pragma comment(linker, "/alternatename:rallocx=rallocxWeak")
-#pragma comment(linker, "/alternatename:xallocx=xallocxWeak")
-#pragma comment(linker, "/alternatename:sallocx=sallocxWeak")
-#pragma comment(linker, "/alternatename:dallocx=dallocxWeak")
-#pragma comment(linker, "/alternatename:sdallocx=sdallocxWeak")
-#pragma comment(linker, "/alternatename:nallocx=nallocxWeak")
-#pragma comment(linker, "/alternatename:mallctl=mallctlWeak")
-#pragma comment(linker, "/alternatename:mallctlnametomib=mallctlnametomibWeak")
-#pragma comment(linker, "/alternatename:mallctlbymib=mallctlbymibWeak")
-#endif
-#endif
-
-}
#include <folly/detail/MemoryIdler.h>
#include <folly/Logging.h>
-#include <folly/Malloc.h>
#include <folly/Portability.h>
#include <folly/ScopeGuard.h>
#include <folly/concurrency/CacheLocality.h>
#include <folly/memory/MallctlHelper.h>
+#include <folly/memory/Malloc.h>
#include <folly/portability/PThread.h>
#include <folly/portability/SysMman.h>
#include <folly/portability/Unistd.h>
#include <folly/Exception.h>
#include <folly/Foreach.h>
#include <folly/Function.h>
-#include <folly/Malloc.h>
#include <folly/MicroSpinLock.h>
#include <folly/Portability.h>
#include <folly/ScopeGuard.h>
#include <folly/SharedMutex.h>
+#include <folly/memory/Malloc.h>
#include <folly/portability/PThread.h>
#include <folly/detail/StaticSingletonManager.h>
#include <folly/experimental/JemallocNodumpAllocator.h>
#include <folly/Conv.h>
-#include <folly/Malloc.h>
#include <folly/String.h>
+#include <folly/memory/Malloc.h>
#include <glog/logging.h>
namespace folly {
#include <folly/experimental/JemallocNodumpAllocator.h>
-#include <folly/Malloc.h>
#include <folly/io/IOBuf.h>
+#include <folly/memory/Malloc.h>
#include <folly/portability/GTest.h>
TEST(JemallocNodumpAllocatorTest, Basic) {
#include <folly/Conv.h>
#include <folly/Likely.h>
-#include <folly/Malloc.h>
#include <folly/Memory.h>
#include <folly/ScopeGuard.h>
#include <folly/hash/SpookyHashV2.h>
#include <folly/io/Cursor.h>
+#include <folly/memory/Malloc.h>
using std::unique_ptr;
// - If using jemalloc, we can try to expand in place, avoiding a memcpy()
// - If not using jemalloc and we don't have too much to copy,
// we'll use realloc() (note that realloc might have to copy
- // headroom + data + tailroom, see smartRealloc in folly/Malloc.h)
+ // headroom + data + tailroom, see smartRealloc in folly/memory/Malloc.h)
// - Otherwise, bite the bullet and reallocate.
if (headroom() + tailroom() >= minHeadroom + minTailroom) {
uint8_t* newData = writableBuffer() + minHeadroom;
#include <iterator>
#include <type_traits>
-#include <folly/Malloc.h>
#include <folly/io/IOBuf.h>
+#include <folly/memory/Malloc.h>
namespace folly {
#include <boost/random.hpp>
-#include <folly/Malloc.h>
#include <folly/Range.h>
+#include <folly/memory/Malloc.h>
#include <folly/portability/GTest.h>
using folly::fbstring;
--- /dev/null
+/*
+ * Copyright 2017 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FOLLY_ARENA_H_
+#error This file may only be included from Arena.h
+#endif
+
+// Implementation of Arena.h functions
+
+namespace folly {
+
+template <class Alloc>
+std::pair<typename Arena<Alloc>::Block*, size_t>
+Arena<Alloc>::Block::allocate(Alloc& alloc, size_t size, bool allowSlack) {
+ size_t allocSize = sizeof(Block) + size;
+ if (allowSlack) {
+ allocSize = ArenaAllocatorTraits<Alloc>::goodSize(alloc, allocSize);
+ }
+
+ void* mem = alloc.allocate(allocSize);
+ return std::make_pair(new (mem) Block(), allocSize - sizeof(Block));
+}
+
+template <class Alloc>
+void Arena<Alloc>::Block::deallocate(Alloc& alloc) {
+ this->~Block();
+ alloc.deallocate(this);
+}
+
+template <class Alloc>
+void* Arena<Alloc>::allocateSlow(size_t size) {
+ std::pair<Block*, size_t> p;
+ char* start;
+
+ size_t allocSize = std::max(size, minBlockSize()) + sizeof(Block);
+ if (sizeLimit_ != kNoSizeLimit &&
+ allocSize > sizeLimit_ - totalAllocatedSize_) {
+ throw std::bad_alloc();
+ }
+
+ if (size > minBlockSize()) {
+ // Allocate a large block for this chunk only, put it at the back of the
+ // list so it doesn't get used for small allocations; don't change ptr_
+ // and end_, let them point into a normal block (or none, if they're
+ // null)
+ p = Block::allocate(alloc(), size, false);
+ start = p.first->start();
+ blocks_.push_back(*p.first);
+ } else {
+ // Allocate a normal sized block and carve out size bytes from it
+ p = Block::allocate(alloc(), minBlockSize(), true);
+ start = p.first->start();
+ blocks_.push_front(*p.first);
+ ptr_ = start + size;
+ end_ = start + p.second;
+ }
+
+ assert(p.second >= size);
+ totalAllocatedSize_ += p.second + sizeof(Block);
+ return start;
+}
+
+template <class Alloc>
+void Arena<Alloc>::merge(Arena<Alloc>&& other) {
+ blocks_.splice_after(blocks_.before_begin(), other.blocks_);
+ other.blocks_.clear();
+ other.ptr_ = other.end_ = nullptr;
+ totalAllocatedSize_ += other.totalAllocatedSize_;
+ other.totalAllocatedSize_ = 0;
+}
+
+template <class Alloc>
+Arena<Alloc>::~Arena() {
+ auto disposer = [this] (Block* b) { b->deallocate(this->alloc()); };
+ while (!blocks_.empty()) {
+ blocks_.pop_front_and_dispose(disposer);
+ }
+}
+
+} // namespace folly
--- /dev/null
+/*
+ * Copyright 2017 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+#define FOLLY_ARENA_H_
+
+#include <cassert>
+#include <limits>
+#include <stdexcept>
+#include <utility>
+
+#include <boost/intrusive/slist.hpp>
+
+#include <folly/Conv.h>
+#include <folly/Likely.h>
+#include <folly/Memory.h>
+#include <folly/memory/Malloc.h>
+
+namespace folly {
+
+/**
+ * Simple arena: allocate memory which gets freed when the arena gets
+ * destroyed.
+ *
+ * The arena itself allocates memory using a custom allocator which provides
+ * the following interface (same as required by StlAllocator in StlAllocator.h)
+ *
+ * void* allocate(size_t size);
+ * Allocate a block of size bytes, properly aligned to the maximum
+ * alignment required on your system; throw std::bad_alloc if the
+ * allocation can't be satisfied.
+ *
+ * void deallocate(void* ptr);
+ * Deallocate a previously allocated block.
+ *
+ * You may also specialize ArenaAllocatorTraits for your allocator type to
+ * provide:
+ *
+ * size_t goodSize(const Allocator& alloc, size_t size) const;
+ * Return a size (>= the provided size) that is considered "good" for your
+ * allocator (for example, if your allocator allocates memory in 4MB
+ * chunks, size should be rounded up to 4MB). The provided value is
+ * guaranteed to be rounded up to a multiple of the maximum alignment
+ * required on your system; the returned value must be also.
+ *
+ * An implementation that uses malloc() / free() is defined below, see SysArena.
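+ *
+ * Illustrative sketch (MyPageAlloc is hypothetical, not part of folly): a
+ * custom allocator satisfying this interface, plus an ArenaAllocatorTraits
+ * specialization that rounds requests up to 4KB pages:
+ *
+ *   struct MyPageAlloc {
+ *     void* allocate(size_t size);   // aligned; throws std::bad_alloc on failure
+ *     void deallocate(void* ptr);
+ *   };
+ *
+ *   namespace folly {
+ *   template <> struct ArenaAllocatorTraits<MyPageAlloc> {
+ *     static size_t goodSize(const MyPageAlloc&, size_t size) {
+ *       return (size + 4095) & ~size_t(4095);  // round up to a 4KB multiple
+ *     }
+ *   };
+ *   }
+ *
+ *   Arena<MyPageAlloc> arena{MyPageAlloc{}};
+ *   void* p = arena.allocate(128);  // freed when `arena` is destroyed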
+ */
+template <class Alloc> struct ArenaAllocatorTraits;
+template <class Alloc>
+class Arena {
+ public:
+ explicit Arena(const Alloc& alloc,
+ size_t minBlockSize = kDefaultMinBlockSize,
+ size_t sizeLimit = kNoSizeLimit,
+ size_t maxAlign = kDefaultMaxAlign)
+ : allocAndSize_(alloc, minBlockSize)
+ , ptr_(nullptr)
+ , end_(nullptr)
+ , totalAllocatedSize_(0)
+ , bytesUsed_(0)
+ , sizeLimit_(sizeLimit)
+ , maxAlign_(maxAlign) {
+ if ((maxAlign_ & (maxAlign_ - 1)) || maxAlign_ > alignof(Block)) {
+ throw std::invalid_argument(
+ folly::to<std::string>("Invalid maxAlign: ", maxAlign_));
+ }
+ }
+
+ ~Arena();
+
+ void* allocate(size_t size) {
+ size = roundUp(size);
+ bytesUsed_ += size;
+
+ assert(ptr_ <= end_);
+ if (LIKELY((size_t)(end_ - ptr_) >= size)) {
+ // Fast path: there's enough room in the current block
+ char* r = ptr_;
+ ptr_ += size;
+ assert(isAligned(r));
+ return r;
+ }
+
+ // Not enough room in the current block
+ void* r = allocateSlow(size);
+ assert(isAligned(r));
+ return r;
+ }
+
+ void deallocate(void* /* p */) {
+ // Deallocate? Never!
+ }
+
+ // Transfer ownership of all memory allocated from "other" to "this".
+ void merge(Arena&& other);
+
+ // Gets the total memory used by the arena
+ size_t totalSize() const {
+ return totalAllocatedSize_ + sizeof(Arena);
+ }
+
+ // Gets the total number of "used" bytes, i.e. bytes that the arena users
+ // allocated via the calls to `allocate`. Doesn't include fragmentation, e.g.
+ // if block size is 4KB and you allocate 2 objects of 3KB in size,
+ // `bytesUsed()` will be 6KB, while `totalSize()` will be 8KB+.
+ size_t bytesUsed() const {
+ return bytesUsed_;
+ }
+
+ // not copyable
+ Arena(const Arena&) = delete;
+ Arena& operator=(const Arena&) = delete;
+
+ // movable
+ Arena(Arena&&) = default;
+ Arena& operator=(Arena&&) = default;
+
+ private:
+ struct Block;
+ typedef boost::intrusive::slist_member_hook<
+ boost::intrusive::tag<Arena>> BlockLink;
+
+ struct FOLLY_ALIGNED_MAX Block {
+ BlockLink link;
+
+ // Allocate a block with at least size bytes of storage.
+ // If allowSlack is true, allocate more than size bytes if convenient
+ // (via ArenaAllocatorTraits::goodSize()) as we'll try to pack small
+ // allocations in this block.
+ static std::pair<Block*, size_t> allocate(
+ Alloc& alloc, size_t size, bool allowSlack);
+ void deallocate(Alloc& alloc);
+
+ char* start() {
+ return reinterpret_cast<char*>(this + 1);
+ }
+
+ private:
+ Block() = default;
+ ~Block() = default;
+ };
+
+ public:
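+  // Sized so that a default block (header + storage) is exactly one 4096-byte
+  // allocation.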
+ static constexpr size_t kDefaultMinBlockSize = 4096 - sizeof(Block);
+ static constexpr size_t kNoSizeLimit = 0;
+ static constexpr size_t kDefaultMaxAlign = alignof(Block);
+ static constexpr size_t kBlockOverhead = sizeof(Block);
+
+ private:
+ bool isAligned(uintptr_t address) const {
+ return (address & (maxAlign_ - 1)) == 0;
+ }
+ bool isAligned(void* p) const {
+ return isAligned(reinterpret_cast<uintptr_t>(p));
+ }
+
+ // Round up size so it's properly aligned
+ size_t roundUp(size_t size) const {
+ return (size + maxAlign_ - 1) & ~(maxAlign_ - 1);
+ }
+
+ // cache_last<true> makes the list keep a pointer to the last element, so we
+ // have push_back() and constant time splice_after()
+ typedef boost::intrusive::slist<
+ Block,
+ boost::intrusive::member_hook<Block, BlockLink, &Block::link>,
+ boost::intrusive::constant_time_size<false>,
+ boost::intrusive::cache_last<true>> BlockList;
+
+ void* allocateSlow(size_t size);
+
+ // Empty member optimization: package Alloc with a non-empty member
+ // in case Alloc is empty (as it is in the case of SysAlloc).
+ struct AllocAndSize : public Alloc {
+ explicit AllocAndSize(const Alloc& a, size_t s)
+ : Alloc(a), minBlockSize(s) {
+ }
+
+ size_t minBlockSize;
+ };
+
+ size_t minBlockSize() const {
+ return allocAndSize_.minBlockSize;
+ }
+ Alloc& alloc() { return allocAndSize_; }
+ const Alloc& alloc() const { return allocAndSize_; }
+
+ AllocAndSize allocAndSize_;
+ BlockList blocks_;
+ char* ptr_;
+ char* end_;
+ size_t totalAllocatedSize_;
+ size_t bytesUsed_;
+ const size_t sizeLimit_;
+ const size_t maxAlign_;
+};
+
+template <class Alloc>
+struct IsArenaAllocator<Arena<Alloc>> : std::true_type { };
+
+/**
+ * By default, don't pad the given size.
+ */
+template <class Alloc>
+struct ArenaAllocatorTraits {
+ static size_t goodSize(const Alloc& /* alloc */, size_t size) { return size; }
+};
+
+template <>
+struct ArenaAllocatorTraits<SysAlloc> {
+ static size_t goodSize(const SysAlloc& /* alloc */, size_t size) {
+ return goodMallocSize(size);
+ }
+};
+
+/**
+ * Arena that uses the system allocator (malloc / free)
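+ *
+ * Illustrative usage (sketch):
+ *
+ *   folly::SysArena arena;
+ *   void* p = arena.allocate(100);  // no matching free(); everything is
+ *                                   // released when `arena` is destroyed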
+ */
+class SysArena : public Arena<SysAlloc> {
+ public:
+ explicit SysArena(size_t minBlockSize = kDefaultMinBlockSize,
+ size_t sizeLimit = kNoSizeLimit,
+ size_t maxAlign = kDefaultMaxAlign)
+ : Arena<SysAlloc>(SysAlloc(), minBlockSize, sizeLimit, maxAlign) {
+ }
+};
+
+template <>
+struct IsArenaAllocator<SysArena> : std::true_type { };
+
+} // namespace folly
+
+#include <folly/memory/Arena-inl.h>
#pragma once
#include <folly/Likely.h>
-#include <folly/Malloc.h>
+#include <folly/memory/Malloc.h>
#include <stdexcept>
--- /dev/null
+/*
+ * Copyright 2017 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Functions to provide smarter use of jemalloc, if jemalloc is being used.
+// http://www.canonware.com/download/jemalloc/jemalloc-latest/doc/jemalloc.html
+
+#pragma once
+
+#include <folly/portability/Config.h>
+
+/**
+ * Define various MALLOCX_* macros normally provided by jemalloc. We define
+ * them so that we don't have to include jemalloc.h, in case the program is
+ * built without jemalloc support.
+ */
+#if defined(USE_JEMALLOC) || defined(FOLLY_USE_JEMALLOC)
+// We have JEMalloc, so use it.
+# include <jemalloc/jemalloc.h>
+#else
+# ifndef MALLOCX_LG_ALIGN
+# define MALLOCX_LG_ALIGN(la) (la)
+# endif
+# ifndef MALLOCX_ZERO
+# define MALLOCX_ZERO (static_cast<int>(0x40))
+# endif
+#endif
+
+// If using fbstring from libstdc++ (see comment in FBString.h), then
+// just define stub code here to typedef the fbstring type into the
+// folly namespace.
+// This provides backwards compatibility for code that explicitly
+// includes and uses fbstring.
+#if defined(_GLIBCXX_USE_FB) && !defined(_LIBSTDCXX_FBSTRING)
+
+#include <folly/memory/detail/MallocImpl.h>
+#include <folly/portability/BitsFunctexcept.h>
+
+#include <string>
+
+namespace folly {
+ using std::goodMallocSize;
+ using std::jemallocMinInPlaceExpandable;
+ using std::usingJEMalloc;
+ using std::smartRealloc;
+ using std::checkedMalloc;
+ using std::checkedCalloc;
+ using std::checkedRealloc;
+}
+
+#else // !defined(_GLIBCXX_USE_FB) || defined(_LIBSTDCXX_FBSTRING)
+
+#ifdef _LIBSTDCXX_FBSTRING
+#pragma GCC system_header
+
+/**
+ * Declare *allocx() and mallctl*() as weak symbols. These will be provided by
+ * jemalloc if we are using jemalloc, or will be nullptr if we are using another
+ * malloc implementation.
+ */
+extern "C" void* mallocx(size_t, int)
+__attribute__((__weak__));
+extern "C" void* rallocx(void*, size_t, int)
+__attribute__((__weak__));
+extern "C" size_t xallocx(void*, size_t, size_t, int)
+__attribute__((__weak__));
+extern "C" size_t sallocx(const void*, int)
+__attribute__((__weak__));
+extern "C" void dallocx(void*, int)
+__attribute__((__weak__));
+extern "C" void sdallocx(void*, size_t, int)
+__attribute__((__weak__));
+extern "C" size_t nallocx(size_t, int)
+__attribute__((__weak__));
+extern "C" int mallctl(const char*, void*, size_t*, void*, size_t)
+__attribute__((__weak__));
+extern "C" int mallctlnametomib(const char*, size_t*, size_t*)
+__attribute__((__weak__));
+extern "C" int mallctlbymib(const size_t*, size_t, void*, size_t*, void*,
+ size_t)
+__attribute__((__weak__));
+
+#include <bits/functexcept.h>
+
+#define FOLLY_HAVE_MALLOC_H 1
+
+#else // !defined(_LIBSTDCXX_FBSTRING)
+
+#include <folly/memory/detail/MallocImpl.h> /* nolint */
+#include <folly/portability/BitsFunctexcept.h> /* nolint */
+
+#endif
+
+// for malloc_usable_size
+// NOTE: FreeBSD 9 doesn't have malloc.h. Its definitions
+// are found in stdlib.h.
+#if FOLLY_HAVE_MALLOC_H
+#include <malloc.h>
+#else
+#include <stdlib.h>
+#endif
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+
+#include <atomic>
+#include <new>
+
+#ifdef _LIBSTDCXX_FBSTRING
+namespace std _GLIBCXX_VISIBILITY(default) {
+_GLIBCXX_BEGIN_NAMESPACE_VERSION
+#else
+namespace folly {
+#endif
+
+// Cannot depend on Portability.h when _LIBSTDCXX_FBSTRING.
+#if defined(__GNUC__)
+#define FOLLY_MALLOC_NOINLINE __attribute__((__noinline__))
+#if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) >= 40900
+// This is for checked malloc-like functions (returns non-null pointer
+// which cannot alias any outstanding pointer).
+#define FOLLY_MALLOC_CHECKED_MALLOC \
+ __attribute__((__returns_nonnull__, __malloc__))
+#else
+#define FOLLY_MALLOC_CHECKED_MALLOC __attribute__((__malloc__))
+#endif
+#else
+#define FOLLY_MALLOC_NOINLINE
+#define FOLLY_MALLOC_CHECKED_MALLOC
+#endif
+
+/**
+ * Determine if we are using jemalloc or not.
+ */
+FOLLY_MALLOC_NOINLINE inline bool usingJEMalloc() noexcept {
+ // Checking for rallocx != nullptr is not sufficient; we may be in a
+ // dlopen()ed module that depends on libjemalloc, so rallocx is resolved, but
+ // the main program might be using a different memory allocator.
+ // How do we determine that we're using jemalloc? In the hackiest
+ // way possible. We allocate memory using malloc() and see if the
+ // per-thread counter of allocated memory increases. This makes me
+ // feel dirty inside. Also note that this requires jemalloc to have
+ // been compiled with --enable-stats.
+ static const bool result = [] () noexcept {
+ // Some platforms (*cough* OSX *cough*) require weak symbol checks to be
+ // in the form if (mallctl != nullptr). Not if (mallctl) or if (!mallctl)
+ // (!!). http://goo.gl/xpmctm
+ if (mallocx == nullptr || rallocx == nullptr || xallocx == nullptr
+ || sallocx == nullptr || dallocx == nullptr || sdallocx == nullptr
+ || nallocx == nullptr || mallctl == nullptr
+ || mallctlnametomib == nullptr || mallctlbymib == nullptr) {
+ return false;
+ }
+
+ // "volatile" because gcc optimizes out the reads from *counter, because
+ // it "knows" malloc doesn't modify global state...
+ /* nolint */ volatile uint64_t* counter;
+ size_t counterLen = sizeof(uint64_t*);
+
+ if (mallctl("thread.allocatedp", static_cast<void*>(&counter), &counterLen,
+ nullptr, 0) != 0) {
+ return false;
+ }
+
+ if (counterLen != sizeof(uint64_t*)) {
+ return false;
+ }
+
+ uint64_t origAllocated = *counter;
+
+ const void* ptr = malloc(1);
+ if (!ptr) {
+ // wtf, failing to allocate 1 byte
+ return false;
+ }
+
+ /* Avoid optimizing away the malloc. */
+ asm volatile("" ::"m"(ptr) : "memory");
+
+ return (origAllocated != *counter);
+ }();
+
+ return result;
+}
+
+inline size_t goodMallocSize(size_t minSize) noexcept {
+ if (minSize == 0) {
+ return 0;
+ }
+
+ if (!usingJEMalloc()) {
+ // Not using jemalloc - no smarts
+ return minSize;
+ }
+
+ return nallocx(minSize, 0);
+}
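+
+// Illustrative use of goodMallocSize() (sketch): request a capacity that the
+// allocator would hand out anyway, so the slack is usable instead of wasted.
+//
+//   size_t cap = goodMallocSize(100);   // e.g. 112 under jemalloc, 100 otherwise
+//   char* buf = static_cast<char*>(checkedMalloc(cap));  // checkedMalloc: below
+//   // ... use up to `cap` bytes; free(buf) when done.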
+
+// We always request "good" sizes for allocation, so jemalloc can
+// never grow in place small blocks; they're already occupied to the
+// brim. Blocks larger than or equal to 4096 bytes can in fact be
+// expanded in place, and this constant reflects that.
+static const size_t jemallocMinInPlaceExpandable = 4096;
+
+/**
+ * Trivial wrappers around malloc, calloc, realloc that check for allocation
+ * failure and throw std::bad_alloc in that case.
+ */
+inline void* checkedMalloc(size_t size) {
+ void* p = malloc(size);
+ if (!p) {
+ std::__throw_bad_alloc();
+ }
+ return p;
+}
+
+inline void* checkedCalloc(size_t n, size_t size) {
+ void* p = calloc(n, size);
+ if (!p) {
+ std::__throw_bad_alloc();
+ }
+ return p;
+}
+
+inline void* checkedRealloc(void* ptr, size_t size) {
+ void* p = realloc(ptr, size);
+ if (!p) {
+ std::__throw_bad_alloc();
+ }
+ return p;
+}
+
+/**
+ * This function tries to reallocate a buffer of which only the first
+ * currentSize bytes are used. The problem with using realloc is that
+ * if currentSize is relatively small _and_ if realloc decides it
+ * needs to move the memory chunk to a new buffer, then realloc ends
+ * up copying data that is not used. It's generally not a win to try
+ * to hook in to realloc() behavior to avoid copies - at least in
+ * jemalloc, realloc() almost always ends up doing a copy, because
+ * there is little fragmentation / slack space to take advantage of.
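+ *
+ * Illustrative call (sketch): growing a buffer whose first 10 of its 100
+ * bytes are live to a 200-byte capacity copies only the 10 live bytes:
+ *
+ *   p = smartRealloc(p, 10, 100, 200);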
+ */
+FOLLY_MALLOC_CHECKED_MALLOC FOLLY_MALLOC_NOINLINE inline void* smartRealloc(
+ void* p,
+ const size_t currentSize,
+ const size_t currentCapacity,
+ const size_t newCapacity) {
+ assert(p);
+ assert(currentSize <= currentCapacity &&
+ currentCapacity < newCapacity);
+
+ auto const slack = currentCapacity - currentSize;
+ if (slack * 2 > currentSize) {
+ // Too much slack, malloc-copy-free cycle:
+ auto const result = checkedMalloc(newCapacity);
+ std::memcpy(result, p, currentSize);
+ free(p);
+ return result;
+ }
+ // If there's not too much slack, we realloc in hope of coalescing
+ return checkedRealloc(p, newCapacity);
+}
+
+#ifdef _LIBSTDCXX_FBSTRING
+_GLIBCXX_END_NAMESPACE_VERSION
+#endif
+
+} // namespace folly
+
+#endif // !defined(_GLIBCXX_USE_FB) || defined(_LIBSTDCXX_FBSTRING)
--- /dev/null
+/*
+ * Copyright 2017 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <folly/memory/ThreadCachedArena.h>
+
+#include <memory>
+
+namespace folly {
+
+ThreadCachedArena::ThreadCachedArena(size_t minBlockSize, size_t maxAlign)
+ : minBlockSize_(minBlockSize), maxAlign_(maxAlign) {
+}
+
+SysArena* ThreadCachedArena::allocateThreadLocalArena() {
+ SysArena* arena =
+ new SysArena(minBlockSize_, SysArena::kNoSizeLimit, maxAlign_);
+ auto disposer = [this] (SysArena* t, TLPDestructionMode mode) {
+ std::unique_ptr<SysArena> tp(t); // ensure it gets deleted
+ if (mode == TLPDestructionMode::THIS_THREAD) {
+ zombify(std::move(*t));
+ }
+ };
+ arena_.reset(arena, disposer);
+ return arena;
+}
+
+void ThreadCachedArena::zombify(SysArena&& arena) {
+ zombies_->merge(std::move(arena));
+}
+
+size_t ThreadCachedArena::totalSize() const {
+ size_t result = sizeof(ThreadCachedArena);
+ for (const auto& arena : arena_.accessAllThreads()) {
+ result += arena.totalSize();
+ }
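+  // zombies_ is embedded in this object, so the sizeof(SysArena) term that
+  // totalSize() adds is already covered by sizeof(ThreadCachedArena) above;
+  // subtract it to avoid double-counting.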
+ result += zombies_->totalSize() - sizeof(SysArena);
+ return result;
+}
+
+} // namespace folly
--- /dev/null
+/*
+ * Copyright 2017 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <type_traits>
+
+#include <folly/Likely.h>
+#include <folly/Synchronized.h>
+#include <folly/ThreadLocal.h>
+#include <folly/memory/Arena.h>
+
+namespace folly {
+
+/**
+ * Thread-caching arena: allocate memory which gets freed when the arena gets
+ * destroyed.
+ *
+ * The arena itself allocates memory using malloc() in blocks of
+ * at least minBlockSize bytes.
+ *
+ * For speed, each thread gets its own Arena (see Arena.h); when threads
+ * exit, the Arena gets merged into a "zombie" Arena, which will be deallocated
+ * when the ThreadCachedArena object is destroyed.
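+ *
+ * Illustrative usage (sketch):
+ *
+ *   folly::ThreadCachedArena arena;
+ *   void* p = arena.allocate(64);  // served from the calling thread's own
+ *                                  // SysArena, with no lock on this fast path
+ *   // nothing to deallocate(); all memory is freed when `arena` is destroyed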
+ */
+class ThreadCachedArena {
+ public:
+ explicit ThreadCachedArena(
+ size_t minBlockSize = SysArena::kDefaultMinBlockSize,
+ size_t maxAlign = SysArena::kDefaultMaxAlign);
+
+ void* allocate(size_t size) {
+ SysArena* arena = arena_.get();
+ if (UNLIKELY(!arena)) {
+ arena = allocateThreadLocalArena();
+ }
+
+ return arena->allocate(size);
+ }
+
+ void deallocate(void* /* p */) {
+ // Deallocate? Never!
+ }
+
+ // Gets the total memory used by the arena
+ size_t totalSize() const;
+
+ private:
+ struct ThreadLocalPtrTag {};
+
+ ThreadCachedArena(const ThreadCachedArena&) = delete;
+ ThreadCachedArena(ThreadCachedArena&&) = delete;
+ ThreadCachedArena& operator=(const ThreadCachedArena&) = delete;
+ ThreadCachedArena& operator=(ThreadCachedArena&&) = delete;
+
+ SysArena* allocateThreadLocalArena();
+
+ // Zombify the blocks in arena, saving them for deallocation until
+ // the ThreadCachedArena is destroyed.
+ void zombify(SysArena&& arena);
+
+ const size_t minBlockSize_;
+ const size_t maxAlign_;
+
+ ThreadLocalPtr<SysArena, ThreadLocalPtrTag> arena_; // Per-thread arena.
+
+ // Allocations from threads that are now dead.
+ Synchronized<SysArena> zombies_;
+};
+
+template <>
+struct IsArenaAllocator<ThreadCachedArena> : std::true_type { };
+
+} // namespace folly
--- /dev/null
+/*
+ * Copyright 2017 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <folly/memory/detail/MallocImpl.h>
+
+extern "C" {
+
+#ifdef _MSC_VER
+// MSVC doesn't have weak symbols, so do some linker magic
+// to emulate them. (the magic is in the header)
+const char* mallocxWeak = nullptr;
+const char* rallocxWeak = nullptr;
+const char* xallocxWeak = nullptr;
+const char* sallocxWeak = nullptr;
+const char* dallocxWeak = nullptr;
+const char* sdallocxWeak = nullptr;
+const char* nallocxWeak = nullptr;
+const char* mallctlWeak = nullptr;
+const char* mallctlnametomibWeak = nullptr;
+const char* mallctlbymibWeak = nullptr;
+#elif !FOLLY_HAVE_WEAK_SYMBOLS
+void* (*mallocx)(size_t, int) = nullptr;
+void* (*rallocx)(void*, size_t, int) = nullptr;
+size_t (*xallocx)(void*, size_t, size_t, int) = nullptr;
+size_t (*sallocx)(const void*, int) = nullptr;
+void (*dallocx)(void*, int) = nullptr;
+void (*sdallocx)(void*, size_t, int) = nullptr;
+size_t (*nallocx)(size_t, int) = nullptr;
+int (*mallctl)(const char*, void*, size_t*, void*, size_t) = nullptr;
+int (*mallctlnametomib)(const char*, size_t*, size_t*) = nullptr;
+int (*mallctlbymib)(const size_t*, size_t, void*, size_t*, void*, size_t) =
+ nullptr;
+#endif
+
+}
--- /dev/null
+/*
+ * Copyright 2017 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdlib.h>
+
+#include <folly/Portability.h>
+
+extern "C" {
+
+#if FOLLY_HAVE_WEAK_SYMBOLS
+void* mallocx(size_t, int) __attribute__((__weak__));
+void* rallocx(void*, size_t, int) __attribute__((__weak__));
+size_t xallocx(void*, size_t, size_t, int) __attribute__((__weak__));
+size_t sallocx(const void*, int) __attribute__((__weak__));
+void dallocx(void*, int) __attribute__((__weak__));
+void sdallocx(void*, size_t, int) __attribute__((__weak__));
+size_t nallocx(size_t, int) __attribute__((__weak__));
+int mallctl(const char*, void*, size_t*, void*, size_t)
+ __attribute__((__weak__));
+int mallctlnametomib(const char*, size_t*, size_t*) __attribute__((__weak__));
+int mallctlbymib(const size_t*, size_t, void*, size_t*, void*, size_t)
+ __attribute__((__weak__));
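+// If jemalloc (or another provider of these symbols) is not linked in, the
+// weak declarations above resolve to null addresses, so callers are expected
+// to check for nullptr (e.g. `if (mallocx) { ... }`) before calling through
+// them.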
+#else
+extern void* (*mallocx)(size_t, int);
+extern void* (*rallocx)(void*, size_t, int);
+extern size_t (*xallocx)(void*, size_t, size_t, int);
+extern size_t (*sallocx)(const void*, int);
+extern void (*dallocx)(void*, int);
+extern void (*sdallocx)(void*, size_t, int);
+extern size_t (*nallocx)(size_t, int);
+extern int (*mallctl)(const char*, void*, size_t*, void*, size_t);
+extern int (*mallctlnametomib)(const char*, size_t*, size_t*);
+extern int (*mallctlbymib)(const size_t*, size_t, void*, size_t*, void*,
+ size_t);
+#ifdef _MSC_VER
+// We emulate weak linkage for MSVC. The symbols we're
+// aliasing to are defined in MallocImpl.cpp.
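+// /alternatename directs the MSVC linker to fall back to the given alternate
+// symbol only when no real definition of the original symbol is linked in,
+// which approximates weak linkage closely enough for this use.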
+#pragma comment(linker, "/alternatename:mallocx=mallocxWeak")
+#pragma comment(linker, "/alternatename:rallocx=rallocxWeak")
+#pragma comment(linker, "/alternatename:xallocx=xallocxWeak")
+#pragma comment(linker, "/alternatename:sallocx=sallocxWeak")
+#pragma comment(linker, "/alternatename:dallocx=dallocxWeak")
+#pragma comment(linker, "/alternatename:sdallocx=sdallocxWeak")
+#pragma comment(linker, "/alternatename:nallocx=nallocxWeak")
+#pragma comment(linker, "/alternatename:mallctl=mallctlWeak")
+#pragma comment(linker, "/alternatename:mallctlnametomib=mallctlnametomibWeak")
+#pragma comment(linker, "/alternatename:mallctlbymib=mallctlbymibWeak")
+#endif
+#endif
+
+}
--- /dev/null
+/*
+ * Copyright 2017 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <folly/memory/Arena.h>
+#include <folly/Memory.h>
+#include <folly/portability/GTest.h>
+
+#include <set>
+#include <vector>
+
+#include <glog/logging.h>
+
+using namespace folly;
+
+static_assert(IsArenaAllocator<SysArena>::value, "");
+
+TEST(Arena, SizeSanity) {
+ std::set<size_t*> allocatedItems;
+
+ static const size_t requestedBlockSize = 64;
+ SysArena arena(requestedBlockSize);
+ size_t minimum_size = sizeof(SysArena), maximum_size = minimum_size;
+ EXPECT_EQ(arena.totalSize(), minimum_size);
+
+ // Insert a single small element to get a new block
+ size_t* ptr = static_cast<size_t*>(arena.allocate(sizeof(long)));
+ allocatedItems.insert(ptr);
+ minimum_size += requestedBlockSize;
+ maximum_size += goodMallocSize(requestedBlockSize + SysArena::kBlockOverhead);
+ EXPECT_TRUE(arena.totalSize() >= minimum_size);
+ EXPECT_TRUE(arena.totalSize() <= maximum_size);
+ VLOG(4) << minimum_size << " < " << arena.totalSize() << " < "
+ << maximum_size;
+
+ // Insert a larger element, size should be the same
+ ptr = static_cast<size_t*>(arena.allocate(requestedBlockSize / 2));
+ allocatedItems.insert(ptr);
+ EXPECT_TRUE(arena.totalSize() >= minimum_size);
+ EXPECT_TRUE(arena.totalSize() <= maximum_size);
+ VLOG(4) << minimum_size << " < " << arena.totalSize() << " < "
+ << maximum_size;
+
+ // Insert 10 full block sizes to get 10 new blocks
+ for (int i = 0; i < 10; i++) {
+ ptr = static_cast<size_t*>(arena.allocate(requestedBlockSize));
+ allocatedItems.insert(ptr);
+ }
+ minimum_size += 10 * requestedBlockSize;
+ maximum_size += 10 * goodMallocSize(requestedBlockSize
+ + SysArena::kBlockOverhead);
+ EXPECT_TRUE(arena.totalSize() >= minimum_size);
+ EXPECT_TRUE(arena.totalSize() <= maximum_size);
+ VLOG(4) << minimum_size << " < " << arena.totalSize() << " < "
+ << maximum_size;
+
+ // Insert something huge
+ ptr = static_cast<size_t*>(arena.allocate(10 * requestedBlockSize));
+ allocatedItems.insert(ptr);
+ minimum_size += 10 * requestedBlockSize;
+ maximum_size += goodMallocSize(10 * requestedBlockSize
+ + SysArena::kBlockOverhead);
+ EXPECT_TRUE(arena.totalSize() >= minimum_size);
+ EXPECT_TRUE(arena.totalSize() <= maximum_size);
+ VLOG(4) << minimum_size << " < " << arena.totalSize() << " < "
+ << maximum_size;
+
+ // Nuke 'em all
+ for (const auto& item : allocatedItems) {
+ arena.deallocate(item);
+ }
+  // The total size should be the same
+ EXPECT_TRUE(arena.totalSize() >= minimum_size);
+ EXPECT_TRUE(arena.totalSize() <= maximum_size);
+ VLOG(4) << minimum_size << " < " << arena.totalSize() << " < "
+ << maximum_size;
+}
+
+TEST(Arena, BytesUsedSanity) {
+ static const size_t smallChunkSize = 1024;
+ static const size_t blockSize = goodMallocSize(16 * smallChunkSize);
+ const size_t bigChunkSize = blockSize - 4 * smallChunkSize;
+
+ size_t bytesUsed = 0;
+
+ SysArena arena(blockSize);
+ EXPECT_EQ(arena.bytesUsed(), bytesUsed);
+
+ // Insert 2 small chunks
+ arena.allocate(smallChunkSize);
+ arena.allocate(smallChunkSize);
+ bytesUsed += 2 * smallChunkSize;
+ EXPECT_EQ(arena.bytesUsed(), bytesUsed);
+ EXPECT_TRUE(arena.totalSize() >= blockSize);
+ EXPECT_TRUE(arena.totalSize() <= 2 * blockSize);
+
+ // Insert big chunk, should still fit in one block
+ arena.allocate(bigChunkSize);
+ bytesUsed += bigChunkSize;
+ EXPECT_EQ(arena.bytesUsed(), bytesUsed);
+ EXPECT_TRUE(arena.totalSize() >= blockSize);
+ EXPECT_TRUE(arena.totalSize() <= 2 * blockSize);
+
+ // Insert big chunk once more, should trigger new block allocation
+ arena.allocate(bigChunkSize);
+ bytesUsed += bigChunkSize;
+ EXPECT_EQ(arena.bytesUsed(), bytesUsed);
+ EXPECT_TRUE(arena.totalSize() >= 2 * blockSize);
+ EXPECT_TRUE(arena.totalSize() <= 3 * blockSize);
+
+ // Test that bytesUsed() accounts for alignment
+ static const size_t tinyChunkSize = 7;
+ arena.allocate(tinyChunkSize);
+ EXPECT_TRUE(arena.bytesUsed() >= bytesUsed + tinyChunkSize);
+ size_t delta = arena.bytesUsed() - bytesUsed;
+ EXPECT_EQ(delta & (delta - 1), 0);
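+  // The check above passes iff delta is a power of two, i.e. the 7-byte
+  // request was rounded up to the arena's alignment.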
+}
+
+TEST(Arena, Vector) {
+ static const size_t requestedBlockSize = 64;
+ SysArena arena(requestedBlockSize);
+
+ EXPECT_EQ(arena.totalSize(), sizeof(SysArena));
+
+ std::vector<size_t, StlAllocator<SysArena, size_t>>
+ vec { {}, StlAllocator<SysArena, size_t>(&arena) };
+
+ for (size_t i = 0; i < 1000; i++) {
+ vec.push_back(i);
+ }
+
+ for (size_t i = 0; i < 1000; i++) {
+ EXPECT_EQ(i, vec[i]);
+ }
+}
+
+TEST(Arena, SizeLimit) {
+ static const size_t requestedBlockSize = sizeof(size_t);
+ static const size_t maxSize = 10 * requestedBlockSize;
+
+ SysArena arena(requestedBlockSize, maxSize);
+
+ void* a = arena.allocate(sizeof(size_t));
+ EXPECT_TRUE(a != nullptr);
+ EXPECT_THROW(arena.allocate(maxSize + 1), std::bad_alloc);
+}
+
+TEST(Arena, MoveArena) {
+ SysArena arena(sizeof(size_t) * 2);
+ arena.allocate(sizeof(size_t));
+ auto totalSize = arena.totalSize();
+ auto bytesUsed = arena.bytesUsed();
+
+ SysArena moved(std::move(arena));
+ EXPECT_EQ(totalSize, moved.totalSize());
+ EXPECT_EQ(bytesUsed, moved.bytesUsed());
+}
+
+int main(int argc, char *argv[]) {
+ testing::InitGoogleTest(&argc, argv);
+ gflags::ParseCommandLineFlags(&argc, &argv, true);
+ auto ret = RUN_ALL_TESTS();
+ return ret;
+}
*/
#include <folly/memory/MallctlHelper.h>
-#include <folly/Malloc.h>
#include <folly/init/Init.h>
+#include <folly/memory/Malloc.h>
#include <folly/portability/GTest.h>
#ifdef FOLLY_HAVE_LIBJEMALLOC
--- /dev/null
+/*
+ * Copyright 2017 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <folly/memory/ThreadCachedArena.h>
+
+#include <algorithm>
+#include <iterator>
+#include <map>
+#include <mutex>
+#include <random>
+#include <thread>
+#include <unordered_map>
+
+#include <glog/logging.h>
+
+#include <folly/Benchmark.h>
+#include <folly/Memory.h>
+#include <folly/Range.h>
+#include <folly/portability/GTest.h>
+
+using namespace folly;
+
+namespace {
+
+class ArenaTester {
+ public:
+ explicit ArenaTester(ThreadCachedArena& arena) : arena_(&arena) { }
+
+ void allocate(size_t count, size_t maxSize);
+ void verify();
+ void merge(ArenaTester&& other);
+
+ private:
+ std::mutex mergeMutex_;
+ std::vector<std::pair<uint8_t, Range<uint8_t*>>> areas_;
+ ThreadCachedArena* arena_;
+};
+
+void ArenaTester::allocate(size_t count, size_t maxSize) {
+ // Allocate chunks of memory of random sizes
+ std::mt19937 rnd;
+ std::uniform_int_distribution<uint32_t> sizeDist(1, maxSize - 1);
+ areas_.clear();
+ areas_.reserve(count);
+ for (size_t i = 0; i < count; i++) {
+ size_t size = sizeDist(rnd);
+ uint8_t* p = static_cast<uint8_t*>(arena_->allocate(size));
+ areas_.emplace_back(uint8_t(rnd() & 0xff), Range<uint8_t*>(p, size));
+ }
+
+  // Fill each area with a different value, to prove that they don't overlap.
+  // Fill in random order.
+ std::random_shuffle(areas_.begin(), areas_.end(), [&rnd](ptrdiff_t n) {
+ return std::uniform_int_distribution<uint32_t>(0, n - 1)(rnd);
+ });
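+  // Note: std::random_shuffle was deprecated in C++14 and removed in C++17;
+  // std::shuffle(areas_.begin(), areas_.end(), rnd) would be the modern
+  // equivalent here.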
+
+ for (auto& p : areas_) {
+ std::fill(p.second.begin(), p.second.end(), p.first);
+ }
+}
+
+void ArenaTester::verify() {
+ for (auto& p : areas_) {
+ for (auto v : p.second) {
+ EXPECT_EQ(p.first, v);
+ }
+ }
+}
+
+void ArenaTester::merge(ArenaTester&& other) {
+ {
+ std::lock_guard<std::mutex> lock(mergeMutex_);
+ std::move(other.areas_.begin(), other.areas_.end(),
+ std::back_inserter(areas_));
+ }
+ other.areas_.clear();
+}
+
+} // namespace
+
+TEST(ThreadCachedArena, BlockSize) {
+ static const size_t alignment = folly::max_align_v;
+ static const size_t requestedBlockSize = 64;
+
+ ThreadCachedArena arena(requestedBlockSize);
+ size_t blockSize = alignment;
+ uint8_t* prev = static_cast<uint8_t*>(arena.allocate(1));
+
+ // Keep allocating until we're no longer one single alignment away from the
+ // previous allocation -- that's when we've gotten to the next block.
+ uint8_t* p;
+ while ((p = static_cast<uint8_t*>(arena.allocate(1))) ==
+ prev + alignment) {
+ prev = p;
+ blockSize += alignment;
+ }
+
+ VLOG(1) << "Requested block size: " << requestedBlockSize << ", actual: "
+ << blockSize;
+ EXPECT_LE(requestedBlockSize, blockSize);
+}
+
+TEST(ThreadCachedArena, SingleThreaded) {
+ static const size_t requestedBlockSize = 64;
+ ThreadCachedArena arena(requestedBlockSize);
+ EXPECT_EQ(arena.totalSize(), sizeof(ThreadCachedArena));
+
+ ArenaTester tester(arena);
+ tester.allocate(100, 100 << 10);
+ tester.verify();
+
+ EXPECT_GT(arena.totalSize(), sizeof(ThreadCachedArena));
+}
+
+TEST(ThreadCachedArena, MultiThreaded) {
+ static const size_t requestedBlockSize = 64;
+ ThreadCachedArena arena(requestedBlockSize);
+ ArenaTester mainTester(arena);
+
+ // Do this twice, to catch the possibility that memory from the first
+ // round gets freed
+ static const size_t numThreads = 20;
+ for (size_t i = 0; i < 2; i++) {
+ std::vector<std::thread> threads;
+ threads.reserve(numThreads);
+ for (size_t j = 0; j < numThreads; j++) {
+ threads.emplace_back(
+ [&arena, &mainTester] () {
+ ArenaTester tester(arena);
+ tester.allocate(500, 1 << 10);
+ tester.verify();
+ mainTester.merge(std::move(tester));
+ });
+ }
+ for (auto& t : threads) {
+ t.join();
+ }
+ }
+
+ mainTester.verify();
+}
+
+TEST(ThreadCachedArena, StlAllocator) {
+ typedef std::unordered_map<
+ int, int, std::hash<int>, std::equal_to<int>,
+ StlAllocator<ThreadCachedArena, std::pair<const int, int>>> Map;
+
+ static const size_t requestedBlockSize = 64;
+ ThreadCachedArena arena(requestedBlockSize);
+
+ Map map {0, std::hash<int>(), std::equal_to<int>(),
+ StlAllocator<ThreadCachedArena, std::pair<const int, int>>(&arena)};
+
+ for (int i = 0; i < 1000; i++) {
+ map[i] = i;
+ }
+
+ for (int i = 0; i < 1000; i++) {
+ EXPECT_EQ(i, map[i]);
+ }
+}
+
+namespace {
+
+static const int kNumValues = 10000;
+
+BENCHMARK(bmUMStandard, iters) {
+ typedef std::unordered_map<int, int> Map;
+
+ while (iters--) {
+ Map map {0};
+ for (int i = 0; i < kNumValues; i++) {
+ map[i] = i;
+ }
+ }
+}
+
+BENCHMARK(bmUMArena, iters) {
+ typedef std::unordered_map<
+ int, int, std::hash<int>, std::equal_to<int>,
+ StlAllocator<ThreadCachedArena, std::pair<const int, int>>> Map;
+
+ while (iters--) {
+ ThreadCachedArena arena;
+
+ Map map {0, std::hash<int>(), std::equal_to<int>(),
+ StlAllocator<ThreadCachedArena, std::pair<const int, int>>(
+ &arena)};
+
+ for (int i = 0; i < kNumValues; i++) {
+ map[i] = i;
+ }
+ }
+}
+
+BENCHMARK_DRAW_LINE()
+
+BENCHMARK(bmMStandard, iters) {
+ typedef std::map<int, int> Map;
+
+ while (iters--) {
+ Map map;
+ for (int i = 0; i < kNumValues; i++) {
+ map[i] = i;
+ }
+ }
+}
+
+BENCHMARK_DRAW_LINE()
+
+BENCHMARK(bmMArena, iters) {
+ typedef std::map<
+ int, int, std::less<int>,
+ StlAllocator<ThreadCachedArena, std::pair<const int, int>>> Map;
+
+ while (iters--) {
+ ThreadCachedArena arena;
+
+ Map map {std::less<int>(),
+ StlAllocator<ThreadCachedArena, std::pair<const int, int>>(
+ &arena)};
+
+ for (int i = 0; i < kNumValues; i++) {
+ map[i] = i;
+ }
+ }
+}
+
+BENCHMARK_DRAW_LINE()
+
+} // namespace
+
+// Benchmark Iters Total t t/iter iter/sec
+// ----------------------------------------------------------------------------
+// Comparing benchmarks: bmUMStandard,bmUMArena
+// + 143% bmUMStandard 1570 2.005 s 1.277 ms 782.9
+// * bmUMArena 3817 2.003 s 524.7 us 1.861 k
+// ----------------------------------------------------------------------------
+// Comparing benchmarks: bmMStandard,bmMArena
+// +79.0% bmMStandard 1197 2.009 s 1.678 ms 595.8
+// * bmMArena 2135 2.002 s 937.6 us 1.042 k
+// ----------------------------------------------------------------------------
+
+int main(int argc, char *argv[]) {
+ testing::InitGoogleTest(&argc, argv);
+ gflags::ParseCommandLineFlags(&argc, &argv, true);
+ auto ret = RUN_ALL_TESTS();
+ if (!ret && FLAGS_benchmark) {
+ folly::runBenchmarks();
+ }
+ return ret;
+}
#include <folly/Assume.h>
#include <folly/ConstexprMath.h>
#include <folly/FormatTraits.h>
-#include <folly/Malloc.h>
#include <folly/Portability.h>
#include <folly/SmallLocks.h>
#include <folly/Traits.h>
+#include <folly/memory/Malloc.h>
#include <folly/portability/BitsFunctexcept.h>
#include <folly/portability/Malloc.h>
#include <folly/portability/TypeTraits.h>
* @author: Marcelo Juchem <marcelo@fb.com>
*/
-#include <folly/Arena.h>
#include <folly/Memory.h>
+#include <folly/memory/Arena.h>
#include <folly/portability/GTest.h>
using namespace folly;
+++ /dev/null
-/*
- * Copyright 2017 Facebook, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <folly/Arena.h>
-#include <folly/Memory.h>
-#include <folly/portability/GTest.h>
-
-#include <set>
-#include <vector>
-
-#include <glog/logging.h>
-
-using namespace folly;
-
-static_assert(IsArenaAllocator<SysArena>::value, "");
-
-TEST(Arena, SizeSanity) {
- std::set<size_t*> allocatedItems;
-
- static const size_t requestedBlockSize = 64;
- SysArena arena(requestedBlockSize);
- size_t minimum_size = sizeof(SysArena), maximum_size = minimum_size;
- EXPECT_EQ(arena.totalSize(), minimum_size);
-
- // Insert a single small element to get a new block
- size_t* ptr = static_cast<size_t*>(arena.allocate(sizeof(long)));
- allocatedItems.insert(ptr);
- minimum_size += requestedBlockSize;
- maximum_size += goodMallocSize(requestedBlockSize + SysArena::kBlockOverhead);
- EXPECT_TRUE(arena.totalSize() >= minimum_size);
- EXPECT_TRUE(arena.totalSize() <= maximum_size);
- VLOG(4) << minimum_size << " < " << arena.totalSize() << " < "
- << maximum_size;
-
- // Insert a larger element, size should be the same
- ptr = static_cast<size_t*>(arena.allocate(requestedBlockSize / 2));
- allocatedItems.insert(ptr);
- EXPECT_TRUE(arena.totalSize() >= minimum_size);
- EXPECT_TRUE(arena.totalSize() <= maximum_size);
- VLOG(4) << minimum_size << " < " << arena.totalSize() << " < "
- << maximum_size;
-
- // Insert 10 full block sizes to get 10 new blocks
- for (int i = 0; i < 10; i++) {
- ptr = static_cast<size_t*>(arena.allocate(requestedBlockSize));
- allocatedItems.insert(ptr);
- }
- minimum_size += 10 * requestedBlockSize;
- maximum_size += 10 * goodMallocSize(requestedBlockSize
- + SysArena::kBlockOverhead);
- EXPECT_TRUE(arena.totalSize() >= minimum_size);
- EXPECT_TRUE(arena.totalSize() <= maximum_size);
- VLOG(4) << minimum_size << " < " << arena.totalSize() << " < "
- << maximum_size;
-
- // Insert something huge
- ptr = static_cast<size_t*>(arena.allocate(10 * requestedBlockSize));
- allocatedItems.insert(ptr);
- minimum_size += 10 * requestedBlockSize;
- maximum_size += goodMallocSize(10 * requestedBlockSize
- + SysArena::kBlockOverhead);
- EXPECT_TRUE(arena.totalSize() >= minimum_size);
- EXPECT_TRUE(arena.totalSize() <= maximum_size);
- VLOG(4) << minimum_size << " < " << arena.totalSize() << " < "
- << maximum_size;
-
- // Nuke 'em all
- for (const auto& item : allocatedItems) {
- arena.deallocate(item);
- }
- //The total size should be the same
- EXPECT_TRUE(arena.totalSize() >= minimum_size);
- EXPECT_TRUE(arena.totalSize() <= maximum_size);
- VLOG(4) << minimum_size << " < " << arena.totalSize() << " < "
- << maximum_size;
-}
-
-TEST(Arena, BytesUsedSanity) {
- static const size_t smallChunkSize = 1024;
- static const size_t blockSize = goodMallocSize(16 * smallChunkSize);
- const size_t bigChunkSize = blockSize - 4 * smallChunkSize;
-
- size_t bytesUsed = 0;
-
- SysArena arena(blockSize);
- EXPECT_EQ(arena.bytesUsed(), bytesUsed);
-
- // Insert 2 small chunks
- arena.allocate(smallChunkSize);
- arena.allocate(smallChunkSize);
- bytesUsed += 2 * smallChunkSize;
- EXPECT_EQ(arena.bytesUsed(), bytesUsed);
- EXPECT_TRUE(arena.totalSize() >= blockSize);
- EXPECT_TRUE(arena.totalSize() <= 2 * blockSize);
-
- // Insert big chunk, should still fit in one block
- arena.allocate(bigChunkSize);
- bytesUsed += bigChunkSize;
- EXPECT_EQ(arena.bytesUsed(), bytesUsed);
- EXPECT_TRUE(arena.totalSize() >= blockSize);
- EXPECT_TRUE(arena.totalSize() <= 2 * blockSize);
-
- // Insert big chunk once more, should trigger new block allocation
- arena.allocate(bigChunkSize);
- bytesUsed += bigChunkSize;
- EXPECT_EQ(arena.bytesUsed(), bytesUsed);
- EXPECT_TRUE(arena.totalSize() >= 2 * blockSize);
- EXPECT_TRUE(arena.totalSize() <= 3 * blockSize);
-
- // Test that bytesUsed() accounts for alignment
- static const size_t tinyChunkSize = 7;
- arena.allocate(tinyChunkSize);
- EXPECT_TRUE(arena.bytesUsed() >= bytesUsed + tinyChunkSize);
- size_t delta = arena.bytesUsed() - bytesUsed;
- EXPECT_EQ(delta & (delta - 1), 0);
-}
-
-TEST(Arena, Vector) {
- static const size_t requestedBlockSize = 64;
- SysArena arena(requestedBlockSize);
-
- EXPECT_EQ(arena.totalSize(), sizeof(SysArena));
-
- std::vector<size_t, StlAllocator<SysArena, size_t>>
- vec { {}, StlAllocator<SysArena, size_t>(&arena) };
-
- for (size_t i = 0; i < 1000; i++) {
- vec.push_back(i);
- }
-
- for (size_t i = 0; i < 1000; i++) {
- EXPECT_EQ(i, vec[i]);
- }
-}
-
-TEST(Arena, SizeLimit) {
- static const size_t requestedBlockSize = sizeof(size_t);
- static const size_t maxSize = 10 * requestedBlockSize;
-
- SysArena arena(requestedBlockSize, maxSize);
-
- void* a = arena.allocate(sizeof(size_t));
- EXPECT_TRUE(a != nullptr);
- EXPECT_THROW(arena.allocate(maxSize + 1), std::bad_alloc);
-}
-
-TEST(Arena, MoveArena) {
- SysArena arena(sizeof(size_t) * 2);
- arena.allocate(sizeof(size_t));
- auto totalSize = arena.totalSize();
- auto bytesUsed = arena.bytesUsed();
-
- SysArena moved(std::move(arena));
- EXPECT_EQ(totalSize, moved.totalSize());
- EXPECT_EQ(bytesUsed, moved.bytesUsed());
-}
-
-int main(int argc, char *argv[]) {
- testing::InitGoogleTest(&argc, argv);
- gflags::ParseCommandLineFlags(&argc, &argv, true);
- auto ret = RUN_ALL_TESTS();
- return ret;
-}
#include <glog/logging.h>
-#include <folly/Arena.h>
#include <folly/Foreach.h>
#include <folly/Memory.h>
#include <folly/String.h>
+#include <folly/memory/Arena.h>
#include <folly/portability/GFlags.h>
#include <folly/portability/GTest.h>
#include <glog/logging.h>
-#include <folly/Arena.h>
#include <folly/String.h>
+#include <folly/memory/Arena.h>
#include <folly/portability/GTest.h>
using namespace folly;
+++ /dev/null
-/*
- * Copyright 2017 Facebook, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <folly/ThreadCachedArena.h>
-
-#include <algorithm>
-#include <iterator>
-#include <map>
-#include <mutex>
-#include <random>
-#include <thread>
-#include <unordered_map>
-
-#include <glog/logging.h>
-
-#include <folly/Benchmark.h>
-#include <folly/Memory.h>
-#include <folly/Range.h>
-#include <folly/portability/GTest.h>
-
-using namespace folly;
-
-namespace {
-
-class ArenaTester {
- public:
- explicit ArenaTester(ThreadCachedArena& arena) : arena_(&arena) { }
-
- void allocate(size_t count, size_t maxSize);
- void verify();
- void merge(ArenaTester&& other);
-
- private:
- std::mutex mergeMutex_;
- std::vector<std::pair<uint8_t, Range<uint8_t*>>> areas_;
- ThreadCachedArena* arena_;
-};
-
-void ArenaTester::allocate(size_t count, size_t maxSize) {
- // Allocate chunks of memory of random sizes
- std::mt19937 rnd;
- std::uniform_int_distribution<uint32_t> sizeDist(1, maxSize - 1);
- areas_.clear();
- areas_.reserve(count);
- for (size_t i = 0; i < count; i++) {
- size_t size = sizeDist(rnd);
- uint8_t* p = static_cast<uint8_t*>(arena_->allocate(size));
- areas_.emplace_back(uint8_t(rnd() & 0xff), Range<uint8_t*>(p, size));
- }
-
- // Fill each area with a different value, to prove that they don't overlap
- // Fill in random order.
- std::random_shuffle(areas_.begin(), areas_.end(), [&rnd](ptrdiff_t n) {
- return std::uniform_int_distribution<uint32_t>(0, n - 1)(rnd);
- });
-
- for (auto& p : areas_) {
- std::fill(p.second.begin(), p.second.end(), p.first);
- }
-}
-
-void ArenaTester::verify() {
- for (auto& p : areas_) {
- for (auto v : p.second) {
- EXPECT_EQ(p.first, v);
- }
- }
-}
-
-void ArenaTester::merge(ArenaTester&& other) {
- {
- std::lock_guard<std::mutex> lock(mergeMutex_);
- std::move(other.areas_.begin(), other.areas_.end(),
- std::back_inserter(areas_));
- }
- other.areas_.clear();
-}
-
-} // namespace
-
-TEST(ThreadCachedArena, BlockSize) {
- static const size_t alignment = folly::max_align_v;
- static const size_t requestedBlockSize = 64;
-
- ThreadCachedArena arena(requestedBlockSize);
- size_t blockSize = alignment;
- uint8_t* prev = static_cast<uint8_t*>(arena.allocate(1));
-
- // Keep allocating until we're no longer one single alignment away from the
- // previous allocation -- that's when we've gotten to the next block.
- uint8_t* p;
- while ((p = static_cast<uint8_t*>(arena.allocate(1))) ==
- prev + alignment) {
- prev = p;
- blockSize += alignment;
- }
-
- VLOG(1) << "Requested block size: " << requestedBlockSize << ", actual: "
- << blockSize;
- EXPECT_LE(requestedBlockSize, blockSize);
-}
-
-TEST(ThreadCachedArena, SingleThreaded) {
- static const size_t requestedBlockSize = 64;
- ThreadCachedArena arena(requestedBlockSize);
- EXPECT_EQ(arena.totalSize(), sizeof(ThreadCachedArena));
-
- ArenaTester tester(arena);
- tester.allocate(100, 100 << 10);
- tester.verify();
-
- EXPECT_GT(arena.totalSize(), sizeof(ThreadCachedArena));
-}
-
-TEST(ThreadCachedArena, MultiThreaded) {
- static const size_t requestedBlockSize = 64;
- ThreadCachedArena arena(requestedBlockSize);
- ArenaTester mainTester(arena);
-
- // Do this twice, to catch the possibility that memory from the first
- // round gets freed
- static const size_t numThreads = 20;
- for (size_t i = 0; i < 2; i++) {
- std::vector<std::thread> threads;
- threads.reserve(numThreads);
- for (size_t j = 0; j < numThreads; j++) {
- threads.emplace_back(
- [&arena, &mainTester] () {
- ArenaTester tester(arena);
- tester.allocate(500, 1 << 10);
- tester.verify();
- mainTester.merge(std::move(tester));
- });
- }
- for (auto& t : threads) {
- t.join();
- }
- }
-
- mainTester.verify();
-}
-
-TEST(ThreadCachedArena, StlAllocator) {
- typedef std::unordered_map<
- int, int, std::hash<int>, std::equal_to<int>,
- StlAllocator<ThreadCachedArena, std::pair<const int, int>>> Map;
-
- static const size_t requestedBlockSize = 64;
- ThreadCachedArena arena(requestedBlockSize);
-
- Map map {0, std::hash<int>(), std::equal_to<int>(),
- StlAllocator<ThreadCachedArena, std::pair<const int, int>>(&arena)};
-
- for (int i = 0; i < 1000; i++) {
- map[i] = i;
- }
-
- for (int i = 0; i < 1000; i++) {
- EXPECT_EQ(i, map[i]);
- }
-}
-
-namespace {
-
-static const int kNumValues = 10000;
-
-BENCHMARK(bmUMStandard, iters) {
- typedef std::unordered_map<int, int> Map;
-
- while (iters--) {
- Map map {0};
- for (int i = 0; i < kNumValues; i++) {
- map[i] = i;
- }
- }
-}
-
-BENCHMARK(bmUMArena, iters) {
- typedef std::unordered_map<
- int, int, std::hash<int>, std::equal_to<int>,
- StlAllocator<ThreadCachedArena, std::pair<const int, int>>> Map;
-
- while (iters--) {
- ThreadCachedArena arena;
-
- Map map {0, std::hash<int>(), std::equal_to<int>(),
- StlAllocator<ThreadCachedArena, std::pair<const int, int>>(
- &arena)};
-
- for (int i = 0; i < kNumValues; i++) {
- map[i] = i;
- }
- }
-}
-
-BENCHMARK_DRAW_LINE()
-
-BENCHMARK(bmMStandard, iters) {
- typedef std::map<int, int> Map;
-
- while (iters--) {
- Map map;
- for (int i = 0; i < kNumValues; i++) {
- map[i] = i;
- }
- }
-}
-
-BENCHMARK_DRAW_LINE()
-
-BENCHMARK(bmMArena, iters) {
- typedef std::map<
- int, int, std::less<int>,
- StlAllocator<ThreadCachedArena, std::pair<const int, int>>> Map;
-
- while (iters--) {
- ThreadCachedArena arena;
-
- Map map {std::less<int>(),
- StlAllocator<ThreadCachedArena, std::pair<const int, int>>(
- &arena)};
-
- for (int i = 0; i < kNumValues; i++) {
- map[i] = i;
- }
- }
-}
-
-BENCHMARK_DRAW_LINE()
-
-} // namespace
-
-// Benchmark Iters Total t t/iter iter/sec
-// ----------------------------------------------------------------------------
-// Comparing benchmarks: bmUMStandard,bmUMArena
-// + 143% bmUMStandard 1570 2.005 s 1.277 ms 782.9
-// * bmUMArena 3817 2.003 s 524.7 us 1.861 k
-// ----------------------------------------------------------------------------
-// Comparing benchmarks: bmMStandard,bmMArena
-// +79.0% bmMStandard 1197 2.009 s 1.678 ms 595.8
-// * bmMArena 2135 2.002 s 937.6 us 1.042 k
-// ----------------------------------------------------------------------------
-
-int main(int argc, char *argv[]) {
- testing::InitGoogleTest(&argc, argv);
- gflags::ParseCommandLineFlags(&argc, &argv, true);
- auto ret = RUN_ALL_TESTS();
- if (!ret && FLAGS_benchmark) {
- folly::runBenchmarks();
- }
- return ret;
-}