/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "GuardPageAllocator.h"

#include <unistd.h>

#include <cassert>
#include <memory>
#include <mutex>
#include <vector>

#include <folly/Singleton.h>
#include <folly/SpinLock.h>
#include <folly/portability/SysMman.h>

#include <glog/logging.h>
28 namespace folly { namespace fibers {
31 * Each stack with a guard page creates two memory mappings.
32 * Since this is a limited resource, we don't want to create too many of these.
34 * The upper bound on total number of mappings created
35 * is kNumGuarded * kMaxInUse.
39 * Number of guarded stacks per allocator instance
41 constexpr size_t kNumGuarded = 100;
44 * Maximum number of allocator instances with guarded stacks enabled
46 constexpr size_t kMaxInUse = 100;
49 * A cache for kNumGuarded stacks of a given size
55 explicit StackCache(size_t stackSize)
56 : allocSize_(allocSize(stackSize)) {
57 auto p = ::mmap(nullptr, allocSize_ * kNumGuarded,
58 PROT_READ | PROT_WRITE,
59 MAP_PRIVATE | MAP_ANONYMOUS,
61 PCHECK(p != (void*)(-1));
62 storage_ = reinterpret_cast<unsigned char*>(p);
64 /* Protect the bottommost page of every stack allocation */
65 for (size_t i = 0; i < kNumGuarded; ++i) {
66 auto allocBegin = storage_ + allocSize_ * i;
67 freeList_.push_back(allocBegin);
68 PCHECK(0 == ::mprotect(allocBegin, pagesize(), PROT_NONE));
72 unsigned char* borrow(size_t size) {
73 std::lock_guard<folly::SpinLock> lg(lock_);
77 auto as = allocSize(size);
78 if (as != allocSize_ || freeList_.empty()) {
82 auto p = freeList_.back();
85 /* We allocate minimum number of pages required, plus a guard page.
86 Since we use this for stack storage, requested allocation is aligned
87 at the top of the allocated pages, while the guard page is at the bottom.
89 -- increasing addresses -->
90 Guard page Normal pages
91 |xxxxxxxxxx|..........|..........|
92 <- allocSize_ ------------------->
93 p -^ <- size -------->
96 auto limit = p + allocSize_ - size;
97 assert(limit >= p + pagesize());
101 bool giveBack(unsigned char* limit, size_t size) {
102 std::lock_guard<folly::SpinLock> lg(lock_);
106 auto as = allocSize(size);
107 auto p = limit + size - as;
108 if (p < storage_ || p >= storage_ + allocSize_ * kNumGuarded) {
113 assert(as == allocSize_);
114 assert((p - storage_) % allocSize_ == 0);
115 freeList_.push_back(p);
121 PCHECK(0 == ::munmap(storage_, allocSize_ * kNumGuarded));
125 folly::SpinLock lock_;
126 unsigned char* storage_{nullptr};
127 size_t allocSize_{0};
132 std::vector<unsigned char*> freeList_;
134 static size_t pagesize() {
135 static const size_t pagesize = sysconf(_SC_PAGESIZE);
139 /* Returns a multiple of pagesize() enough to store size + one guard page */
140 static size_t allocSize(size_t size) {
141 return pagesize() * ((size + pagesize() - 1)/pagesize() + 1);
147 static CacheManager& instance() {
148 static auto inst = new CacheManager();
152 std::unique_ptr<StackCacheEntry> getStackCache(size_t stackSize) {
153 std::lock_guard<folly::SpinLock> lg(lock_);
154 if (inUse_ < kMaxInUse) {
156 return folly::make_unique<StackCacheEntry>(stackSize);
163 folly::SpinLock lock_;
166 friend class StackCacheEntry;
168 void giveBack(std::unique_ptr<StackCache> /* stackCache_ */) {
171 /* Note: we can add a free list for each size bucket
172 if stack re-use is important.
173 In this case this needs to be a folly::Singleton
174 to make sure the free list is cleaned up on fork.
176 TODO(t7351705): fix Singleton destruction order
182 * RAII Wrapper around a StackCache that calls
183 * CacheManager::giveBack() on destruction.
185 class StackCacheEntry {
187 explicit StackCacheEntry(size_t stackSize)
188 : stackCache_(folly::make_unique<StackCache>(stackSize)) {
191 StackCache& cache() const noexcept {
196 CacheManager::instance().giveBack(std::move(stackCache_));
200 std::unique_ptr<StackCache> stackCache_;
203 GuardPageAllocator::GuardPageAllocator(bool useGuardPages)
204 : useGuardPages_(useGuardPages) {
207 GuardPageAllocator::~GuardPageAllocator() = default;
209 unsigned char* GuardPageAllocator::allocate(size_t size) {
210 if (useGuardPages_ && !stackCache_) {
211 stackCache_ = CacheManager::instance().getStackCache(size);
215 auto p = stackCache_->cache().borrow(size);
220 return fallbackAllocator_.allocate(size);
223 void GuardPageAllocator::deallocate(unsigned char* limit, size_t size) {
224 if (!(stackCache_ && stackCache_->cache().giveBack(limit, size))) {
225 fallbackAllocator_.deallocate(limit, size);