/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "GuardPageAllocator.h"

#include <cassert>
#include <memory>
#include <mutex>
#include <utility>
#include <vector>

#include <folly/Memory.h>
#include <folly/Singleton.h>
#include <folly/SpinLock.h>
#include <folly/portability/SysMman.h>
#include <folly/portability/Unistd.h>

#include <glog/logging.h>

namespace folly {
namespace fibers {
/**
 * Each stack with a guard page creates two memory mappings.
 * Since this is a limited resource, we don't want to create too many of these.
 *
 * The upper bound on the total number of mappings created
 * is kNumGuarded * kMaxInUse.
 */
/**
 * Number of guarded stacks per allocator instance
 */
constexpr size_t kNumGuarded = 100;
/**
 * Maximum number of allocator instances with guarded stacks enabled
 */
constexpr size_t kMaxInUse = 100;
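/* Worked bound (illustrative): with the defaults above there are at most
 * kNumGuarded * kMaxInUse = 100 * 100 = 10,000 guarded stacks alive at a
 * time, which keeps the resulting mappings well below the common Linux
 * default limit of vm.max_map_count = 65530 (a property of typical target
 * systems, not something this file checks). */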
/**
 * A cache for kNumGuarded stacks of a given size
 */
class StackCache {
 public:
  explicit StackCache(size_t stackSize) : allocSize_(allocSize(stackSize)) {
    auto p = ::mmap(
        nullptr,
        allocSize_ * kNumGuarded,
        PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANONYMOUS,
        -1,
        0);
    PCHECK(p != (void*)(-1));
    storage_ = reinterpret_cast<unsigned char*>(p);
    /* The bottommost page of every stack allocation becomes its guard
       page. Slots start unprotected; borrow() mprotect()s the guard page
       the first time a slot is handed out. */
    for (size_t i = 0; i < kNumGuarded; ++i) {
      auto allocBegin = storage_ + allocSize_ * i;
      freeList_.emplace_back(allocBegin, /* protected= */ false);
    }
  }
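  /**
   * Returns the usable base (limit) of a cached stack of at least `size`
   * bytes, or nullptr if `size` doesn't match this cache or the cache is
   * exhausted; the caller then falls back to an unguarded allocation.
   */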
  unsigned char* borrow(size_t size) {
    std::lock_guard<folly::SpinLock> lg(lock_);

    assert(storage_);

    auto as = allocSize(size);
    if (as != allocSize_ || freeList_.empty()) {
      return nullptr;
    }

    auto p = freeList_.back().first;
    if (!freeList_.back().second) {
      PCHECK(0 == ::mprotect(p, pagesize(), PROT_NONE));
    }
    freeList_.pop_back();
    /* We allocate the minimum number of pages required, plus a guard page.
       Since we use this for stack storage, the requested allocation is
       aligned at the top of the allocated pages, while the guard page is
       at the bottom.

           -- increasing addresses -->
         Guard page     Normal pages
        |xxxxxxxxxx|..........|..........|
        <- allocSize_ ------------------->
     p -^                 <- size -------->
                      limit -^
    */
    auto limit = p + allocSize_ - size;
    assert(limit >= p + pagesize());
    return limit;
  }
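  /* Worked example (illustrative numbers, assuming 4 KiB pages): for
     borrow(8000), allocSize(8000) = 4096 * (2 + 1) = 12288. With p the
     slot base, the guard page is [p, p + 4096) and the returned
     limit = p + 12288 - 8000 = p + 4288, so the stack occupies
     [limit, p + 12288) and overflowing below limit soon faults on the
     guard page instead of corrupting the neighboring slot. */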
  bool giveBack(unsigned char* limit, size_t size) {
    std::lock_guard<folly::SpinLock> lg(lock_);

    assert(storage_);

    auto as = allocSize(size);
    auto p = limit + size - as;
    if (p < storage_ || p >= storage_ + allocSize_ * kNumGuarded) {
      /* not one of our slots; let the fallback allocator handle it */
      return false;
    }

    assert(as == allocSize_);
    assert((p - storage_) % allocSize_ == 0);
    freeList_.emplace_back(p, /* protected= */ true);
    return true;
  }
  ~StackCache() {
    assert(storage_);
    PCHECK(0 == ::munmap(storage_, allocSize_ * kNumGuarded));
  }
 private:
  folly::SpinLock lock_;
  unsigned char* storage_{nullptr};
  size_t allocSize_{0};
  /**
   * LIFO free list. Each pair holds a slot's base pointer and a flag
   * recording whether that slot's guard page has been mprotect()'d yet.
   */
  std::vector<std::pair<unsigned char*, bool>> freeList_;
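  /* Design note (inferred from borrow()/giveBack() above): treating the
     list as LIFO means recently returned slots, whose guard pages are
     already protected, are reused first, so each slot pays the mprotect()
     syscall at most once. */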
  static size_t pagesize() {
    static const size_t pagesize = sysconf(_SC_PAGESIZE);
    return pagesize;
  }
  /* Returns a multiple of pagesize() large enough to store `size` bytes
     plus one guard page */
  static size_t allocSize(size_t size) {
    return pagesize() * ((size + pagesize() - 1) / pagesize() + 1);
  }
};
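/* allocSize() worked example (assuming 4 KiB pages): size = 4096 yields
   4096 * (1 + 1) = 8192 (one usable page plus the guard page), while
   size = 4097 spills into a second usable page and yields 12288. */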
class StackCacheEntry;

class CacheManager {
 public:
  static CacheManager& instance() {
    static auto inst = new CacheManager();
    return *inst;
  }
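  /* Note: `inst` is intentionally leaked rather than destroyed at exit,
     sidestepping the Singleton destruction-order issue referenced in the
     TODO in giveBack() below. */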
  std::unique_ptr<StackCacheEntry> getStackCache(size_t stackSize) {
    std::lock_guard<folly::SpinLock> lg(lock_);
    if (inUse_ < kMaxInUse) {
      ++inUse_;
      return folly::make_unique<StackCacheEntry>(stackSize);
    }

    return nullptr;
  }
 private:
  folly::SpinLock lock_;
  size_t inUse_{0};

  friend class StackCacheEntry;
  void giveBack(std::unique_ptr<StackCache> /* stackCache_ */) {
    assert(inUse_ > 0);
    --inUse_;
    /* Note: we can add a free list for each size bucket
       if stack re-use is important.
       In this case this needs to be a folly::Singleton
       to make sure the free list is cleaned up on fork.

       TODO(t7351705): fix Singleton destruction order
    */
  }
};
/**
 * RAII wrapper around a StackCache that calls
 * CacheManager::giveBack() on destruction.
 */
class StackCacheEntry {
 public:
  explicit StackCacheEntry(size_t stackSize)
      : stackCache_(folly::make_unique<StackCache>(stackSize)) {}

  StackCache& cache() const noexcept {
    return *stackCache_;
  }

  ~StackCacheEntry() {
    CacheManager::instance().giveBack(std::move(stackCache_));
  }

 private:
  std::unique_ptr<StackCache> stackCache_;
};
GuardPageAllocator::GuardPageAllocator(bool useGuardPages)
    : useGuardPages_(useGuardPages) {}

GuardPageAllocator::~GuardPageAllocator() = default;
unsigned char* GuardPageAllocator::allocate(size_t size) {
  if (useGuardPages_ && !stackCache_) {
    stackCache_ = CacheManager::instance().getStackCache(size);
  }

  if (stackCache_) {
    auto p = stackCache_->cache().borrow(size);
    if (p != nullptr) {
      return p;
    }
  }
  return fallbackAllocator_.allocate(size);
}
void GuardPageAllocator::deallocate(unsigned char* limit, size_t size) {
  if (!(stackCache_ && stackCache_->cache().giveBack(limit, size))) {
    fallbackAllocator_.deallocate(limit, size);
  }
}

} // namespace fibers
} // namespace folly
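// Usage sketch (illustrative; kStackSize is a hypothetical constant, not
// part of this file):
//
//   folly::fibers::GuardPageAllocator alloc(true); // guard pages enabled
//   unsigned char* limit = alloc.allocate(kStackSize);
//   // The stack grows down from limit + kStackSize; running past limit
//   // soon hits the mprotect()'d guard page and faults (SIGSEGV) rather
//   // than silently corrupting a neighboring stack slot.
//   alloc.deallocate(limit, kStackSize);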