/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #include "GuardPageAllocator.h"
25 #include <folly/Singleton.h>
26 #include <folly/SpinLock.h>
27 #include <folly/Synchronized.h>
28 #include <folly/portability/SysMman.h>
29 #include <folly/portability/Unistd.h>
31 #include <glog/logging.h>

/**
 * Each stack with a guard page creates two memory mappings.
 * Since this is a limited resource, we don't want to create too many of these.
 *
 * The upper bound on the total number of mappings created
 * is kNumGuarded * kMaxInUse (see the worked example below the constants).
 */

/**
 * Number of guarded stacks per allocator instance
 */
constexpr size_t kNumGuarded = 100;

/**
 * Maximum number of allocator instances with guarded stacks enabled
 */
constexpr size_t kMaxInUse = 100;
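
/* Worked example (illustrative): with the defaults above, at most
 * kNumGuarded * kMaxInUse = 100 * 100 = 10,000 guarded stacks exist at
 * once, which stays comfortably below the default Linux per-process
 * mapping limit of 65530 (vm.max_map_count).
 */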

/**
 * A cache for kNumGuarded stacks of a given size
 *
 * Thread safe.
 */
class StackCache {
 public:
  explicit StackCache(size_t stackSize) : allocSize_(allocSize(stackSize)) {
    auto p = ::mmap(
        nullptr,
        allocSize_ * kNumGuarded,
        PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANONYMOUS,
        /* fd */ -1,
        /* offset */ 0);
    PCHECK(p != MAP_FAILED);
    storage_ = reinterpret_cast<unsigned char*>(p);

    /* Populate the free list; the bottommost (guard) page of each stack
       allocation is mprotect()ed lazily, on first borrow() */
    for (size_t i = 0; i < kNumGuarded; ++i) {
      auto allocBegin = storage_ + allocSize_ * i;
      freeList_.emplace_back(allocBegin, /* protected= */ false);
    }
  }

  unsigned char* borrow(size_t size) {
    std::lock_guard<folly::SpinLock> lg(lock_);

    assert(storage_);

    auto as = allocSize(size);
    if (as != allocSize_ || freeList_.empty()) {
      return nullptr;
    }

    auto p = freeList_.back().first;
    if (!freeList_.back().second) {
      /* First use of this stack: protect its guard page */
      PCHECK(0 == ::mprotect(p, pagesize(), PROT_NONE));
      SYNCHRONIZED(pages, protectedPages()) {
        pages.insert(reinterpret_cast<intptr_t>(p));
      }
    }
    freeList_.pop_back();

    /* We allocate the minimum number of pages required, plus a guard page.
       Since we use this for stack storage, the requested allocation is
       aligned at the top of the allocated pages, while the guard page is
       at the bottom.

               -- increasing addresses -->
             Guard page     Normal pages
            |xxxxxxxxxx|..........|..........|
            <- allocSize_ ------------------->
         p -^                <- size -------->
                          limit -^
    */
    auto limit = p + allocSize_ - size;
    assert(limit >= p + pagesize());
    return limit;
  }
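
  /* Worked example (illustrative, assuming 4 KiB pages): for size = 16384,
     allocSize_ = 4096 * (16384 / 4096 + 1) = 20480. borrow() returns
     limit = p + 20480 - 16384 = p + 4096, so the usable stack occupies
     [p + 4096, p + 20480) and sits directly above the PROT_NONE guard page
     at [p, p + 4096); overflowing the stack downward faults on the guard
     page instead of silently corrupting adjacent memory. */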

  bool giveBack(unsigned char* limit, size_t size) {
    std::lock_guard<folly::SpinLock> lg(lock_);

    assert(storage_);

    auto as = allocSize(size);
    auto p = limit + size - as;
    if (p < storage_ || p >= storage_ + allocSize_ * kNumGuarded) {
      /* not one of our stacks; the caller falls back to its own allocator */
      return false;
    }

    assert(as == allocSize_);
    assert((p - storage_) % allocSize_ == 0);
    freeList_.emplace_back(p, /* protected= */ true);
    return true;
  }

  ~StackCache() {
    assert(storage_);
    SYNCHRONIZED(pages, protectedPages()) {
      for (const auto& item : freeList_) {
        pages.erase(reinterpret_cast<intptr_t>(item.first));
      }
    }
    PCHECK(0 == ::munmap(storage_, allocSize_ * kNumGuarded));
  }

  static bool isProtected(intptr_t addr) {
    // Use a read lock for reading.
    SYNCHRONIZED_CONST(pages, protectedPages()) {
      for (const auto& page : pages) {
        intptr_t pageEnd = page + pagesize();
        if (page <= addr && addr < pageEnd) {
          return true;
        }
      }
    }
    return false;
  }

 private:
  folly::SpinLock lock_;
  unsigned char* storage_{nullptr};
  size_t allocSize_{0};

  /**
   * LIFO free list. Each pair contains a stack pointer and a protected flag.
   */
  std::vector<std::pair<unsigned char*, bool>> freeList_;

  static size_t pagesize() {
    static const size_t pagesize = sysconf(_SC_PAGESIZE);
    return pagesize;
  }

  /* Returns a multiple of pagesize() large enough to store size + one
     guard page */
  static size_t allocSize(size_t size) {
    return pagesize() * ((size + pagesize() - 1) / pagesize() + 1);
  }
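
  /* Illustrative values, assuming pagesize() == 4096:
       allocSize(1)    == 4096 * (1 + 1) == 8192   (one page + guard page)
       allocSize(4096) == 4096 * (1 + 1) == 8192   (exact fit, still one page)
       allocSize(4097) == 4096 * (2 + 1) == 12288  (spills into a second page)
  */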

  static folly::Synchronized<std::unordered_set<intptr_t>>& protectedPages() {
    // Intentionally leaked: the SIGSEGV handler may consult this set after
    // static destructors have started running.
    static auto instance =
        new folly::Synchronized<std::unordered_set<intptr_t>>();
    return *instance;
  }
};

#ifndef _WIN32

namespace {

struct sigaction oldSigsegvAction;

void sigsegvSignalHandler(int signum, siginfo_t* info, void*) {
  if (signum != SIGSEGV) {
    std::cerr << "GuardPageAllocator signal handler called for signal: "
              << signum << std::endl;
    return;
  }

  if (info &&
      StackCache::isProtected(reinterpret_cast<intptr_t>(info->si_addr))) {
    std::cerr << "folly::fibers Fiber stack overflow detected." << std::endl;
  }

  // Restore the old signal handler and let it handle the signal.
  sigaction(signum, &oldSigsegvAction, nullptr);
  raise(signum);
}

bool isInJVM() {
  auto getCreated = dlsym(RTLD_DEFAULT, "JNI_GetCreatedJavaVMs");
  return getCreated != nullptr;
}

void installSignalHandler() {
  static std::once_flag onceFlag;
  std::call_once(onceFlag, []() {
    if (isInJVM()) {
      // Don't install the signal handler, since the JVM's internal SIGSEGV
      // handler doesn't work with SA_ONSTACK.
      return;
    }

    struct sigaction sa;
    memset(&sa, 0, sizeof(sa));
    sigemptyset(&sa.sa_mask);
    // By default signal handlers are run on the signaled thread's stack.
    // In case of stack overflow, running the SIGSEGV signal handler on
    // the same stack leads to another SIGSEGV and crashes the program.
    // Use SA_ONSTACK, so an alternate stack is used (only if configured
    // via sigaltstack).
    sa.sa_flags |= SA_SIGINFO | SA_ONSTACK;
    sa.sa_sigaction = &sigsegvSignalHandler;
    sigaction(SIGSEGV, &sa, &oldSigsegvAction);
  });
}
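
/* Illustrative sketch (hypothetical helper, not part of this file): for the
   SA_ONSTACK flag above to matter, each thread that should survive a stack
   overflow needs an alternate signal stack registered via sigaltstack(2),
   e.g.:

     void installAltStack() {
       static thread_local char altStack[64 * 1024];  // >= MINSIGSTKSZ
       stack_t ss;
       ss.ss_sp = altStack;
       ss.ss_size = sizeof(altStack);
       ss.ss_flags = 0;
       PCHECK(0 == ::sigaltstack(&ss, nullptr));
     }

   Without such a registration the handler runs on the overflowed stack and
   faults again. */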

} // namespace

#endif

class StackCacheEntry;

class CacheManager {
 public:
  static CacheManager& instance() {
    // Intentionally leaked to sidestep static destruction order issues.
    static auto inst = new CacheManager();
    return *inst;
  }

  std::unique_ptr<StackCacheEntry> getStackCache(size_t stackSize) {
    std::lock_guard<folly::SpinLock> lg(lock_);
    if (inUse_ < kMaxInUse) {
      ++inUse_;
      return folly::make_unique<StackCacheEntry>(stackSize);
    }

    return nullptr;
  }

 private:
  folly::SpinLock lock_;
  size_t inUse_{0};

  friend class StackCacheEntry;

  void giveBack(std::unique_ptr<StackCache> /* stackCache_ */) {
    // Take the lock: inUse_ is also modified under lock_ in getStackCache().
    std::lock_guard<folly::SpinLock> lg(lock_);
    assert(inUse_ > 0);
    --inUse_;
    /* Note: we can add a free list for each size bucket
       if stack re-use is important.
       In that case this needs to be a folly::Singleton
       to make sure the free list is cleaned up on fork.

       TODO(t7351705): fix Singleton destruction order
    */
  }
};

/*
 * RAII wrapper around a StackCache that calls
 * CacheManager::giveBack() on destruction.
 */
class StackCacheEntry {
 public:
  explicit StackCacheEntry(size_t stackSize)
      : stackCache_(folly::make_unique<StackCache>(stackSize)) {}

  StackCache& cache() const noexcept {
    return *stackCache_;
  }

  ~StackCacheEntry() {
    CacheManager::instance().giveBack(std::move(stackCache_));
  }

 private:
  std::unique_ptr<StackCache> stackCache_;
};

GuardPageAllocator::GuardPageAllocator(bool useGuardPages)
    : useGuardPages_(useGuardPages) {
#ifndef _WIN32
  installSignalHandler();
#endif
}

GuardPageAllocator::~GuardPageAllocator() = default;

unsigned char* GuardPageAllocator::allocate(size_t size) {
  if (useGuardPages_ && !stackCache_) {
    stackCache_ = CacheManager::instance().getStackCache(size);
  }

  if (stackCache_) {
    auto p = stackCache_->cache().borrow(size);
    if (p != nullptr) {
      return p;
    }
  }
  return fallbackAllocator_.allocate(size);
}

void GuardPageAllocator::deallocate(unsigned char* limit, size_t size) {
  if (!(stackCache_ && stackCache_->cache().giveBack(limit, size))) {
    fallbackAllocator_.deallocate(limit, size);
  }
}

} // namespace fibers
} // namespace folly
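
// Usage sketch (illustrative; the 16 KiB figure is arbitrary): callers pair
// allocate() and deallocate() with the same limit/size, and the allocator
// transparently falls back to heap storage once guarded stacks run out:
//
//   folly::fibers::GuardPageAllocator alloc(/* useGuardPages= */ true);
//   unsigned char* limit = alloc.allocate(16 * 1024);
//   // ... run a fiber whose stack grows down toward `limit` ...
//   alloc.deallocate(limit, 16 * 1024);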