/*
- * Copyright 2016 Facebook, Inc.
+ * Copyright 2015-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*/
#include "GuardPageAllocator.h"
-#include <unistd.h>
+#ifndef _WIN32
+#include <dlfcn.h>
+#endif
+#include <signal.h>
+#include <cstring>
+#include <iostream>
#include <mutex>
#include <folly/Singleton.h>
#include <folly/SpinLock.h>
+#include <folly/Synchronized.h>
#include <folly/portability/SysMman.h>
+#include <folly/portability/Unistd.h>
#include <glog/logging.h>
auto p = freeList_.back().first;
if (!freeList_.back().second) {
PCHECK(0 == ::mprotect(p, pagesize(), PROT_NONE));
+ SYNCHRONIZED(pages, protectedPages()) {
+ pages.insert(reinterpret_cast<intptr_t>(p));
+ }
}
freeList_.pop_back();
+  // Tears down the arena: first unregisters every guard page belonging to
+  // this cache from the global protected-page set (so the SIGSEGV handler
+  // never consults a stale address after the unmap), then releases the
+  // whole mapping in one munmap call.
  ~StackCache() {
    assert(storage_);
+    SYNCHRONIZED(pages, protectedPages()) {
+      for (const auto& item : freeList_) {
+        pages.erase(reinterpret_cast<intptr_t>(item.first));
+      }
+    }
    PCHECK(0 == ::munmap(storage_, allocSize_ * kNumGuarded));
  }
+  // Returns true if |addr| lies inside any currently-registered guard page.
+  // Called from the SIGSEGV handler to distinguish a fiber stack overflow
+  // from an unrelated segfault.
+  // NOTE(review): this acquires a read lock and linearly scans the set from
+  // inside a signal handler, which is not async-signal-safe and could
+  // deadlock if the faulting thread holds the write lock; presumably this
+  // is accepted because the process is about to re-raise and die — confirm.
+  static bool isProtected(intptr_t addr) {
+    // Use a read lock for reading.
+    SYNCHRONIZED_CONST(pages, protectedPages()) {
+      for (const auto& page : pages) {
+        intptr_t pageEnd = intptr_t(page + pagesize());
+        if (page <= addr && addr < pageEnd) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
private:
folly::SpinLock lock_;
unsigned char* storage_{nullptr};
std::vector<std::pair<unsigned char*, bool>> freeList_;
+  // System page size, queried once via sysconf and cached. The explicit
+  // size_t cast documents the long -> size_t conversion (the value is
+  // always positive for _SC_PAGESIZE) and silences sign warnings.
  static size_t pagesize() {
-    static const size_t pagesize = sysconf(_SC_PAGESIZE);
+    static const size_t pagesize = size_t(sysconf(_SC_PAGESIZE));
    return pagesize;
  }
+  // Bytes reserved per stack: the requested size rounded up to whole
+  // pages, plus one extra page that serves as the guard page.
  static size_t allocSize(size_t size) {
    return pagesize() * ((size + pagesize() - 1) / pagesize() + 1);
  }
+
+  // Global registry of guard-page addresses consulted by the SIGSEGV
+  // handler. Deliberately heap-allocated and never deleted so it stays
+  // valid even during static destruction, when a fault may still occur.
+  static folly::Synchronized<std::unordered_set<intptr_t>>& protectedPages() {
+    static auto instance =
+        new folly::Synchronized<std::unordered_set<intptr_t>>();
+    return *instance;
+  }
};
+#ifndef _WIN32
+
+namespace {
+
+struct sigaction oldSigsegvAction;
+
+// SIGSEGV handler: if the faulting address falls in a registered guard
+// page, print a fiber stack-overflow diagnostic, then restore the previous
+// handler and re-raise so the default crash behavior still happens.
+// NOTE(review): std::cerr (and the lock taken by isProtected) are not
+// async-signal-safe; this is best-effort output on a path that terminates
+// the process anyway — confirm that trade-off is intended.
+void sigsegvSignalHandler(int signum, siginfo_t* info, void*) {
+  if (signum != SIGSEGV) {
+    std::cerr << "GuardPageAllocator signal handler called for signal: "
+              << signum;
+    return;
+  }
+
+  if (info &&
+      StackCache::isProtected(reinterpret_cast<intptr_t>(info->si_addr))) {
+    std::cerr << "folly::fibers Fiber stack overflow detected." << std::endl;
+  }
+
+  // Restore old signal handler and let it handle the signal.
+  sigaction(signum, &oldSigsegvAction, nullptr);
+  raise(signum);
+}
+
+// Detects whether a JVM may be present in this process by probing the
+// global symbol table for JNI_GetCreatedJavaVMs. The JVM installs its own
+// SIGSEGV handler, which does not tolerate SA_ONSTACK, so ours must not
+// be installed alongside it.
+bool isInJVM() {
+  auto getCreated = dlsym(RTLD_DEFAULT, "JNI_GetCreatedJavaVMs");
+  // Explicit null check instead of the implicit void* -> bool conversion.
+  return getCreated != nullptr;
+}
+
+// Installs sigsegvSignalHandler for SIGSEGV exactly once per process
+// (thread-safe via std::call_once). Skipped entirely when a JVM is
+// detected, since the JVM's internal handler cannot coexist with ours.
+void installSignalHandler() {
+  static std::once_flag onceFlag;
+  std::call_once(onceFlag, []() {
+    if (isInJVM()) {
+      // Don't install signal handler, since JVM internal signal handler
+      // doesn't work with SA_ONSTACK
+      return;
+    }
+
+    struct sigaction sa;
+    memset(&sa, 0, sizeof(sa));  // needs <cstring>; was a transitive include
+    sigemptyset(&sa.sa_mask);
+    // By default signal handlers are run on the signaled thread's stack.
+    // In case of stack overflow running the SIGSEGV signal handler on
+    // the same stack leads to another SIGSEGV and crashes the program.
+    // Use SA_ONSTACK, so alternate stack is used (only if configured via
+    // sigaltstack).
+    sa.sa_flags |= SA_SIGINFO | SA_ONSTACK;
+    sa.sa_sigaction = &sigsegvSignalHandler;
+    sigaction(SIGSEGV, &sa, &oldSigsegvAction);
+  });
+}
+} // namespace
+
+#endif
+
class CacheManager {
public:
static CacheManager& instance() {
std::lock_guard<folly::SpinLock> lg(lock_);
if (inUse_ < kMaxInUse) {
++inUse_;
- return folly::make_unique<StackCacheEntry>(stackSize);
+ return std::make_unique<StackCacheEntry>(stackSize);
}
return nullptr;
class StackCacheEntry {
public:
+  // Owns one heap-allocated StackCache sized for |stackSize| stacks;
+  // folly::make_unique replaced by std::make_unique (C++14 migration).
  explicit StackCacheEntry(size_t stackSize)
-      : stackCache_(folly::make_unique<StackCache>(stackSize)) {}
+      : stackCache_(std::make_unique<StackCache>(stackSize)) {}
StackCache& cache() const noexcept {
return *stackCache_;
};
+// Construction now additionally installs the process-wide stack-overflow
+// SIGSEGV handler (POSIX only; compiled out on Windows). Installation is
+// idempotent, so constructing multiple allocators is safe.
GuardPageAllocator::GuardPageAllocator(bool useGuardPages)
-    : useGuardPages_(useGuardPages) {}
+    : useGuardPages_(useGuardPages) {
+#ifndef _WIN32
+  installSignalHandler();
+#endif
+}
GuardPageAllocator::~GuardPageAllocator() = default;
fallbackAllocator_.deallocate(limit, size);
}
}
-}
-} // folly::fibers
+} // namespace fibers
+} // namespace folly