2 * Copyright 2014 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
#include <folly/detail/MemoryIdler.h>

#include <folly/Logging.h>
#include <folly/Malloc.h>
#include <folly/ScopeGuard.h>
#include <folly/detail/CacheLocality.h>

#include <limits.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include <utility>

#include <sys/mman.h>
31 namespace folly { namespace detail {
/// Definition of the static idle-timeout knob declared in MemoryIdler.h.
/// Wrapped in AtomicStruct so it can be read and updated concurrently;
/// defaults to 5 seconds.
AtomicStruct<std::chrono::steady_clock::duration>
MemoryIdler::defaultIdleTimeout(std::chrono::seconds(5));
37 /// Calls mallctl, optionally reading and/or writing an unsigned value
38 /// if in and/or out is non-null. Logs on error
39 static unsigned mallctlWrapper(const char* cmd, const unsigned* in,
41 size_t outLen = sizeof(unsigned);
42 int err = mallctl(cmd,
43 out, out ? &outLen : nullptr,
44 const_cast<unsigned*>(in), in ? sizeof(unsigned) : 0);
46 FB_LOG_EVERY_MS(WARNING, 10000)
47 << "mallctl " << cmd << ": " << strerror(err) << " (" << err << ")";
52 void MemoryIdler::flushLocalMallocCaches() {
53 if (usingJEMalloc()) {
55 FB_LOG_EVERY_MS(ERROR, 10000) << "mallctl weak link failed";
59 // "tcache.flush" was renamed to "thread.tcache.flush" in jemalloc 3
60 (void)mallctlWrapper("thread.tcache.flush", nullptr, nullptr);
62 // By default jemalloc has 4 arenas per cpu, and then assigns each
63 // thread to one of those arenas. This means that in any service
64 // that doesn't perform a lot of context switching, the chances that
65 // another thread will be using the current thread's arena (and hence
66 // doing the appropriate dirty-page purging) are low. Some good
67 // tuned configurations (such as that used by hhvm) use fewer arenas
68 // and then pin threads to avoid contended access. In that case,
69 // purging the arenas is counter-productive. We use the heuristic
70 // that if narenas <= 2 * num_cpus then we shouldn't do anything here,
71 // which detects when the narenas has been reduced from the default
73 unsigned arenaForCurrent;
74 if (mallctlWrapper("arenas.narenas", nullptr, &narenas) == 0 &&
75 narenas > 2 * CacheLocality::system().numCpus &&
76 mallctlWrapper("thread.arena", nullptr, &arenaForCurrent) == 0) {
77 (void)mallctlWrapper("arenas.purge", &arenaForCurrent, nullptr);
// System page size, fetched once; used to page-align the madvise range below
static const size_t s_pageSize = sysconf(_SC_PAGESIZE);
// Per-thread cached stack bounds, populated lazily by fetchStackLimits():
// lowest usable stack address (just above the guard page — the stack grows
// down, so the guard page adds to the base address) ...
static FOLLY_TLS uintptr_t tls_stackLimit;
// ... and the number of usable bytes above tls_stackLimit.  Zero means
// "not yet fetched" (see the check in unmapUnusedStack)
static FOLLY_TLS size_t tls_stackSize;
89 static void fetchStackLimits() {
91 #if defined(_GNU_SOURCE) && defined(__linux__) // Linux+GNU extension
92 pthread_getattr_np(pthread_self(), &attr);
94 pthread_attr_init(&attr);
96 SCOPE_EXIT { pthread_attr_destroy(&attr); };
101 if ((err = pthread_attr_getstack(&attr, &addr, &rawSize))) {
102 // unexpected, but it is better to continue in prod than do nothing
103 FB_LOG_EVERY_MS(ERROR, 10000) << "pthread_attr_getstack error " << err;
108 assert(addr != nullptr);
109 assert(rawSize >= PTHREAD_STACK_MIN);
111 // glibc subtracts guard page from stack size, even though pthread docs
112 // seem to imply the opposite
114 if (pthread_attr_getguardsize(&attr, &guardSize) != 0) {
117 assert(rawSize > guardSize);
119 // stack goes down, so guard page adds to the base addr
120 tls_stackLimit = uintptr_t(addr) + guardSize;
121 tls_stackSize = rawSize - guardSize;
123 assert((tls_stackLimit & (s_pageSize - 1)) == 0);
126 FOLLY_NOINLINE static uintptr_t getStackPtr() {
128 auto rv = uintptr_t(&marker);
132 void MemoryIdler::unmapUnusedStack(size_t retain) {
133 if (tls_stackSize == 0) {
136 if (tls_stackSize <= std::max(size_t(1), retain)) {
137 // covers both missing stack info, and impossibly large retain
141 auto sp = getStackPtr();
142 assert(sp >= tls_stackLimit);
143 assert(sp - tls_stackLimit < tls_stackSize);
145 auto end = (sp - retain) & ~(s_pageSize - 1);
146 if (end <= tls_stackLimit) {
147 // no pages are eligible for unmapping
151 size_t len = end - tls_stackLimit;
152 assert((len & (s_pageSize - 1)) == 0);
153 if (madvise((void*)tls_stackLimit, len, MADV_DONTNEED) != 0) {
154 // It is likely that the stack vma hasn't been fully grown. In this
155 // case madvise will apply dontneed to the present vmas, then return
156 // errno of ENOMEM. We can also get an EAGAIN, theoretically.
157 // EINVAL means either an invalid alignment or length, or that some
158 // of the pages are locked or shared. Neither should occur.
159 assert(errno == EAGAIN || errno == ENOMEM);
165 void MemoryIdler::unmapUnusedStack(size_t retain) {