/*
 * Copyright 2015 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <folly/detail/MemoryIdler.h>

#include <folly/Logging.h>
#include <folly/Malloc.h>
#include <folly/ScopeGuard.h>
#include <folly/detail/CacheLocality.h>

#include <limits.h>
#include <pthread.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include <algorithm>
#include <cassert>
#include <cerrno>

namespace folly { namespace detail {

AtomicStruct<std::chrono::steady_clock::duration>
MemoryIdler::defaultIdleTimeout(std::chrono::seconds(5));
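
// A hedged usage sketch (caller code, not part of this file): the timeout
// is an AtomicStruct, so a service could shrink the idle window, e.g.
//
//   MemoryIdler::defaultIdleTimeout.store(std::chrono::seconds(1));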

/// Calls mallctl, optionally reading and/or writing an unsigned value
/// if in and/or out is non-null.  Logs on error.
static int mallctlWrapper(const char* cmd, const unsigned* in, unsigned* out) {
  size_t outLen = sizeof(unsigned);
  int err = mallctl(cmd,
                    out, out ? &outLen : nullptr,
                    const_cast<unsigned*>(in), in ? sizeof(unsigned) : 0);
  if (err != 0) {
    FB_LOG_EVERY_MS(WARNING, 10000)
        << "mallctl " << cmd << ": " << strerror(err) << " (" << err << ")";
  }
  return err;
}

void MemoryIdler::flushLocalMallocCaches() {
  if (usingJEMalloc()) {
    if (!mallctl || !mallctlnametomib || !mallctlbymib) {
      FB_LOG_EVERY_MS(ERROR, 10000) << "mallctl* weak link failed";
      return;
    }

    // "tcache.flush" was renamed to "thread.tcache.flush" in jemalloc 3
    (void)mallctlWrapper("thread.tcache.flush", nullptr, nullptr);
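    // (A note on the semantics relied on here: thread.tcache.flush returns
    // the calling thread's tcache to its arena, so allocations freed by
    // this thread become reclaimable even while it sits idle.)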

    // By default jemalloc has 4 arenas per cpu, and then assigns each
    // thread to one of those arenas.  This means that in any service
    // that doesn't perform a lot of context switching, the chances that
    // another thread will be using the current thread's arena (and hence
    // doing the appropriate dirty-page purging) are low.  Some well
    // tuned configurations (such as that used by hhvm) use fewer arenas
    // and then pin threads to avoid contended access.  In that case,
    // purging the arenas is counter-productive.  We use the heuristic
    // that if narenas <= 2 * num_cpus then we shouldn't do anything here,
    // which detects when narenas has been reduced from the default.
    unsigned narenas;
    unsigned arenaForCurrent;
    size_t mib[3];
    size_t miblen = 3;
    if (mallctlWrapper("opt.narenas", nullptr, &narenas) == 0 &&
        narenas > 2 * CacheLocality::system().numCpus &&
        mallctlWrapper("thread.arena", nullptr, &arenaForCurrent) == 0 &&
        mallctlnametomib("arena.0.purge", mib, &miblen) == 0) {
      mib[1] = size_t(arenaForCurrent);
      mallctlbymib(mib, miblen, nullptr, nullptr, nullptr, 0);
    }
  }
}
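
// For reference, a sketch (not in the original file): the mib-based purge
// above is just the pre-resolved form of a string lookup.  The equivalent,
// slower name-based call would be roughly
//
//   char cmd[32];
//   snprintf(cmd, sizeof(cmd), "arena.%u.purge", arenaForCurrent);
//   mallctl(cmd, nullptr, nullptr, nullptr, 0);
//
// Resolving "arena.0.purge" once with mallctlnametomib and patching the
// arena index avoids re-parsing the name on every flush.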

// Stack madvise isn't Linux or glibc specific, but the system calls
// and arithmetic (and bug compatibility) are not portable.  The set of
// platforms could be increased if it was useful.
#if FOLLY_X64 && defined(_GNU_SOURCE) && defined(__linux__)

static const size_t s_pageSize = sysconf(_SC_PAGESIZE);
static FOLLY_TLS uintptr_t tls_stackLimit;
static FOLLY_TLS size_t tls_stackSize;

static void fetchStackLimits() {
  pthread_attr_t attr;
  pthread_getattr_np(pthread_self(), &attr);
  SCOPE_EXIT { pthread_attr_destroy(&attr); };

  void* addr;
  size_t rawSize;
  int err;
  if ((err = pthread_attr_getstack(&attr, &addr, &rawSize))) {
    // unexpected, but it is better to continue in prod than do nothing
    FB_LOG_EVERY_MS(ERROR, 10000) << "pthread_attr_getstack error " << err;
    assert(false);
    tls_stackSize = 1;  // make sure we don't try to fetch again
    return;
  }
  assert(addr != nullptr);
  assert(rawSize >= PTHREAD_STACK_MIN);

  // glibc subtracts the guard page from the stack size, even though the
  // pthread docs seem to imply the opposite
  size_t guardSize;
  if (pthread_attr_getguardsize(&attr, &guardSize) != 0) {
    guardSize = 0;
  }
  assert(rawSize > guardSize);

  // stack goes down, so the guard page adds to the base addr
  tls_stackLimit = uintptr_t(addr) + guardSize;
  tls_stackSize = rawSize - guardSize;

  assert((tls_stackLimit & (s_pageSize - 1)) == 0);
}
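
/// Estimates the current stack pointer by taking the address of a local
/// variable.  FOLLY_NOINLINE guarantees the marker gets a frame of its own,
/// at least as deep as the caller's, so the estimate errs toward retaining
/// pages rather than madvising live ones.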
FOLLY_NOINLINE static uintptr_t getStackPtr() {
  char marker;
  auto rv = uintptr_t(&marker);
  return rv;
}

void MemoryIdler::unmapUnusedStack(size_t retain) {
  if (tls_stackSize == 0) {
    fetchStackLimits();
  }
  if (tls_stackSize <= std::max(size_t(1), retain)) {
    // covers both missing stack info, and impossibly large retain
    return;
  }

  auto sp = getStackPtr();
  assert(sp >= tls_stackLimit);
  assert(sp - tls_stackLimit < tls_stackSize);

  auto end = (sp - retain) & ~(s_pageSize - 1);
  if (end <= tls_stackLimit) {
    // no pages are eligible for unmapping
    return;
  }

  size_t len = end - tls_stackLimit;
  assert((len & (s_pageSize - 1)) == 0);
  if (madvise((void*)tls_stackLimit, len, MADV_DONTNEED) != 0) {
    // It is likely that the stack vma hasn't been fully grown.  In this
    // case madvise will apply dontneed to the present vmas, then return
    // errno of ENOMEM.  We can also get an EAGAIN, theoretically.
    // EINVAL means either an invalid alignment or length, or that some
    // of the pages are locked or shared.  Neither should occur.
    assert(errno == EAGAIN || errno == ENOMEM);
  }
}
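
// A hedged usage sketch (the real entry points live in MemoryIdler.h): a
// thread that has been idle past defaultIdleTimeout might release its
// per-thread memory before blocking, e.g.
//
//   MemoryIdler::flushLocalMallocCaches();
//   MemoryIdler::unmapUnusedStack(1024 * 1024);  // retain ~1 MiB of stack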

#else

void MemoryIdler::unmapUnusedStack(size_t /* retain */) {
}

#endif

}}  // namespace folly::detail