/*
 * Copyright 2017 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 */
#include <folly/detail/MemoryIdler.h>

#include <folly/Logging.h>
#include <folly/Portability.h>
#include <folly/ScopeGuard.h>
#include <folly/concurrency/CacheLocality.h>
#include <folly/memory/MallctlHelper.h>
#include <folly/memory/Malloc.h>
#include <folly/portability/PThread.h>
#include <folly/portability/SysMman.h>
#include <folly/portability/Unistd.h>

#include <limits.h>
#include <stdio.h>
#include <string.h>

#include <stdexcept>
#include <utility>
namespace folly { namespace detail {
AtomicStruct<std::chrono::steady_clock::duration>
MemoryIdler::defaultIdleTimeout(std::chrono::seconds(5));
-
-// Calls mallctl, optionally reading a value of type <T> if out is
-// non-null. Logs on error.
-template <typename T>
-static int mallctlRead(const char* cmd, T* out) {
- size_t outLen = sizeof(T);
- int err = mallctl(cmd,
- out, out ? &outLen : nullptr,
- nullptr, 0);
- if (err != 0) {
- FB_LOG_EVERY_MS(WARNING, 10000)
- << "mallctl " << cmd << ": " << strerror(err) << " (" << err << ")";
- }
- return err;
-}
-
-static int mallctlCall(const char* cmd) {
- // Use <unsigned> rather than <void> to avoid sizeof(void).
- return mallctlRead<unsigned>(cmd, nullptr);
-}
-
void MemoryIdler::flushLocalMallocCaches() {
- if (usingJEMalloc()) {
- if (!mallctl || !mallctlnametomib || !mallctlbymib) {
- FB_LOG_EVERY_MS(ERROR, 10000) << "mallctl* weak link failed";
- return;
- }
+ if (!usingJEMalloc()) {
+ return;
+ }
+ if (!mallctl || !mallctlnametomib || !mallctlbymib) {
+ FB_LOG_EVERY_MS(ERROR, 10000) << "mallctl* weak link failed";
+ return;
+ }
- // "tcache.flush" was renamed to "thread.tcache.flush" in jemalloc 3
- mallctlCall("thread.tcache.flush");
+ try {
+ // Not using mallctlCall as this will fail if tcache is disabled.
+ mallctl("thread.tcache.flush", nullptr, nullptr, nullptr, 0);
// By default jemalloc has 4 arenas per cpu, and then assigns each
// thread to one of those arenas. This means that in any service
unsigned arenaForCurrent;
size_t mib[3];
size_t miblen = 3;
- if (mallctlRead<unsigned>("opt.narenas", &narenas) == 0 &&
- narenas > 2 * CacheLocality::system().numCpus &&
- mallctlRead<unsigned>("thread.arena", &arenaForCurrent) == 0 &&
+
+ mallctlRead("opt.narenas", &narenas);
+ mallctlRead("thread.arena", &arenaForCurrent);
+ if (narenas > 2 * CacheLocality::system().numCpus &&
mallctlnametomib("arena.0.purge", mib, &miblen) == 0) {
- mib[1] = size_t(arenaForCurrent);
+ mib[1] = static_cast<size_t>(arenaForCurrent);
mallctlbymib(mib, miblen, nullptr, nullptr, nullptr, 0);
}
+ } catch (const std::runtime_error& ex) {
+ FB_LOG_EVERY_MS(WARNING, 10000) << ex.what();
}
}
// Stack madvise isn't Linux or glibc specific, but the system calls
// and arithmetic (and bug compatibility) are not portable. The set of
// platforms could be increased if it was useful.
-#if (FOLLY_X64 || FOLLY_PPC64 ) && defined(_GNU_SOURCE) && defined(__linux__)
+#if (FOLLY_X64 || FOLLY_PPC64) && defined(_GNU_SOURCE) && \
+ defined(__linux__) && !FOLLY_MOBILE && !FOLLY_SANITIZE_ADDRESS
static FOLLY_TLS uintptr_t tls_stackLimit;
static FOLLY_TLS size_t tls_stackSize;
assert(rawSize > guardSize);
// stack goes down, so guard page adds to the base addr
- tls_stackLimit = uintptr_t(addr) + guardSize;
+ tls_stackLimit = reinterpret_cast<uintptr_t>(addr) + guardSize;
tls_stackSize = rawSize - guardSize;
assert((tls_stackLimit & (pageSize() - 1)) == 0);
FOLLY_NOINLINE static uintptr_t getStackPtr() {
char marker;
- auto rv = uintptr_t(&marker);
+ auto rv = reinterpret_cast<uintptr_t>(&marker);
return rv;
}
if (tls_stackSize == 0) {
fetchStackLimits();
}
- if (tls_stackSize <= std::max(size_t(1), retain)) {
+ if (tls_stackSize <= std::max(static_cast<size_t>(1), retain)) {
// covers both missing stack info, and impossibly large retain
return;
}
#else
-void MemoryIdler::unmapUnusedStack(size_t retain) {
-}
+void MemoryIdler::unmapUnusedStack(size_t /* retain */) {}
#endif
-}}
+} // namespace detail
+} // namespace folly