From 3a529d4c3f90d46de7481e155a1d4729238e4442 Mon Sep 17 00:00:00 2001
From: Jason Evans
Date: Fri, 4 Sep 2015 09:12:18 -0700
Subject: [PATCH] Fix MemoryIdler::flushLocalMallocCaches() regression.

Summary: Fix MemoryIdler::flushLocalMallocCaches() to use the correct type
when querying jemalloc's "opt.narenas" mallctl.

Reviewed By: @jobenber

Differential Revision: D2414309
---
 folly/detail/MemoryIdler.cpp | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)

diff --git a/folly/detail/MemoryIdler.cpp b/folly/detail/MemoryIdler.cpp
index fcba197e..07ec2fbb 100644
--- a/folly/detail/MemoryIdler.cpp
+++ b/folly/detail/MemoryIdler.cpp
@@ -34,14 +34,14 @@ AtomicStruct<std::chrono::steady_clock::duration>
 MemoryIdler::defaultIdleTimeout(std::chrono::seconds(5));
 
-/// Calls mallctl, optionally reading and/or writing an unsigned value
-/// if in and/or out is non-null. Logs on error
-static unsigned mallctlWrapper(const char* cmd, const unsigned* in,
-                               unsigned* out) {
-  size_t outLen = sizeof(unsigned);
+// Calls mallctl, optionally reading a value of type <T> if out is
+// non-null. Logs on error.
+template <typename T>
+static int mallctlRead(const char* cmd, T* out) {
+  size_t outLen = sizeof(T);
   int err = mallctl(cmd,
                     out, out ? &outLen : nullptr,
-                    const_cast<unsigned*>(in), in ? sizeof(unsigned) : 0);
+                    nullptr, 0);
   if (err != 0) {
     FB_LOG_EVERY_MS(WARNING, 10000)
       << "mallctl " << cmd << ": " << strerror(err) << " (" << err << ")";
   }
@@ -49,6 +49,11 @@ static unsigned mallctlWrapper(const char* cmd, const unsigned* in,
   return err;
 }
 
+static int mallctlCall(const char* cmd) {
+  // Use <unsigned> rather than <void> to avoid sizeof(void).
+  return mallctlRead<unsigned>(cmd, nullptr);
+}
+
 void MemoryIdler::flushLocalMallocCaches() {
   if (usingJEMalloc()) {
     if (!mallctl || !mallctlnametomib || !mallctlbymib) {
@@ -57,7 +62,7 @@ void MemoryIdler::flushLocalMallocCaches() {
     }
 
     // "tcache.flush" was renamed to "thread.tcache.flush" in jemalloc 3
-    (void)mallctlWrapper("thread.tcache.flush", nullptr, nullptr);
+    mallctlCall("thread.tcache.flush");
 
     // By default jemalloc has 4 arenas per cpu, and then assigns each
     // thread to one of those arenas. This means that in any service
@@ -69,12 +74,13 @@ void MemoryIdler::flushLocalMallocCaches() {
     // purging the arenas is counter-productive. We use the heuristic
     // that if narenas <= 2 * num_cpus then we shouldn't do anything here,
     // which detects when the narenas has been reduced from the default
-    unsigned narenas, arenaForCurrent;
+    size_t narenas;
+    unsigned arenaForCurrent;
     size_t mib[3];
     size_t miblen = 3;
-    if (mallctlWrapper("opt.narenas", nullptr, &narenas) == 0 &&
+    if (mallctlRead("opt.narenas", &narenas) == 0 &&
         narenas > 2 * CacheLocality::system().numCpus &&
-        mallctlWrapper("thread.arena", nullptr, &arenaForCurrent) == 0 &&
+        mallctlRead("thread.arena", &arenaForCurrent) == 0 &&
         mallctlnametomib("arena.0.purge", mib, &miblen) == 0) {
       mib[1] = size_t(arenaForCurrent);
       mallctlbymib(mib, miblen, nullptr, nullptr, nullptr, 0);
-- 
2.34.1
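
Context for the fix (not part of the patch): jemalloc's mallctl() checks that the caller's out-buffer length matches the size of the control's value type, so a wrapper hard-coded to unsigned fails with EINVAL once it is used to read "opt.narenas", which the jemalloc version targeted here exposes as a size_t. Below is a minimal standalone sketch of the read pattern that the new mallctlRead<T>() template wraps; it assumes jemalloc's public header is available and that "opt.narenas" is a size_t, as in the patch.

// Sketch only: read "opt.narenas" with an out-buffer whose size matches the
// control's value type. A mismatched size (e.g. unsigned vs. size_t) makes
// mallctl() return EINVAL, which is the regression the patch addresses.
#include <jemalloc/jemalloc.h>

#include <cstdio>
#include <cstring>

int main() {
  size_t narenas = 0;            // assumed type of "opt.narenas" here
  size_t len = sizeof(narenas);  // jemalloc compares this against sizeof(size_t)
  int err = mallctl("opt.narenas", &narenas, &len, nullptr, 0);
  if (err != 0) {
    // jemalloc returns the error code directly rather than setting errno
    std::printf("mallctl opt.narenas: %s (%d)\n", std::strerror(err), err);
    return 1;
  }
  std::printf("opt.narenas = %zu\n", narenas);
  return 0;
}

(Build by linking against jemalloc, e.g. with -ljemalloc.)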