From: Qi Wang
Date: Mon, 23 Oct 2017 19:09:40 +0000 (-0700)
Subject: MemoryIdler: use mallctl directly for tcache.flush
X-Git-Tag: v2017.10.30.00~27
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=12361241531946a948924baca4075081da16db55;p=folly.git

MemoryIdler: use mallctl directly for tcache.flush

Summary: tcache.flush may fail if tcache is disabled. Avoid using mallctlCall, which throws on error.

Reviewed By: davidtgoldblatt

Differential Revision: D6115419

fbshipit-source-id: 39411c80af08dc7c855efd43297809b749f935bf
---

diff --git a/folly/detail/MemoryIdler.cpp b/folly/detail/MemoryIdler.cpp
index cef21a13..3eb4bad6 100644
--- a/folly/detail/MemoryIdler.cpp
+++ b/folly/detail/MemoryIdler.cpp
@@ -46,7 +46,8 @@ void MemoryIdler::flushLocalMallocCaches() {
   }
 
   try {
-    mallctlCall("thread.tcache.flush");
+    // Not using mallctlCall as this will fail if tcache is disabled.
+    mallctl("thread.tcache.flush", nullptr, nullptr, nullptr, 0);
 
     // By default jemalloc has 4 arenas per cpu, and then assigns each
     // thread to one of those arenas.  This means that in any service
@@ -67,7 +68,7 @@ void MemoryIdler::flushLocalMallocCaches() {
     mallctlRead("thread.arena", &arenaForCurrent);
     if (narenas > 2 * CacheLocality::system().numCpus &&
         mallctlnametomib("arena.0.purge", mib, &miblen) == 0) {
-      mib[1] = size_t(arenaForCurrent);
+      mib[1] = static_cast<size_t>(arenaForCurrent);
       mallctlbymib(mib, miblen, nullptr, nullptr, nullptr, 0);
     }
   } catch (const std::runtime_error& ex) {
@@ -117,7 +118,7 @@ static void fetchStackLimits() {
   assert(rawSize > guardSize);
 
   // stack goes down, so guard page adds to the base addr
-  tls_stackLimit = uintptr_t(addr) + guardSize;
+  tls_stackLimit = reinterpret_cast<uintptr_t>(addr) + guardSize;
   tls_stackSize = rawSize - guardSize;
 
   assert((tls_stackLimit & (pageSize() - 1)) == 0);
@@ -125,7 +126,7 @@ static void fetchStackLimits() {
 
 FOLLY_NOINLINE static uintptr_t getStackPtr() {
   char marker;
-  auto rv = uintptr_t(&marker);
+  auto rv = reinterpret_cast<uintptr_t>(&marker);
   return rv;
 }
 
@@ -133,7 +134,7 @@ void MemoryIdler::unmapUnusedStack(size_t retain) {
   if (tls_stackSize == 0) {
     fetchStackLimits();
   }
-  if (tls_stackSize <= std::max(size_t(1), retain)) {
+  if (tls_stackSize <= std::max(static_cast<size_t>(1), retain)) {
    // covers both missing stack info, and impossibly large retain
     return;
   }
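
Editor's note: the sketch below is hypothetical illustration, not folly's actual helper; the name mallctlCallOrThrow is invented, standing in for folly's mallctlCall. jemalloc's mallctl returns an errno-style code, and when tcache is disabled (e.g. via MALLOC_CONF=tcache:false) "thread.tcache.flush" reports an error even though there is simply nothing to flush. A wrapper that throws on any nonzero return turns that benign condition into an exception, which is why the commit calls mallctl directly and ignores the result.

    #include <jemalloc/jemalloc.h>

    #include <stdexcept>
    #include <string>

    // Throwing wrapper in the spirit of folly's mallctlCall: any nonzero
    // return from mallctl becomes a std::runtime_error.
    void mallctlCallOrThrow(const char* cmd) {
      int err = mallctl(cmd, nullptr, nullptr, nullptr, 0);
      if (err != 0) {
        throw std::runtime_error(
            std::string("mallctl ") + cmd + " failed: " + std::to_string(err));
      }
    }

    void flushTcacheExample() {
      // With tcache disabled, the wrapper turns a benign "nothing to flush"
      // condition into an exception:
      //   mallctlCallOrThrow("thread.tcache.flush");  // may throw
      //
      // The direct call ignores the return value, treating a disabled
      // tcache as a no-op, which is the behavior the commit wants:
      mallctl("thread.tcache.flush", nullptr, nullptr, nullptr, 0);
    }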
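
The arena-purge hunk (@@ -67,7 +68,7 @@) can be read in isolation as follows. This is a minimal standalone sketch assuming jemalloc's stock mallctl/mallctlnametomib/mallctlbymib API rather than folly's mallctlRead wrapper: resolve "arena.0.purge" to a mib (management information base) once, patch the calling thread's arena index into component 1, then purge only that arena.

    #include <jemalloc/jemalloc.h>

    // Purge only the arena the calling thread is assigned to, mirroring
    // the arena.0.purge logic in the diff above.
    void purgeCurrentThreadArenaExample() {
      unsigned arena = 0;
      size_t len = sizeof(arena);
      if (mallctl("thread.arena", &arena, &len, nullptr, 0) != 0) {
        return;  // could not read this thread's arena index; skip the purge
      }

      size_t mib[3];
      size_t miblen = 3;
      if (mallctlnametomib("arena.0.purge", mib, &miblen) != 0) {
        return;  // name-to-mib translation failed; skip the purge
      }

      // mib[1] is the "<i>" component of "arena.<i>.purge"; replace the
      // placeholder 0 with the real arena index before issuing the purge.
      mib[1] = static_cast<size_t>(arena);
      mallctlbymib(mib, miblen, nullptr, nullptr, nullptr, 0);
    }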