X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=folly%2FMemoryMapping.cpp;h=ae4cac71aeaba2487ec745840aa604ea1daf4179;hb=461255b2148ebcff1a37c440d32c31e69e15e46a;hp=43889257b5aa6c307a4cc9785264f4d1ab95e0f6;hpb=d2eda017c492baf468ea9cd51d5618d56b6ca900;p=folly.git

diff --git a/folly/MemoryMapping.cpp b/folly/MemoryMapping.cpp
index 43889257..ae4cac71 100644
--- a/folly/MemoryMapping.cpp
+++ b/folly/MemoryMapping.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright 2015 Facebook, Inc.
+ * Copyright 2017 Facebook, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -15,22 +15,39 @@
  */
 
 #include 
+
+#include 
+#include 
+#include 
+
 #include 
-#include 
+#include 
+#include 
 
 #ifdef __linux__
 #include 
 #endif
 
 #include 
-#include 
 #include 
 #include 
-#include 
 
-DEFINE_int64(mlock_chunk_size, 1 << 20, // 1MB
+static constexpr ssize_t kDefaultMlockChunkSize =
+#ifndef _MSC_VER
+    // Linux implementations of unmap/mlock/munlock take a kernel
+    // semaphore and block other threads from doing other memory
+    // operations. Split the operations in chunks.
+    (1 << 20) // 1MB
+#else // _MSC_VER
+    // MSVC doesn't have this problem, and calling munmap many times
+    // with the same address is a bad idea with the windows implementation.
+    (-1)
+#endif // _MSC_VER
+    ;
+
+DEFINE_int64(mlock_chunk_size, kDefaultMlockChunkSize,
             "Maximum bytes to mlock/munlock/munmap at once "
-             "(will be rounded up to PAGESIZE)");
+             "(will be rounded up to PAGESIZE). Ignored if negative.");
 
 #ifndef MAP_POPULATE
 #define MAP_POPULATE 0
@@ -77,8 +94,7 @@ void getDeviceOptions(dev_t device, off_t& pageSize, bool& autoExtend) {
   }
 }
 #else
-inline void getDeviceOptions(dev_t device, off_t& pageSize,
-                             bool& autoExtend) { }
+inline void getDeviceOptions(dev_t, off_t&, bool&) {}
 #endif
 
 } // namespace
@@ -112,7 +128,7 @@ void MemoryMapping::init(off_t offset, off_t length) {
   }
 
   if (pageSize == 0) {
-    pageSize = sysconf(_SC_PAGESIZE);
+    pageSize = off_t(sysconf(_SC_PAGESIZE));
   }
 
   CHECK_GT(pageSize, 0);
@@ -120,7 +136,7 @@ void MemoryMapping::init(off_t offset, off_t length) {
   CHECK_GE(offset, 0);
 
   // Round down the start of the mapped region
-  size_t skipStart = offset % pageSize;
+  off_t skipStart = offset % pageSize;
   offset -= skipStart;
 
   mapLength_ = length;
@@ -171,13 +187,13 @@ void MemoryMapping::init(off_t offset, off_t length) {
               (options_.writable ? PROT_WRITE : 0));
     }
 
-    unsigned char* start = static_cast<unsigned char*>(
-        mmap(options_.address, mapLength_, prot, flags, file_.fd(), offset));
+    unsigned char* start = static_cast<unsigned char*>(mmap(
+        options_.address, size_t(mapLength_), prot, flags, file_.fd(), offset));
     PCHECK(start != MAP_FAILED)
       << " offset=" << offset
       << " length=" << mapLength_;
     mapStart_ = start;
-    data_.reset(start + skipStart, length);
+    data_.reset(start + skipStart, size_t(length));
   }
 }
 
@@ -189,7 +205,7 @@ off_t memOpChunkSize(off_t length, off_t pageSize) {
     return chunkSize;
   }
 
-  chunkSize = FLAGS_mlock_chunk_size;
+  chunkSize = off_t(FLAGS_mlock_chunk_size);
   off_t r = chunkSize % pageSize;
   if (r) {
     chunkSize += (pageSize - r);
@@ -207,14 +223,14 @@ off_t memOpChunkSize(off_t length, off_t pageSize) {
 bool memOpInChunks(std::function<int(void*, size_t)> op, void* mem,
                    size_t bufSize, off_t pageSize,
                    size_t& amountSucceeded) {
-  // unmap/mlock/munlock take a kernel semaphore and block other threads from
-  // doing other memory operations. If the size of the buffer is big the
+  // Linux' unmap/mlock/munlock take a kernel semaphore and block other threads
+  // from doing other memory operations. If the size of the buffer is big the
   // semaphore can be down for seconds (for benchmarks see
   // http://kostja-osipov.livejournal.com/42963.html). Doing the operations in
   // chunks breaks the locking into intervals and lets other threads do memory
   // operations of their own.
 
-  size_t chunkSize = memOpChunkSize(bufSize, pageSize);
+  size_t chunkSize = size_t(memOpChunkSize(off_t(bufSize), pageSize));
 
   char* addr = static_cast<char*>(mem);
   amountSucceeded = 0;
@@ -234,18 +250,22 @@ bool memOpInChunks(std::function<int(void*, size_t)> op,
 bool MemoryMapping::mlock(LockMode lock) {
   size_t amountSucceeded = 0;
-  locked_ = memOpInChunks(::mlock, mapStart_, mapLength_, options_.pageSize,
-                          amountSucceeded);
+  locked_ = memOpInChunks(
+      ::mlock,
+      mapStart_,
+      size_t(mapLength_),
+      options_.pageSize,
+      amountSucceeded);
   if (locked_) {
     return true;
   }
 
-  auto msg(folly::format(
-    "mlock({}) failed at {}",
-    mapLength_, amountSucceeded).str());
-
-  if (lock == LockMode::TRY_LOCK && (errno == EPERM || errno == ENOMEM)) {
+  auto msg =
+      folly::format("mlock({}) failed at {}", mapLength_, amountSucceeded);
+  if (lock == LockMode::TRY_LOCK && errno == EPERM) {
     PLOG(WARNING) << msg;
+  } else if (lock == LockMode::TRY_LOCK && errno == ENOMEM) {
+    VLOG(1) << msg;
   } else {
     PLOG(FATAL) << msg;
   }
@@ -263,12 +283,16 @@ void MemoryMapping::munlock(bool dontneed) {
   if (!locked_) return;
 
   size_t amountSucceeded = 0;
-  if (!memOpInChunks(::munlock, mapStart_, mapLength_, options_.pageSize,
-                     amountSucceeded)) {
+  if (!memOpInChunks(
+          ::munlock,
+          mapStart_,
+          size_t(mapLength_),
+          options_.pageSize,
+          amountSucceeded)) {
     PLOG(WARNING) << "munlock()";
   }
   if (mapLength_ && dontneed &&
-      ::madvise(mapStart_, mapLength_, MADV_DONTNEED)) {
+      ::madvise(mapStart_, size_t(mapLength_), MADV_DONTNEED)) {
     PLOG(WARNING) << "madvise()";
   }
   locked_ = false;
@@ -281,19 +305,44 @@ void MemoryMapping::hintLinearScan() {
 
 MemoryMapping::~MemoryMapping() {
   if (mapLength_) {
     size_t amountSucceeded = 0;
-    if (!memOpInChunks(::munmap, mapStart_, mapLength_, options_.pageSize,
-                       amountSucceeded)) {
-      PLOG(FATAL) << folly::format(
-        "munmap({}) failed at {}",
-        mapLength_, amountSucceeded).str();
+    if (!memOpInChunks(
+            ::munmap,
+            mapStart_,
+            size_t(mapLength_),
+            options_.pageSize,
+            amountSucceeded)) {
+      PLOG(FATAL) << folly::format("munmap({}) failed at {}",
+                                   mapLength_, amountSucceeded);
     }
   }
 }
 
 void MemoryMapping::advise(int advice) const {
-  if (mapLength_ && ::madvise(mapStart_, mapLength_, advice)) {
-    PLOG(WARNING) << "madvise()";
+  advise(advice, 0, size_t(mapLength_));
+}
+
+void MemoryMapping::advise(int advice, size_t offset, size_t length) const {
+  CHECK_LE(offset + length, size_t(mapLength_))
+    << " offset: " << offset
+    << " length: " << length
+    << " mapLength_: " << mapLength_;
+
+  // Include the entire start page: round down to page boundary.
+  const auto offMisalign = offset % options_.pageSize;
+  offset -= offMisalign;
+  length += offMisalign;
+
+  // Round the last page down to page boundary.
+  if (offset + length != size_t(mapLength_)) {
+    length -= length % options_.pageSize;
+  }
+
+  if (length == 0) {
+    return;
   }
+
+  char* mapStart = static_cast<char*>(mapStart_) + offset;
+  PLOG_IF(WARNING, ::madvise(mapStart, length, advice)) << "madvise";
 }
 
 MemoryMapping& MemoryMapping::operator=(MemoryMapping other) {
@@ -341,7 +390,7 @@ void mmapFileCopy(const char* src, const char* dest, mode_t mode) {
   MemoryMapping destMap(
       File(dest, O_RDWR | O_CREAT | O_TRUNC, mode),
       0,
-      srcMap.range().size(),
+      off_t(srcMap.range().size()),
      MemoryMapping::writable());
 
   alignedForwardMemcpy(destMap.writableRange().data(),
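The comments in the patch explain why folly splits mlock/munlock/munmap into page-sized chunks: on Linux these calls hold a kernel semaphore for the whole range, so one large call can stall other memory operations for seconds. The sketch below is a minimal standalone illustration of that chunking idea, assuming a POSIX system; the helper name lockInChunks and its signature are hypothetical and are not folly's API (folly's real helper is the memOpInChunks shown in the hunks above, which also reports how many bytes succeeded).

// Illustrative sketch only (not folly API): lock a buffer in page-aligned
// chunks so each mlock() call holds the kernel's memory semaphore briefly.
#include <sys/mman.h>
#include <unistd.h>

#include <algorithm>
#include <cstddef>

bool lockInChunks(void* mem, size_t len, size_t chunkSize) {
  const size_t pageSize = size_t(sysconf(_SC_PAGESIZE));
  // Mirror the flag's documented behaviour: round the chunk size up to a
  // whole number of pages.
  chunkSize = ((chunkSize + pageSize - 1) / pageSize) * pageSize;

  char* addr = static_cast<char*>(mem);
  size_t locked = 0;
  while (locked < len) {
    const size_t thisChunk = std::min(chunkSize, len - locked);
    if (::mlock(addr + locked, thisChunk) != 0) {
      return false; // errno tells why (e.g. EPERM, ENOMEM)
    }
    locked += thisChunk;
  }
  return true;
}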
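The new two-argument advise() overload in the last large hunk rounds the requested range to page boundaries before calling madvise(): the start is rounded down so the whole first page is included, and the tail is trimmed to a page boundary unless the range already ends at the end of the mapping (whose last page may be partial). A small standalone sketch of just that arithmetic, with pageSize and mapLength standing in for options_.pageSize and mapLength_; the names PageRange and roundToPages are made up for illustration:

// Illustrative sketch only: the page rounding performed before madvise().
#include <cassert>
#include <cstddef>

struct PageRange {
  size_t offset;
  size_t length;
};

PageRange roundToPages(size_t offset, size_t length,
                       size_t pageSize, size_t mapLength) {
  assert(offset + length <= mapLength);

  // Include the entire start page: round the offset down.
  const size_t misalign = offset % pageSize;
  offset -= misalign;
  length += misalign;

  // Trim the tail to a page boundary, unless the range ends exactly at the
  // end of the mapping.
  if (offset + length != mapLength) {
    length -= length % pageSize;
  }
  // length can become 0 here; the caller then simply skips madvise().
  return {offset, length};
}

For example, with 4 KiB pages and a 16 KiB mapping, offset 5000 and length 3000 first become offset 4096 and length 3904, and then length drops to 0 because the range covers no complete page, so there is nothing to advise.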