#include "folly/MemoryMapping.h"
#include "folly/Format.h"
+#ifdef __linux__
+#include "folly/experimental/io/HugePages.h"
+#endif
+
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
"Maximum bytes to mlock/munlock/munmap at once "
"(will be rounded up to PAGESIZE)");
-namespace folly {
+#ifndef MAP_POPULATE
+#define MAP_POPULATE 0
+#endif
-/* protected constructor */
-MemoryMapping::MemoryMapping()
- : mapStart_(nullptr)
- , mapLength_(0)
- , pageSize_(0)
- , locked_(false) {
-}
+namespace folly {
-MemoryMapping::MemoryMapping(MemoryMapping&& other)
- : mapStart_(nullptr)
- , mapLength_(0)
- , pageSize_(0)
- , locked_(false) {
// Move constructor: members start in the empty state (see the in-class
// initializers in the header), then swap() takes over `other`'s mapping,
// leaving `other` empty.
MemoryMapping::MemoryMapping(MemoryMapping&& other) {
  swap(other);
}
// Map a region of `file` starting at `offset`, `length` bytes long
// (length == -1 maps to end of file).  The File is owned by the mapping.
MemoryMapping::MemoryMapping(File file, off_t offset, off_t length,
                             Options options)
  : file_(std::move(file)),
    options_(std::move(options)) {
  CHECK(file_);  // file-backed mapping requires a valid (open) File
  init(offset, length);
}
// Convenience: open the file at `name` and map it.
MemoryMapping::MemoryMapping(const char* name, off_t offset, off_t length,
                             Options options)
  : MemoryMapping(File(name), offset, length, options) { }
// Convenience: wrap the file descriptor `fd` in a File and map it.
MemoryMapping::MemoryMapping(int fd, off_t offset, off_t length,
                             Options options)
  : MemoryMapping(File(fd), offset, length, options) { }
+
// Anonymous mapping (not backed by a file): file_ stays default-constructed,
// which init() detects (`anon = !file_`).
MemoryMapping::MemoryMapping(AnonymousType, off_t length, Options options)
  : options_(std::move(options)) {
  init(0, length);
}
+
namespace {

#ifdef __linux__
// If `device` is a hugetlbfs mount, report its huge page size and that the
// mapping may be extended without ftruncate() (hugetlbfs file sizes are
// always a multiple of the page size).
void getDeviceOptions(dev_t device, off_t& pageSize, bool& autoExtend) {
  auto ps = getHugePageSizeForDevice(device);
  if (ps) {
    pageSize = ps->size;
    autoExtend = true;
  }
}
#else
// Non-Linux: no hugetlbfs support; leave pageSize/autoExtend untouched.
inline void getDeviceOptions(dev_t device, off_t& pageSize,
                             bool& autoExtend) { }
#endif

}  // namespace
+
+void MemoryMapping::init(off_t offset, off_t length) {
+ const bool grow = options_.grow;
+ const bool anon = !file_;
+ CHECK(!(grow && anon));
+
+ off_t& pageSize = options_.pageSize;
+
+ struct stat st;
+
+ // On Linux, hugetlbfs file systems don't require ftruncate() to grow the
+ // file, and (on kernels before 2.6.24) don't even allow it. Also, the file
+ // size is always a multiple of the page size.
+ bool autoExtend = false;
+
+ if (!anon) {
+ // Stat the file
+ CHECK_ERR(fstat(file_.fd(), &st));
+
+ if (pageSize == 0) {
+ getDeviceOptions(st.st_dev, pageSize, autoExtend);
+ }
+ } else {
+ DCHECK(!file_);
+ DCHECK_EQ(offset, 0);
+ CHECK_EQ(pageSize, 0);
+ CHECK_GE(length, 0);
+ }
+
if (pageSize == 0) {
pageSize = sysconf(_SC_PAGESIZE);
}
+
CHECK_GT(pageSize, 0);
CHECK_EQ(pageSize & (pageSize - 1), 0); // power of two
CHECK_GE(offset, 0);
- pageSize_ = pageSize;
// Round down the start of the mapped region
size_t skipStart = offset % pageSize;
offset -= skipStart;
- file_ = std::move(file);
mapLength_ = length;
if (mapLength_ != -1) {
mapLength_ += skipStart;
mapLength_ = (mapLength_ + pageSize - 1) / pageSize * pageSize;
}
- // stat the file
- struct stat st;
- CHECK_ERR(fstat(file_.fd(), &st));
- off_t remaining = st.st_size - offset;
+ off_t remaining = anon ? length : st.st_size - offset;
+
if (mapLength_ == -1) {
length = mapLength_ = remaining;
} else {
if (length > remaining) {
if (grow) {
- PCHECK(0 == ftruncate(file_.fd(), offset + length))
- << "ftructate() failed, couldn't grow file";
- remaining = length;
+ if (!autoExtend) {
+ PCHECK(0 == ftruncate(file_.fd(), offset + length))
+ << "ftruncate() failed, couldn't grow file to "
+ << offset + length;
+ remaining = length;
+ } else {
+ // Extend mapping to multiple of page size, don't use ftruncate
+ remaining = mapLength_;
+ }
} else {
length = remaining;
}
}
- if (mapLength_ > remaining) mapLength_ = remaining;
+ if (mapLength_ > remaining) {
+ mapLength_ = remaining;
+ }
}
if (length == 0) {
mapLength_ = 0;
mapStart_ = nullptr;
} else {
+ int flags = options_.shared ? MAP_SHARED : MAP_PRIVATE;
+ if (anon) flags |= MAP_ANONYMOUS;
+ if (options_.prefault) flags |= MAP_POPULATE;
+
+ // The standard doesn't actually require PROT_NONE to be zero...
+ int prot = PROT_NONE;
+ if (options_.readable || options_.writable) {
+ prot = ((options_.readable ? PROT_READ : 0) |
+ (options_.writable ? PROT_WRITE : 0));
+ }
+
unsigned char* start = static_cast<unsigned char*>(
- mmap(nullptr, mapLength_, prot, MAP_SHARED, file_.fd(), offset));
+ mmap(options_.address, mapLength_, prot, flags, file_.fd(), offset));
PCHECK(start != MAP_FAILED)
<< " offset=" << offset
<< " length=" << mapLength_;
bool MemoryMapping::mlock(LockMode lock) {
size_t amountSucceeded = 0;
- locked_ = memOpInChunks(::mlock, mapStart_, mapLength_, pageSize_,
+ locked_ = memOpInChunks(::mlock, mapStart_, mapLength_, options_.pageSize,
amountSucceeded);
if (locked_) {
return true;
}
// only part of the buffer was mlocked, unlock it back
- if (!memOpInChunks(::munlock, mapStart_, amountSucceeded, pageSize_,
+ if (!memOpInChunks(::munlock, mapStart_, amountSucceeded, options_.pageSize,
amountSucceeded)) {
PLOG(WARNING) << "munlock()";
}
if (!locked_) return;
size_t amountSucceeded = 0;
- if (!memOpInChunks(::munlock, mapStart_, mapLength_, pageSize_,
+ if (!memOpInChunks(::munlock, mapStart_, mapLength_, options_.pageSize,
amountSucceeded)) {
PLOG(WARNING) << "munlock()";
}
MemoryMapping::~MemoryMapping() {
if (mapLength_) {
size_t amountSucceeded = 0;
- if (!memOpInChunks(::munmap, mapStart_, mapLength_, pageSize_,
+ if (!memOpInChunks(::munmap, mapStart_, mapLength_, options_.pageSize,
amountSucceeded)) {
PLOG(FATAL) << folly::format(
"munmap({}) failed at {}",
swap(this->file_, other.file_);
swap(this->mapStart_, other.mapStart_);
swap(this->mapLength_, other.mapLength_);
- swap(this->pageSize_, other.pageSize_);
+ swap(this->options_, other.options_);
swap(this->locked_, other.locked_);
swap(this->data_, other.data_);
}
// ADL-friendly free swap, forwards to the member swap.
void swap(MemoryMapping& a, MemoryMapping& b) { a.swap(b); }
+
// Copy `size` bytes from `src` to `dst`, strictly front to back.
// Unlike libc memcpy (which may copy backwards when using SSSE3), the
// forward order keeps any sequential-read prefetching effective.
// Both pointers must be aligned to alignof(unsigned long).
void alignedForwardMemcpy(void* dst, const void* src, size_t size) {
  assert(reinterpret_cast<uintptr_t>(src) % alignof(unsigned long) == 0);
  assert(reinterpret_cast<uintptr_t>(dst) % alignof(unsigned long) == 0);

  // Bulk of the copy: one machine word at a time, ascending addresses.
  auto in = static_cast<const unsigned long*>(src);
  auto out = static_cast<unsigned long*>(dst);
  const size_t words = size / sizeof(unsigned long);
  const size_t tail = size % sizeof(unsigned long);
  for (size_t i = 0; i < words; ++i) {
    out[i] = in[i];
  }

  // Remaining tail bytes (fewer than one word), still forwards.
  auto inBytes = reinterpret_cast<const unsigned char*>(in + words);
  auto outBytes = reinterpret_cast<unsigned char*>(out + words);
  for (size_t i = 0; i < tail; ++i) {
    outBytes[i] = inBytes[i];
  }
}
-void swap(MemoryMapping& a, MemoryMapping& b) { a.swap(b); }
// Copy file `src` to `dest` via memory mappings.  `dest` is created (or
// truncated) with the given mode and grown to the source size via the
// writable() options (grow = true).
void mmapFileCopy(const char* src, const char* dest, mode_t mode) {
  MemoryMapping srcMap(src);
  srcMap.hintLinearScan();  // hint that we will read the source sequentially

  MemoryMapping destMap(
      File(dest, O_RDWR | O_CREAT | O_TRUNC, mode),
      0,
      srcMap.range().size(),
      MemoryMapping::writable());

  // Forward-only copy so the linear-scan hint above stays useful.
  alignedForwardMemcpy(destMap.writableRange().data(),
                       srcMap.range().data(),
                       srcMap.range().size());
}
} // namespace folly
* The mapping will be destroyed (and the memory pointed-to by data() will
* likely become inaccessible) when the MemoryMapping object is destroyed.
*/
  // Mapping options; all fields have usable defaults (shared, readable,
  // non-writable mapping with an auto-detected page size).
  struct Options {
    Options() { }

    // Convenience methods; return *this for chaining.
    Options& setPageSize(off_t v) { pageSize = v; return *this; }
    Options& setShared(bool v) { shared = v; return *this; }
    Options& setPrefault(bool v) { prefault = v; return *this; }
    Options& setReadable(bool v) { readable = v; return *this; }
    Options& setWritable(bool v) { writable = v; return *this; }
    Options& setGrow(bool v) { grow = v; return *this; }

    // Page size. 0 = use appropriate page size.
    // (On Linux, we use a huge page size if the file is on a hugetlbfs
    // file system, and the default page size otherwise)
    off_t pageSize = 0;

    // If shared (default), the memory mapping is shared with other processes
    // mapping the same file (or children); if not shared (private), each
    // process has its own mapping; if the mapping is writable, the changes
    // are not reflected to the underlying file. See the discussion of
    // MAP_PRIVATE vs MAP_SHARED in the mmap(2) manual page.
    bool shared = true;

    // Populate page tables; subsequent accesses should not be blocked
    // by page faults. This is a hint, as it may not be supported.
    bool prefault = false;

    // Map the pages readable. Note that mapping pages without read permissions
    // is not universally supported (not supported on hugetlbfs on Linux, for
    // example)
    bool readable = true;

    // Map the pages writable.
    bool writable = false;

    // When mapping a file in writable mode, grow the file to the requested
    // length (using ftruncate()) before mapping; if false, truncate the
    // mapping to the actual file size instead.
    bool grow = false;

    // Fix map at this address, if not nullptr. Must be aligned to a multiple
    // of the appropriate page size.
    void* address = nullptr;
  };
+
  // Options to emulate the old WritableMemoryMapping: readable and writable,
  // allow growing the file if mapping past EOF.
  static Options writable() {
    return Options().setWritable(true).setGrow(true);
  }

  // Tag type selecting the anonymous-mapping constructor below.
  enum AnonymousType {
    kAnonymous
  };

  /**
   * Create an anonymous mapping (not backed by any file).
   */
  MemoryMapping(AnonymousType, off_t length, Options options=Options());
+
  /**
   * Map a portion of `file`; length == -1 maps to end of file.
   * The File is owned by the mapping.
   */
  explicit MemoryMapping(File file,
                         off_t offset=0,
                         off_t length=-1,
                         Options options=Options());

  // Convenience: open the file at `name` and map it.
  explicit MemoryMapping(const char* name,
                         off_t offset=0,
                         off_t length=-1,
                         Options options=Options());

  // Convenience: wrap the file descriptor `fd` in a File and map it.
  explicit MemoryMapping(int fd,
                         off_t offset=0,
                         off_t length=-1,
                         Options options=Options());

  MemoryMapping(MemoryMapping&&);

  ~MemoryMapping();

  // Copy-and-swap assignment (takes the argument by value).
  MemoryMapping& operator=(MemoryMapping);
  /**
   * A range of bytes mapped by this mapping.
   */
  ByteRange range() const {
    return data_;
  }

  /**
   * A bitwise cast of the mapped bytes as range of mutable values. Only
   * intended for use with POD or in-place usable types.
   */
  template<class T>
  Range<T*> asWritableRange() const {
    DCHECK(options_.writable);  // you'll segfault anyway...
    size_t count = data_.size() / sizeof(T);
    return Range<T*>(static_cast<T*>(
                         static_cast<void*>(data_.data())),
                     count);
  }

  /**
   * A range of mutable bytes mapped by this mapping.
   */
  MutableByteRange writableRange() const {
    DCHECK(options_.writable);  // you'll segfault anyway...
    return data_;
  }
/**
* Return the memory area where the file was mapped.
+ * Deprecated; use range() instead.
*/
StringPiece data() const {
return asRange<const char>();
int fd() const { return file_.fd(); }
 private:
  MemoryMapping();

  // NOTE(review): InitFlags appears unused now that init() takes no
  // flags/prot arguments (grow/anon come from options_ and file_) —
  // candidate for removal; confirm there is no other user.
  enum InitFlags {
    kGrow = 1 << 0,
    kAnon = 1 << 1,
  };

  void init(off_t offset, off_t length);

  File file_;
  void* mapStart_ = nullptr;      // page-aligned start of the mmap'd region
  off_t mapLength_ = 0;           // total mapped length, in bytes
  Options options_;
  bool locked_ = false;           // true after a successful mlock()
  MutableByteRange data_;         // user-visible [offset, offset+length) view
};
+void swap(MemoryMapping&, MemoryMapping&);
+
/**
- * Maps files in memory for writing.
+ * A special case of memcpy() that always copies memory forwards.
+ * (libc's memcpy() is allowed to copy memory backwards, and will do so
+ * when using SSSE3 instructions).
*
- * @author Tom Jackson (tjackson@fb.com)
+ * Assumes src and dest are aligned to alignof(unsigned long).
+ *
+ * Useful when copying from/to memory mappings after hintLinearScan();
+ * copying backwards renders any prefetching useless (even harmful).
*/
-class WritableMemoryMapping : public MemoryMapping {
- public:
- explicit WritableMemoryMapping(File file,
- off_t offset = 0,
- off_t length = -1,
- off_t pageSize = 0);
- /**
- * A bitwise cast of the mapped bytes as range of mutable values. Only
- * intended for use with POD or in-place usable types.
- */
- template<class T>
- Range<T*> asWritableRange() const {
- size_t count = data_.size() / sizeof(T);
- return Range<T*>(static_cast<T*>(
- static_cast<void*>(data_.data())),
- count);
- }
+void alignedForwardMemcpy(void* dest, const void* src, size_t size);
- /**
- * A range of mutable bytes mapped by this mapping.
- */
- Range<uint8_t*> writableRange() const {
- return data_;
- }
-};
-
-void swap(MemoryMapping&, MemoryMapping&);
+/**
+ * Copy a file using mmap(). Overwrites dest.
+ */
+void mmapFileCopy(const char* src, const char* dest, mode_t mode = 0666);
} // namespace folly
#include <gflags/gflags.h>
+#include "folly/File.h"
#include "folly/Format.h"
+#include "folly/MemoryMapping.h"
#include "folly/Portability.h"
#include "folly/Range.h"
#include "folly/ScopeGuard.h"
exit(1);
}
// Copy srcFile to `dest` on a hugetlbfs file system.  A relative `dest`
// is interpreted relative to the default huge page mount point.
void copy(const char* srcFile, const char* dest) {
  fs::path destPath(dest);
  if (!destPath.is_absolute()) {
    auto hp = getHugePageSize();
    CHECK(hp) << "no huge pages available";
    // Resolve the relative path under (and confined to) the mount point.
    destPath = fs::canonical_parent(destPath, hp->mountPoint);
  }

  mmapFileCopy(srcFile, destPath.c_str());
}
// Print each supported huge page size and its mount point.
void list() {
  for (const auto& p : getHugePageSizes()) {
    std::cout << p.size << " " << p.mountPoint << "\n";
  }
}
#include "folly/experimental/io/HugePages.h"
-#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <fcntl.h>
#include "folly/gen/File.h"
#include "folly/gen/String.h"
-#ifndef MAP_POPULATE
-#define MAP_POPULATE 0
-#endif
-
namespace folly {
namespace {
}
// Get raw huge page sizes (without mount points, they'll be filled later)
-HugePageSizeVec getRawHugePageSizes() {
+HugePageSizeVec readRawHugePageSizes() {
// We need to parse file names from /sys/kernel/mm/hugepages
static const boost::regex regex(R"!(hugepages-(\d+)kB)!");
boost::smatch match;
* Get list of supported huge page sizes and their mount points, if
* hugetlbfs file systems are mounted for those sizes.
*/
-HugePageSizeVec getHugePageSizes() {
- HugePageSizeVec sizeVec = getRawHugePageSizes();
+HugePageSizeVec readHugePageSizes() {
+ HugePageSizeVec sizeVec = readRawHugePageSizes();
if (sizeVec.empty()) {
return sizeVec; // nothing to do
}
// Store mount point
pos->mountPoint = fs::canonical(fs::path(parts[1].begin(),
parts[1].end()));
+
+ struct stat st;
+ checkUnixError(stat(pos->mountPoint.c_str(), &st),
+ "stat hugepage mountpoint failed");
+ pos->device = st.st_dev;
}
};
return sizeVec;
}
-// RAII wrapper around an open file, closes on exit unless you call release()
-class ScopedFd : private boost::noncopyable {
- public:
- explicit ScopedFd(int fd) : fd_(fd) { }
- int fd() const { return fd_; }
-
- void release() {
- fd_ = -1;
- }
-
- void close() {
- if (fd_ == -1) {
- return;
- }
- int r = ::close(fd_);
- fd_ = -1;
- if (r == -1) {
- throw std::system_error(errno, std::system_category(), "close failed");
- }
- }
-
- ~ScopedFd() {
- try {
- close();
- } catch (...) {
- PLOG(ERROR) << "close failed!";
- }
- }
-
- private:
- int fd_;
-};
-
-// RAII wrapper that deletes a file upon destruction unless you call release()
-class ScopedDeleter : private boost::noncopyable {
- public:
- explicit ScopedDeleter(fs::path name) : name_(std::move(name)) { }
- void release() {
- name_.clear();
- }
-
- ~ScopedDeleter() {
- if (name_.empty()) {
- return;
- }
- int r = ::unlink(name_.c_str());
- if (r == -1) {
- PLOG(ERROR) << "unlink failed";
- }
- }
- private:
- fs::path name_;
-};
-
-// RAII wrapper around a mmap mapping, munmaps upon destruction unless you
-// call release()
-class ScopedMmap : private boost::noncopyable {
- public:
- ScopedMmap(void* start, size_t size) : start_(start), size_(size) { }
-
- void* start() const { return start_; }
- size_t size() const { return size_; }
-
- void release() {
- start_ = MAP_FAILED;
- }
-
- void munmap() {
- if (start_ == MAP_FAILED) {
- return;
- }
- int r = ::munmap(start_, size_);
- start_ = MAP_FAILED;
- if (r == -1) {
- throw std::system_error(errno, std::system_category(), "munmap failed");
- }
- }
-
- ~ScopedMmap() {
- try {
- munmap();
- } catch (...) {
- PLOG(ERROR) << "munmap failed!";
- }
- }
- private:
- void* start_;
- size_t size_;
-};
-
} // namespace
// Return the (lazily initialized, cached) list of huge page sizes.
// Function-local static: read once on first use; initialization is
// thread-safe (C++11 magic statics).
const HugePageSizeVec& getHugePageSizes() {
  static HugePageSizeVec sizes = readHugePageSizes();
  return sizes;
}
-const HugePageSize& HugePages::getSize(size_t hugePageSize) const {
+const HugePageSize* getHugePageSize(size_t size) {
// Linear search is just fine.
- for (auto& p : sizes_) {
+ for (auto& p : getHugePageSizes()) {
if (p.mountPoint.empty()) {
- continue; // not mounted
+ continue;
}
- if (hugePageSize == 0 || hugePageSize == p.size) {
- return p;
+ if (size == 0 || size == p.size) {
+ return &p;
}
}
- throw std::runtime_error("Huge page not supported / not mounted");
+ return nullptr;
}
-HugePages::File HugePages::create(ByteRange data,
- const fs::path& path,
- HugePageSize hugePageSize) const {
- namespace bsys = ::boost::system;
- if (hugePageSize.size == 0) {
- hugePageSize = getSize();
- }
-
- // Round size up
- File file;
- file.size = data.size() / hugePageSize.size * hugePageSize.size;
- if (file.size != data.size()) {
- file.size += hugePageSize.size;
- }
-
- {
- file.path = fs::canonical_parent(path, hugePageSize.mountPoint);
- if (!fs::starts_with(file.path, hugePageSize.mountPoint)) {
- throw fs::filesystem_error(
- "HugePages::create: path not rooted at mount point",
- file.path, hugePageSize.mountPoint,
- bsys::errc::make_error_code(bsys::errc::invalid_argument));
+const HugePageSize* getHugePageSizeForDevice(dev_t device) {
+ // Linear search is just fine.
+ for (auto& p : getHugePageSizes()) {
+ if (p.mountPoint.empty()) {
+ continue;
}
- }
- ScopedFd fd(open(file.path.c_str(), O_RDWR | O_CREAT | O_TRUNC, 0666));
- if (fd.fd() == -1) {
- throw std::system_error(errno, std::system_category(), "open failed");
- }
-
- ScopedDeleter deleter(file.path);
-
- ScopedMmap map(mmap(nullptr, file.size, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_POPULATE, fd.fd(), 0),
- file.size);
- if (map.start() == MAP_FAILED) {
- throw std::system_error(errno, std::system_category(), "mmap failed");
- }
-
- // Ignore madvise return code
- madvise(const_cast<unsigned char*>(data.data()), data.size(),
- MADV_SEQUENTIAL);
- // Why is this not memcpy, you ask?
- // The SSSE3-optimized memcpy in glibc likes to copy memory backwards,
- // rendering any prefetching from madvise useless (even harmful).
- const unsigned char* src = data.data();
- size_t size = data.size();
- unsigned char* dest = reinterpret_cast<unsigned char*>(map.start());
- if (reinterpret_cast<uintptr_t>(src) % 8 == 0) {
- const uint64_t* src8 = reinterpret_cast<const uint64_t*>(src);
- size_t size8 = size / 8;
- uint64_t* dest8 = reinterpret_cast<uint64_t*>(dest);
- while (size8--) {
- *dest8++ = *src8++;
+ if (device == p.device) {
+ return &p;
}
- src = reinterpret_cast<const unsigned char*>(src8);
- dest = reinterpret_cast<unsigned char*>(dest8);
- size %= 8;
}
- memcpy(dest, src, size);
-
- map.munmap();
- deleter.release();
- fd.close();
-
- return file;
+ return nullptr;
}
} // namespace folly
#ifndef FOLLY_IO_HUGEPAGES_H_
#define FOLLY_IO_HUGEPAGES_H_
+#include <sys/stat.h>
+#include <sys/types.h>
#include <cstddef>
#include <string>
+#include <unistd.h>
#include <utility>
#include <vector>
namespace folly {
struct HugePageSize : private boost::totally_ordered<HugePageSize> {
  explicit HugePageSize(size_t s) : size(s) { }

  // Path of `relpath` inside this size's hugetlbfs mount point.
  fs::path filePath(const fs::path& relpath) const {
    return mountPoint / relpath;
  }

  size_t size = 0;       // huge page size, in bytes
  fs::path mountPoint;   // empty if no hugetlbfs is mounted for this size
  dev_t device = 0;      // device id (st_dev) of the mount point
};
inline bool operator<(const HugePageSize& a, const HugePageSize& b) {
typedef std::vector<HugePageSize> HugePageSizeVec;
/**
 * Get list of supported huge page sizes and their mount points, if
 * hugetlbfs file systems are mounted for those sizes.
 */
const HugePageSizeVec& getHugePageSizes();

/**
 * Return the mount point for the requested huge page size.
 * 0 = use smallest available.
 * Returns nullptr if the requested huge page size is not available.
 */
const HugePageSize* getHugePageSize(size_t size = 0);

/**
 * Return the huge page size for a device.
 * Returns nullptr if the device does not refer to a huge page filesystem.
 */
const HugePageSize* getHugePageSizeForDevice(dev_t device);
} // namespace folly
TEST(MemoryMapping, Basic) {
File f = File::temporary();
{
- WritableMemoryMapping m(File(f.fd()), 0, sizeof(double));
+ MemoryMapping m(File(f.fd()), 0, sizeof(double),
+ MemoryMapping::writable());
double volatile* d = m.asWritableRange<double>().data();
*d = 37 * M_PI;
}
TEST(MemoryMapping, Move) {
File f = File::temporary();
{
- WritableMemoryMapping m(File(f.fd()), 0, sizeof(double) * 2);
+ MemoryMapping m(File(f.fd()), 0, sizeof(double) * 2,
+ MemoryMapping::writable());
double volatile* d = m.asWritableRange<double>().data();
d[0] = 37 * M_PI;
- WritableMemoryMapping m2(std::move(m));
+ MemoryMapping m2(std::move(m));
double volatile* d2 = m2.asWritableRange<double>().data();
d2[1] = 39 * M_PI;
}
TEST(MemoryMapping, DoublyMapped) {
File f = File::temporary();
// two mappings of the same memory, different addresses.
- WritableMemoryMapping mw(File(f.fd()), 0, sizeof(double));
+ MemoryMapping mw(File(f.fd()), 0, sizeof(double),
+ MemoryMapping::writable());
MemoryMapping mr(File(f.fd()), 0, sizeof(double));
double volatile* dw = mw.asWritableRange<double>().data();