namespace detail {
+/**
+ * Bool to integral doesn't need any special checks, and this
+ * overload means we aren't trying to see if a bool is less than
+ * an integer.
+ */
+template <class Tgt>
+typename std::enable_if<
+ !std::is_same<Tgt, bool>::value && std::is_integral<Tgt>::value,
+ Expected<Tgt, ConversionCode>>::type
+convertTo(const bool& value) noexcept {
+ return static_cast<Tgt>(value ? 1 : 0);
+}
+
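A usage sketch for the new overload (hypothetical caller; assumes folly::tryTo<> dispatches through these detail::convertTo<> overloads, as the rest of Conv.h does): a bool source needs no range check and simply maps to 0 or 1.

// Hypothetical caller, not part of this diff.
#include <folly/Conv.h>

void demo() {
  auto one  = folly::tryTo<int>(true);    // Expected holding 1
  auto zero = folly::tryTo<long>(false);  // Expected holding 0
  (void)one;
  (void)zero;
}
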
/**
* Checked conversion from integral to integral. The checks are only
* performed when meaningful, e.g. conversion from int to long goes
void MemoryMapping::advise(int advice) const { advise(advice, 0, mapLength_); }
void MemoryMapping::advise(int advice, size_t offset, size_t length) const {
- CHECK_LE(offset + length, mapLength_)
+ CHECK_LE(offset + length, size_t(mapLength_))
<< " offset: " << offset
<< " length: " << length
<< " mapLength_: " << mapLength_;
}
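Most of the edits in this change follow the same pattern: the glog-style CHECK/DCHECK macros forward both operands through a template, so a size_t compared against a plain int literal (or a signed member such as mapLength_) becomes a signed/unsigned comparison that -Wsign-compare and MSVC's C4388 flag. A minimal sketch of the mechanism, using a hypothetical check_le_impl rather than the real glog internals:

// Hypothetical stand-in for the macro internals (not the real glog code):
// the operands arrive as deduced template parameters, so the compiler no
// longer sees that one side is a non-negative constant and warns about
// the signed/unsigned comparison.
#include <cstddef>

template <class A, class B>
bool check_le_impl(const A& a, const B& b) {
  return a <= b;  // A = size_t, B = signed type -> warning fires here
}

bool demo(std::size_t offset, std::size_t length, long long mapLength) {
  // Cast the signed side, as done above; mapLength stands in for off_t.
  return check_le_impl(offset + length, std::size_t(mapLength));
}

The unsigned literal suffixes used throughout the rest of this diff serve the same purpose: both sides of the comparison end up unsigned.
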
line.append(16 - n, ' ');
line.push_back('|');
- DCHECK_EQ(line.size(), 78);
+ DCHECK_EQ(line.size(), 78u);
return n;
}
// inside what are really static ifs (not executed because of the templated
// types) that violate -Wsign-compare and/or -Wbool-compare, so suppress the
// warnings here rather than forcing every caller to do so.
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wsign-compare"
+FOLLY_PUSH_WARNING
+FOLLY_GCC_DISABLE_WARNING(sign-compare)
#if __GNUC_PREREQ(5, 0)
-#pragma GCC diagnostic ignored "-Wbool-compare"
+FOLLY_GCC_DISABLE_WARNING(bool-compare)
#endif
+FOLLY_MSVC_DISABLE_WARNING(4388) // sign-compare
+FOLLY_MSVC_DISABLE_WARNING(4804) // bool-compare
template <typename RHS, RHS rhs, typename LHS>
bool less_than_impl(LHS const lhs) {
  // The statically-dead range guards below are what trip the suppressed warnings.
  return rhs > std::numeric_limits<LHS>::max()
      ? true
      : rhs <= std::numeric_limits<LHS>::min() ? false : lhs < rhs;
}
-#pragma GCC diagnostic pop
+FOLLY_POP_WARNING
} // namespace detail
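An illustrative caller (assumed, not part of this diff) showing why the suppression has to wrap the helper itself: mixed-signedness or bool instantiations turn the statically-dead numeric_limits guards into signed/unsigned (or bool/int) comparisons, and without the pragmas every such call site would have to silence the warning on its own.

// Hypothetical call sites; assumes these helpers live in folly::detail,
// as the FOLLY_* macros suggest.
bool example(int x, bool b) {
  return folly::detail::less_than_impl<unsigned, 256u>(x) &&  // int vs unsigned
         folly::detail::less_than_impl<unsigned, 2u>(b);      // bool vs unsigned
}
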
// helper method for case where needles.size() <= 16
size_t qfind_first_byte_of_needles16(const StringPieceLite haystack,
const StringPieceLite needles) {
- DCHECK_GT(haystack.size(), 0);
- DCHECK_GT(needles.size(), 0);
- DCHECK_LE(needles.size(), 16);
+ DCHECK_GT(haystack.size(), 0u);
+ DCHECK_GT(needles.size(), 0u);
+ DCHECK_LE(needles.size(), 16u);
if ((needles.size() <= 2 && haystack.size() >= 256) ||
// must bail if we can't even SSE-load a single segment of haystack
(haystack.size() < 16 &&
size_t scanHaystackBlock(const StringPieceLite haystack,
const StringPieceLite needles,
uint64_t blockStartIdx) {
- DCHECK_GT(needles.size(), 16); // should handled by *needles16() method
+  DCHECK_GT(needles.size(), 16u); // should be handled by *needles16() method
DCHECK(blockStartIdx + 16 <= haystack.size() ||
(page_for(haystack.data() + blockStartIdx) ==
page_for(haystack.data() + blockStartIdx + 15)));
#else
std::unique_ptr<ThreadEntry> threadEntry(static_cast<ThreadEntry*>(ptr));
#endif
- DCHECK_GT(threadEntry->elementsCapacity, 0);
+ DCHECK_GT(threadEntry->elementsCapacity, 0u);
auto& meta = *threadEntry->meta;
// Make sure this ThreadEntry is available if ThreadLocal A is accessed in
stackPtr_->value_ = value_;
if (stackPtr_->unmaterializedSubErrorKeys_.empty()) {
// There should be the current error, and the root.
- CHECK_GE(stackPtr_->subErrors_.size(), 2)
+ CHECK_GE(stackPtr_->subErrors_.size(), 2u)
<< "Internal bug: out of suberrors";
stackPtr_->subErrors_.pop_back();
} else {
return __builtin_popcountll(value);
}
static FOLLY_ALWAYS_INLINE int ctz(uint64_t value) {
- DCHECK_GT(value, 0);
+ DCHECK_GT(value, 0u);
return __builtin_ctzll(value);
}
static FOLLY_ALWAYS_INLINE int clz(uint64_t value) {
- DCHECK_GT(value, 0);
+ DCHECK_GT(value, 0u);
return __builtin_clzll(value);
}
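A note on the checks above (not folly code): GCC documents __builtin_ctzll and __builtin_clzll as having undefined results when the argument is zero, which is what the DCHECKs guard against in debug builds. A caller that can see zero has to handle it explicitly, for example:

#include <cstdint>

// Hypothetical wrapper: define the zero case explicitly instead of
// relying on the undefined builtin result.
inline int countTrailingZeros(uint64_t v) {
  return v == 0 ? 64 : __builtin_ctzll(v);
}
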
static FOLLY_ALWAYS_INLINE uint64_t blsr(uint64_t value) {
auto magicptr = hdrbuf + sizeof(kMagic);
auto lenptr = hdrbuf + hdrlen;
- if (len > std::numeric_limits<int32_t>::max()) {
+ if (len > uint64_t(std::numeric_limits<int32_t>::max())) {
*magicptr = (int8_t)BserType::Int64;
*(int64_t*)lenptr = (int64_t)len;
hdrlen += sizeof(int64_t);
- } else if (len > std::numeric_limits<int16_t>::max()) {
+ } else if (len > uint64_t(std::numeric_limits<int16_t>::max())) {
*magicptr = (int8_t)BserType::Int32;
*(int32_t*)lenptr = (int32_t)len;
hdrlen += sizeof(int32_t);
- } else if (len > std::numeric_limits<int8_t>::max()) {
+ } else if (len > uint64_t(std::numeric_limits<int8_t>::max())) {
*magicptr = (int8_t)BserType::Int16;
*(int16_t*)lenptr = (int16_t)len;
hdrlen += sizeof(int16_t);
}
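For orientation, the branch above picks the smallest signed width that can represent len for the BSER header, and the casts keep the comparisons unsigned since len itself is unsigned. A rough standalone sketch of the same selection (the helper name and the Int8 fallback are illustrative, not part of the bser API):

#include <cstddef>
#include <cstdint>
#include <limits>

std::size_t lengthFieldBytes(uint64_t len) {
  if (len > uint64_t(std::numeric_limits<int32_t>::max())) {
    return sizeof(int64_t);  // needs a 64-bit length field
  } else if (len > uint64_t(std::numeric_limits<int16_t>::max())) {
    return sizeof(int32_t);
  } else if (len > uint64_t(std::numeric_limits<int8_t>::max())) {
    return sizeof(int16_t);
  }
  return sizeof(int8_t);
}
// e.g. len = 200 -> 2 bytes, len = 70000 -> 4 bytes.
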
auto& nodes = edges_[from];
- DCHECK_EQ(0, nodes.count(to));
+ DCHECK_EQ(nodes.count(to), 0u);
nodes.insert(to);
return true;
/* Size of the region from stackLimit + stackSize down to the last non-magic value */
static size_t nonMagicInBytes(unsigned char* stackLimit, size_t stackSize) {
- CHECK_EQ(0, reinterpret_cast<intptr_t>(stackLimit) % sizeof(uint64_t));
- CHECK_EQ(0, stackSize % sizeof(uint64_t));
+ CHECK_EQ(reinterpret_cast<intptr_t>(stackLimit) % sizeof(uint64_t), 0u);
+ CHECK_EQ(stackSize % sizeof(uint64_t), 0u);
uint64_t* begin = reinterpret_cast<uint64_t*>(stackLimit);
uint64_t* end = reinterpret_cast<uint64_t*>(stackLimit + stackSize);
recordStackUsed_ = recordStackUsed;
if (UNLIKELY(recordStackUsed_ && !stackFilledWithMagic_)) {
CHECK_EQ(
- 0, reinterpret_cast<intptr_t>(fiberStackLimit_) % sizeof(uint64_t));
- CHECK_EQ(0, fiberStackSize_ % sizeof(uint64_t));
+ reinterpret_cast<intptr_t>(fiberStackLimit_) % sizeof(uint64_t), 0u);
+ CHECK_EQ(fiberStackSize_ % sizeof(uint64_t), 0u);
std::fill(
reinterpret_cast<uint64_t*>(fiberStackLimit_),
reinterpret_cast<uint64_t*>(fiberStackLimit_ + fiberStackSize_),
std::unique_ptr<FiberManager> eraseImpl(EventBaseT& evb) {
std::lock_guard<std::mutex> lg(mutex_);
- DCHECK_EQ(1, map_.count(&evb));
+ DCHECK_EQ(map_.count(&evb), 1u);
auto ret = std::move(map_[&evb]);
map_.erase(&evb);
Barrier::~Barrier() {
auto block = controlBlock_.load(std::memory_order_relaxed);
auto prev = block->valueAndReaderCount.load(std::memory_order_relaxed);
- DCHECK_EQ(prev >> kReaderShift, 0);
+ DCHECK_EQ(prev >> kReaderShift, 0u);
auto val = prev & kValueMask;
auto p = promises(block);
return false;
}
size_t n = std::min(ba.size(), bb.size());
- DCHECK_GT(n, 0);
+ DCHECK_GT(n, 0u);
if (memcmp(ba.data(), bb.data(), n)) {
return false;
}
static inline uintptr_t packFlagsAndSharedInfo(uintptr_t flags,
SharedInfo* info) {
uintptr_t uinfo = reinterpret_cast<uintptr_t>(info);
- DCHECK_EQ(flags & ~kFlagMask, 0);
- DCHECK_EQ(uinfo & kFlagMask, 0);
+ DCHECK_EQ(flags & ~kFlagMask, 0u);
+ DCHECK_EQ(uinfo & kFlagMask, 0u);
return flags | uinfo;
}
inline void setSharedInfo(SharedInfo* info) {
uintptr_t uinfo = reinterpret_cast<uintptr_t>(info);
- DCHECK_EQ(uinfo & kFlagMask, 0);
+ DCHECK_EQ(uinfo & kFlagMask, 0u);
flagsAndSharedInfo_ = (flagsAndSharedInfo_ & kFlagMask) | uinfo;
}
// flags_ are changed from const methods
inline void setFlags(uintptr_t flags) const {
- DCHECK_EQ(flags & ~kFlagMask, 0);
+ DCHECK_EQ(flags & ~kFlagMask, 0u);
flagsAndSharedInfo_ |= flags;
}
inline void clearFlags(uintptr_t flags) const {
- DCHECK_EQ(flags & ~kFlagMask, 0);
+ DCHECK_EQ(flags & ~kFlagMask, 0u);
flagsAndSharedInfo_ &= ~flags;
}
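The DCHECKs above protect a pointer-tagging scheme: flag bits may only occupy the low bits that the SharedInfo pointer's alignment guarantees to be zero. A minimal standalone sketch of the pattern (the 8-byte alignment and 3-bit mask are illustrative; IOBuf's actual kFlagMask is defined elsewhere in the class):

#include <cassert>
#include <cstdint>

struct alignas(8) Info {};          // 8-byte alignment -> low 3 bits are free
constexpr uintptr_t kMask = 0x7;    // room for three flag bits

uintptr_t pack(uintptr_t flags, Info* info) {
  uintptr_t uinfo = reinterpret_cast<uintptr_t>(info);
  assert((flags & ~kMask) == 0);    // flags fit in the spare bits
  assert((uinfo & kMask) == 0);     // pointer really is aligned
  return flags | uinfo;
}

Info* unpack(uintptr_t packed, uintptr_t* flagsOut) {
  *flagsOut = packed & kMask;
  return reinterpret_cast<Info*>(packed & ~kMask);
}
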
(int)message->msg_iov[i].iov_len,
message->msg_flags);
}
- if (r == -1 || r != message->msg_iov[i].iov_len) {
+ if (r == -1 || size_t(r) != message->msg_iov[i].iov_len) {
errno = translate_wsa_error(WSAGetLastError());
if (WSAGetLastError() == WSAEWOULDBLOCK && bytesSent > 0) {
return bytesSent;
return -1;
}
- if (res == curLen) {
+ if (size_t(res) == curLen) {
curIov++;
if (curIov < count) {
curBase = iov[curIov].iov_base;
size_t nLevels,
const Duration levelDurations[])
: cachedTime_(), cachedSum_(0), cachedCount_(0) {
- CHECK_GT(nLevels, 0);
+ CHECK_GT(nLevels, 0u);
CHECK(levelDurations);
levels_.reserve(nLevels);
size_t nBuckets,
std::initializer_list<Duration> durations)
: cachedTime_(), cachedSum_(0), cachedCount_(0) {
- CHECK_GT(durations.size(), 0);
+ CHECK_GT(durations.size(), 0u);
levels_.reserve(durations.size());
- int i = 0;
+ size_t i = 0;
Duration prev{0};
for (auto dur : durations) {
if (dur == Duration(0)) {
*/
const Level& getLevel(int level) const {
CHECK(level >= 0);
- CHECK_LT(level, levels_.size());
+ CHECK_LT(size_t(level), levels_.size());
return levels_[level];
}