From: Christopher Dykes <cdykes@fb.com>
Date: Wed, 1 Feb 2017 01:35:51 +0000 (-0800)
Subject: Swap a few APIs to reduce the sign conversions and implicit truncations required to work with them
X-Git-Tag: v2017.03.06.00~64
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=41365ea66d92749ba78f73d60325e5447beb04ab;p=folly.git

Swap a few APIs to reduce the sign conversions and implicit truncations required to work with them

Summary: This makes Folly's APIs more uniform internally, which in turn gives code outside of Folly a more uniform API to build against.
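
A minimal sketch of the pattern applied throughout, using hypothetical
stand-in types rather than the folly classes touched below: quantities
that are inherently non-negative get unsigned types, so callers that
already hold unsigned values compile cleanly under -Wsign-compare and
-Wsign-conversion without casts.

    // Hypothetical illustration only; Config here is a stand-in, not folly's type.
    #include <cstdint>

    struct Config {
      uint32_t entryCountThreadCacheSize = 1000;  // previously: int
    };

    // Previously returned int, forcing a sign conversion at unsigned call sites.
    uint32_t getEntryCountThreadCacheSize(const Config& c) {
      return c.entryCountThreadCacheSize;
    }

    bool cacheIsAtLeast(const Config& c, uint32_t minimum) {
      // unsigned vs. unsigned: no -Wsign-compare warning, no cast needed.
      return getEntryCountThreadCacheSize(c) >= minimum;
    }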

Reviewed By: yfeldblum

Differential Revision: D4471471

fbshipit-source-id: f798a6498bd1a05ed12adea362ff4aedd25789ee
---

diff --git a/folly/AtomicHashArray-inl.h b/folly/AtomicHashArray-inl.h
index 390dbb16..d2cb08f4 100644
--- a/folly/AtomicHashArray-inl.h
+++ b/folly/AtomicHashArray-inl.h
@@ -31,7 +31,7 @@ template <class KeyT, class ValueT, class HashFcn, class EqualFcn,
 AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
                 Allocator, ProbeFcn, KeyConvertFcn>::
 AtomicHashArray(size_t capacity, KeyT emptyKey, KeyT lockedKey,
-                KeyT erasedKey, double _maxLoadFactor, size_t cacheSize)
+                KeyT erasedKey, double _maxLoadFactor, uint32_t cacheSize)
     : capacity_(capacity),
       maxEntries_(size_t(_maxLoadFactor * capacity_ + 0.5)),
       kEmptyKey_(emptyKey), kLockedKey_(lockedKey), kErasedKey_(erasedKey),
diff --git a/folly/AtomicHashArray.h b/folly/AtomicHashArray.h
index 42f56560..96a647d1 100644
--- a/folly/AtomicHashArray.h
+++ b/folly/AtomicHashArray.h
@@ -176,12 +176,12 @@ class AtomicHashArray : boost::noncopyable {
    *   deleter to make sure everything is cleaned up properly.
    */
   struct Config {
-    KeyT   emptyKey;
-    KeyT   lockedKey;
-    KeyT   erasedKey;
+    KeyT emptyKey;
+    KeyT lockedKey;
+    KeyT erasedKey;
     double maxLoadFactor;
     double growthFactor;
-    int    entryCountThreadCacheSize;
+    uint32_t entryCountThreadCacheSize;
     size_t capacity; // if positive, overrides maxLoadFactor
 
   public:
@@ -329,7 +329,7 @@ class AtomicHashArray : boost::noncopyable {
     numPendingEntries_.setCacheSize(newSize);
   }
 
-  int getEntryCountThreadCacheSize() const {
+  uint32_t getEntryCountThreadCacheSize() const {
     return numEntries_.getCacheSize();
   }
 
@@ -401,8 +401,13 @@ friend class AtomicHashMap<KeyT,
 
   // Force constructor/destructor private since create/destroy should be
   // used externally instead
-  AtomicHashArray(size_t capacity, KeyT emptyKey, KeyT lockedKey,
-                  KeyT erasedKey, double maxLoadFactor, size_t cacheSize);
+  AtomicHashArray(
+      size_t capacity,
+      KeyT emptyKey,
+      KeyT lockedKey,
+      KeyT erasedKey,
+      double maxLoadFactor,
+      uint32_t cacheSize);
 
   ~AtomicHashArray() = default;
 
diff --git a/folly/AtomicHashMap.h b/folly/AtomicHashMap.h
index ef27f860..70d71491 100644
--- a/folly/AtomicHashMap.h
+++ b/folly/AtomicHashMap.h
@@ -448,7 +448,7 @@ typedef AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
   std::atomic<SubMap*> subMaps_[kNumSubMaps_];
   std::atomic<uint32_t> numMapsAllocated_;
 
-  inline bool tryLockMap(int idx) {
+  inline bool tryLockMap(unsigned int idx) {
     SubMap* val = nullptr;
     return subMaps_[idx].compare_exchange_strong(val, (SubMap*)kLockedPtr_,
       std::memory_order_acquire);
diff --git a/folly/AtomicUnorderedMap.h b/folly/AtomicUnorderedMap.h
index 12c02579..45877e1c 100644
--- a/folly/AtomicUnorderedMap.h
+++ b/folly/AtomicUnorderedMap.h
@@ -338,8 +338,7 @@ struct AtomicUnorderedInsertMap {
   }
 
  private:
-
-  enum {
+  enum : IndexType {
     kMaxAllocationTries = 1000, // after this we throw
   };
 
@@ -437,7 +436,7 @@ struct AtomicUnorderedInsertMap {
   /// Allocates a slot and returns its index.  Tries to put it near
   /// slots_[start].
   IndexType allocateNear(IndexType start) {
-    for (auto tries = 0; tries < kMaxAllocationTries; ++tries) {
+    for (IndexType tries = 0; tries < kMaxAllocationTries; ++tries) {
       auto slot = allocationAttempt(start, tries);
       auto prev = slots_[slot].headAndState_.load(std::memory_order_acquire);
       if ((prev & 3) == EMPTY &&
diff --git a/folly/Benchmark.cpp b/folly/Benchmark.cpp
index 4a96bf36..ecb7a8af 100644
--- a/folly/Benchmark.cpp
+++ b/folly/Benchmark.cpp
@@ -52,7 +52,7 @@ DEFINE_int32(
 
 DEFINE_int64(
     bm_max_iters,
-    1L << 30L,
+    1 << 30,
     "Maximum # of iterations we'll try for each benchmark.");
 
 DEFINE_int32(
diff --git a/folly/ConcurrentSkipList-inl.h b/folly/ConcurrentSkipList-inl.h
index cf04ee72..122f21c6 100644
--- a/folly/ConcurrentSkipList-inl.h
+++ b/folly/ConcurrentSkipList-inl.h
@@ -41,11 +41,12 @@ template<typename ValT, typename NodeT> class csl_iterator;
 
 template<typename T>
 class SkipListNode : private boost::noncopyable {
-  enum {
+  enum : uint16_t {
     IS_HEAD_NODE = 1,
     MARKED_FOR_REMOVAL = (1 << 1),
     FULLY_LINKED = (1 << 2),
   };
+
  public:
   typedef T value_type;
 
@@ -78,7 +79,7 @@ class SkipListNode : private boost::noncopyable {
   SkipListNode* copyHead(SkipListNode* node) {
     DCHECK(node != nullptr && height_ > node->height_);
     setFlags(node->getFlags());
-    for (int i = 0; i < node->height_; ++i) {
+    for (uint8_t i = 0; i < node->height_; ++i) {
       setSkip(i, node->skip(i));
     }
     return this;
diff --git a/folly/Conv.h b/folly/Conv.h
index 6f98b4fa..20cf3d51 100644
--- a/folly/Conv.h
+++ b/folly/Conv.h
@@ -1204,7 +1204,8 @@ typename std::enable_if<
     Expected<Tgt, ConversionCode>>::type
 convertTo(const Src& value) noexcept {
   /* static */ if (
-      std::numeric_limits<Tgt>::max() < std::numeric_limits<Src>::max()) {
+      folly::_t<std::make_unsigned<Tgt>>(std::numeric_limits<Tgt>::max()) <
+      folly::_t<std::make_unsigned<Src>>(std::numeric_limits<Src>::max())) {
     if (greater_than<Tgt, std::numeric_limits<Tgt>::max()>(value)) {
       return makeUnexpected(ConversionCode::ARITH_POSITIVE_OVERFLOW);
     }
@@ -1239,7 +1240,7 @@ convertTo(const Src& value) noexcept {
       return makeUnexpected(ConversionCode::ARITH_NEGATIVE_OVERFLOW);
     }
   }
-  return boost::implicit_cast<Tgt>(value);
+  return static_cast<Tgt>(value);
 }
 
 /**
diff --git a/folly/DiscriminatedPtr.h b/folly/DiscriminatedPtr.h
index 542064bd..7fda96b9 100644
--- a/folly/DiscriminatedPtr.h
+++ b/folly/DiscriminatedPtr.h
@@ -190,8 +190,8 @@ class DiscriminatedPtr {
    * Get the 1-based type index of T in Types.
    */
   template <typename T>
-  size_t typeIndex() const {
-    return dptr_detail::GetTypeIndex<T, Types...>::value;
+  uint16_t typeIndex() const {
+    return uint16_t(dptr_detail::GetTypeIndex<T, Types...>::value);
   }
 
   uint16_t index() const { return data_ >> 48; }
diff --git a/folly/Format.cpp b/folly/Format.cpp
index 11f87e9d..db95ea24 100644
--- a/folly/Format.cpp
+++ b/folly/Format.cpp
@@ -50,14 +50,14 @@ void FormatValue<double>::formatHelper(
   }
 
   // 2+: for null terminator and optional sign shenanigans.
-  constexpr size_t bufLen =
+  constexpr int bufLen =
       2 + constexpr_max(
               2 + DoubleToStringConverter::kMaxFixedDigitsBeforePoint +
                   DoubleToStringConverter::kMaxFixedDigitsAfterPoint,
               constexpr_max(8 + DoubleToStringConverter::kMaxExponentialDigits,
                             7 + DoubleToStringConverter::kMaxPrecisionDigits));
   char buf[bufLen];
-  StringBuilder builder(buf + 1, static_cast<int> (sizeof(buf) - 1));
+  StringBuilder builder(buf + 1, bufLen - 1);
 
   char plusSign;
   switch (arg.sign) {
@@ -159,7 +159,7 @@ void FormatValue<double>::formatHelper(
     prefixLen = 1;
   }
 
-  piece = fbstring(p, len);
+  piece = fbstring(p, size_t(len));
 }
 
 
diff --git a/folly/IndexedMemPool.h b/folly/IndexedMemPool.h
index 9b373da5..1fa43082 100644
--- a/folly/IndexedMemPool.h
+++ b/folly/IndexedMemPool.h
@@ -84,12 +84,13 @@ struct IndexedMemPoolRecycler;
 /// constructed, but delays element construction.  This means that only
 /// elements that are actually returned to the caller get paged into the
 /// process's resident set (RSS).
-template <typename T,
-          int NumLocalLists_ = 32,
-          int LocalListLimit_ = 200,
-          template<typename> class Atom = std::atomic,
-          bool EagerRecycleWhenTrivial = false,
-          bool EagerRecycleWhenNotTrivial = true>
+template <
+    typename T,
+    uint32_t NumLocalLists_ = 32,
+    uint32_t LocalListLimit_ = 200,
+    template <typename> class Atom = std::atomic,
+    bool EagerRecycleWhenTrivial = false,
+    bool EagerRecycleWhenNotTrivial = true>
 struct IndexedMemPool : boost::noncopyable {
   typedef T value_type;
 
@@ -149,7 +150,7 @@ struct IndexedMemPool : boost::noncopyable {
   /// Destroys all of the contained elements
   ~IndexedMemPool() {
     if (!eagerRecycle()) {
-      for (size_t i = size_; i > 0; --i) {
+      for (uint32_t i = size_; i > 0; --i) {
         slots_[i].~Slot();
       }
     }
@@ -160,7 +161,7 @@ struct IndexedMemPool : boost::noncopyable {
   /// simultaneously allocated and not yet recycled.  Because of the
   /// local lists it is possible that more elements than this are returned
   /// successfully
-  size_t capacity() {
+  uint32_t capacity() {
     return capacityForMaxIndex(actualCapacity_);
   }
 
@@ -294,15 +295,15 @@ struct IndexedMemPool : boost::noncopyable {
 
   ////////// fields
 
+  /// the number of bytes allocated from mmap, which is a multiple of
+  /// the page size of the machine
+  size_t mmapLength_;
+
   /// the actual number of slots that we will allocate, to guarantee
   /// that we will satisfy the capacity requested at construction time.
   /// They will be numbered 1..actualCapacity_ (note the 1-based counting),
   /// and occupy slots_[1..actualCapacity_].
-  size_t actualCapacity_;
-
-  /// the number of bytes allocated from mmap, which is a multiple of
-  /// the page size of the machine
-  size_t mmapLength_;
+  uint32_t actualCapacity_;
 
   /// this records the number of slots that have actually been constructed.
   /// To allow use of atomic ++ instead of CAS, we let this overflow.
@@ -325,7 +326,7 @@ struct IndexedMemPool : boost::noncopyable {
 
   ///////////// private methods
 
-  size_t slotIndex(uint32_t idx) const {
+  uint32_t slotIndex(uint32_t idx) const {
     assert(0 < idx &&
            idx <= actualCapacity_ &&
            idx <= size_.load(std::memory_order_acquire));
diff --git a/folly/SocketAddress.cpp b/folly/SocketAddress.cpp
index 88781dad..ec30b5de 100644
--- a/folly/SocketAddress.cpp
+++ b/folly/SocketAddress.cpp
@@ -574,10 +574,10 @@ size_t SocketAddress::hash() const {
   if (external_) {
     enum { kUnixPathMax = sizeof(storage_.un.addr->sun_path) };
     const char *path = storage_.un.addr->sun_path;
-    size_t pathLength = storage_.un.pathLength();
+    auto pathLength = storage_.un.pathLength();
     // TODO: this probably could be made more efficient
-    for (unsigned int n = 0; n < pathLength; ++n) {
-      boost::hash_combine(seed, folly::hash::twang_mix64(path[n]));
+    for (off_t n = 0; n < pathLength; ++n) {
+      boost::hash_combine(seed, folly::hash::twang_mix64(uint64_t(path[n])));
     }
   }
 
@@ -707,7 +707,7 @@ void SocketAddress::updateUnixAddressLength(socklen_t addrlen) {
     // abstract namespace.  honor the specified length
   } else {
     // Call strnlen(), just in case the length was overspecified.
-    socklen_t maxLength = addrlen - offsetof(struct sockaddr_un, sun_path);
+    size_t maxLength = addrlen - offsetof(struct sockaddr_un, sun_path);
     size_t pathLength = strnlen(storage_.un.addr->sun_path, maxLength);
     storage_.un.len =
         socklen_t(offsetof(struct sockaddr_un, sun_path) + pathLength);
@@ -725,11 +725,11 @@ bool SocketAddress::operator<(const SocketAddress& other) const {
     //
     // Note that this still meets the requirements for a strict weak
     // ordering, so we can use this operator<() with standard C++ containers.
-    size_t thisPathLength = storage_.un.pathLength();
+    auto thisPathLength = storage_.un.pathLength();
     if (thisPathLength == 0) {
       return false;
     }
-    size_t otherPathLength = other.storage_.un.pathLength();
+    auto otherPathLength = other.storage_.un.pathLength();
     if (otherPathLength == 0) {
       return true;
     }
diff --git a/folly/build/GenerateFingerprintTables.cpp b/folly/build/GenerateFingerprintTables.cpp
index 4911fb58..3ae3cd64 100644
--- a/folly/build/GenerateFingerprintTables.cpp
+++ b/folly/build/GenerateFingerprintTables.cpp
@@ -71,7 +71,7 @@ void computeTables(FILE* file, const FingerprintPolynomial<DEG>& poly) {
   // where k is the number of bits in the fingerprint (and deg(P)) and
   // Q(X) = q7*X^7 + q6*X^6 + ... + q1*X + q0 is a degree-7 polyonomial
   // whose coefficients are the bits of q.
-  for (int x = 0; x < 256; x++) {
+  for (uint16_t x = 0; x < 256; x++) {
     FingerprintPolynomial<DEG> t;
     t.setHigh8Bits(uint8_t(x));
     for (int i = 0; i < 8; i++) {
diff --git a/folly/detail/CacheLocality.cpp b/folly/detail/CacheLocality.cpp
index 7b9f7e9d..09da2871 100644
--- a/folly/detail/CacheLocality.cpp
+++ b/folly/detail/CacheLocality.cpp
@@ -156,11 +156,12 @@ CacheLocality CacheLocality::readFromSysfsTree(
               // a sub-optimal ordering, but it won't crash
               auto& lhsEquiv = equivClassesByCpu[lhs];
               auto& rhsEquiv = equivClassesByCpu[rhs];
-              for (int i = int(std::min(lhsEquiv.size(), rhsEquiv.size())) - 1;
+              for (ssize_t i = ssize_t(std::min(lhsEquiv.size(), rhsEquiv.size())) - 1;
                    i >= 0;
                    --i) {
-                if (lhsEquiv[i] != rhsEquiv[i]) {
-                  return lhsEquiv[i] < rhsEquiv[i];
+                auto idx = size_t(i);
+                if (lhsEquiv[idx] != rhsEquiv[idx]) {
+                  return lhsEquiv[idx] < rhsEquiv[idx];
                 }
               }
 
diff --git a/folly/detail/IPAddressSource.h b/folly/detail/IPAddressSource.h
index a785c985..53ae7b6d 100644
--- a/folly/detail/IPAddressSource.h
+++ b/folly/detail/IPAddressSource.h
@@ -92,17 +92,21 @@ struct Bytes {
       ba[byteIndex] = one[byteIndex];
       ++byteIndex;
     }
-    auto bitIndex = std::min(mask, (uint8_t)(byteIndex * 8));
+    auto bitIndex = std::min(mask, uint8_t(byteIndex * 8));
+    uint8_t bI = uint8_t(bitIndex / 8);
+    uint8_t bM = uint8_t(bitIndex % 8);
     // Compute the bit up to which the two byte arrays match in the
     // unmatched byte.
     // Here the check is bitIndex < mask since the 0th mask entry in
     // kMasks array holds the mask for masking the MSb in this byte.
     // We could instead make it hold so that no 0th entry masks no
     // bits but thats a useless iteration.
-    while (bitIndex < mask && ((one[bitIndex / 8] & kMasks[bitIndex % 8]) ==
-                               (two[bitIndex / 8] & kMasks[bitIndex % 8]))) {
-      ba[bitIndex / 8] = one[bitIndex / 8] & kMasks[bitIndex % 8];
+    while (bitIndex < mask &&
+           ((one[bI] & kMasks[bM]) == (two[bI] & kMasks[bM]))) {
+      ba[bI] = uint8_t(one[bI] & kMasks[bM]);
       ++bitIndex;
+      bI = uint8_t(bitIndex / 8);
+      bM = uint8_t(bitIndex % 8);
     }
     return {ba, bitIndex};
   }
@@ -190,7 +194,7 @@ inline void writeIntegerString(IntegralType val, char** buffer) {
   }
 
   IntegralType powerToPrint = 1;
-  for (int i = 1; i < DigitCount; ++i) {
+  for (IntegralType i = 1; i < DigitCount; ++i) {
     powerToPrint *= Base;
   }
 
diff --git a/folly/detail/RangeSse42.cpp b/folly/detail/RangeSse42.cpp
index d54a4a36..fc9a759f 100644
--- a/folly/detail/RangeSse42.cpp
+++ b/folly/detail/RangeSse42.cpp
@@ -160,7 +160,7 @@ size_t scanHaystackBlock(const StringPieceLite haystack,
   // This load is safe because needles.size() >= 16
   auto arr2 = _mm_loadu_si128(
       reinterpret_cast<const __m128i*>(needles.data()));
-  size_t b =
+  auto b =
       _mm_cmpestri(arr2, 16, arr1, int(haystack.size() - blockStartIdx), 0);
 
   size_t j = nextAlignedIndex(needles.data());
@@ -174,7 +174,7 @@ size_t scanHaystackBlock(const StringPieceLite haystack,
         arr1,
         int(haystack.size() - blockStartIdx),
         0);
-    b = std::min<size_t>(index, b);
+    b = std::min(index, b);
   }
 
   if (b < 16) {
diff --git a/folly/experimental/EliasFanoCoding.h b/folly/experimental/EliasFanoCoding.h
index acc949a5..b10e63a2 100644
--- a/folly/experimental/EliasFanoCoding.h
+++ b/folly/experimental/EliasFanoCoding.h
@@ -580,8 +580,8 @@ class EliasFanoReader {
       return true;
     }
 
-    size_t upperValue = (value >> numLowerBits_);
-    size_t upperSkip = upperValue - upper_.value();
+    ValueType upperValue = (value >> numLowerBits_);
+    ValueType upperSkip = upperValue - upper_.value();
     // The average density of ones in upper bits is 1/2.
     // LIKELY here seems to make things worse, even for small skips.
     if (upperSkip < 2 * kLinearScanThreshold) {
diff --git a/folly/fibers/test/FibersTest.cpp b/folly/fibers/test/FibersTest.cpp
index f2892c09..fd180173 100644
--- a/folly/fibers/test/FibersTest.cpp
+++ b/folly/fibers/test/FibersTest.cpp
@@ -292,7 +292,7 @@ TEST(FiberManager, addTasksNoncopyable) {
     if (!taskAdded) {
       manager.addTask([&]() {
         std::vector<std::function<std::unique_ptr<int>()>> funcs;
-        for (size_t i = 0; i < 3; ++i) {
+        for (int i = 0; i < 3; ++i) {
           funcs.push_back([i, &pendingFibers]() {
             await([&pendingFibers](Promise<int> promise) {
               pendingFibers.push_back(std::move(promise));
diff --git a/folly/futures/Future-inl.h b/folly/futures/Future-inl.h
index 28935a21..cf3e3344 100644
--- a/folly/futures/Future-inl.h
+++ b/folly/futures/Future-inl.h
@@ -586,7 +586,7 @@ collectAll(InputIterator first, InputIterator last) {
     typename std::iterator_traits<InputIterator>::value_type::value_type T;
 
   struct CollectAllContext {
-    CollectAllContext(int n) : results(n) {}
+    CollectAllContext(size_t n) : results(n) {}
     ~CollectAllContext() {
       p.setValue(std::move(results));
     }
@@ -622,7 +622,7 @@ struct CollectContext {
     Nothing,
     std::vector<Optional<T>>>::type;
 
-  explicit CollectContext(int n) : result(n) {}
+  explicit CollectContext(size_t n) : result(n) {}
   ~CollectContext() {
     if (!threw.exchange(true)) {
       // map Optional<T> -> T
diff --git a/folly/gen/Parallel-inl.h b/folly/gen/Parallel-inl.h
index 18c7c434..1e225c20 100644
--- a/folly/gen/Parallel-inl.h
+++ b/folly/gen/Parallel-inl.h
@@ -49,7 +49,7 @@ class ClosableMPMCQueue {
   void openConsumer() { ++consumers_; }
 
   void closeInputProducer() {
-    int64_t producers = producers_--;
+    size_t producers = producers_--;
     CHECK(producers);
     if (producers == 1) { // last producer
       wakeConsumer_.notifyAll();
@@ -57,7 +57,7 @@ class ClosableMPMCQueue {
   }
 
   void closeOutputConsumer() {
-    int64_t consumers = consumers_--;
+    size_t consumers = consumers_--;
     CHECK(consumers);
     if (consumers == 1) { // last consumer
       wakeProducer_.notifyAll();
diff --git a/folly/gen/String-inl.h b/folly/gen/String-inl.h
index 780d8f08..4d6061bb 100644
--- a/folly/gen/String-inl.h
+++ b/folly/gen/String-inl.h
@@ -76,7 +76,7 @@ inline size_t splitPrefix(StringPiece& in,
   auto p = in.find_first_of(kCRLF);
   if (p != std::string::npos) {
     const auto in_start = in.data();
-    auto delim_len = 1;
+    size_t delim_len = 1;
     in.advance(p);
     // Either remove an MS-DOS CR-LF 2-byte newline, or eat 1 byte at a time.
     if (in.removePrefix(kCRLF)) {
diff --git a/folly/io/Cursor.cpp b/folly/io/Cursor.cpp
index f2dc1c3e..9d51092e 100644
--- a/folly/io/Cursor.cpp
+++ b/folly/io/Cursor.cpp
@@ -43,28 +43,30 @@ void Appender::vprintf(const char* fmt, va_list ap) {
   if (ret < 0) {
     throw std::runtime_error("error formatting printf() data");
   }
+  auto len = size_t(ret);
   // vsnprintf() returns the number of characters that would be printed,
   // not including the terminating nul.
-  if (size_t(ret) < length()) {
+  if (len < length()) {
     // All of the data was successfully written.
-    append(ret);
+    append(len);
     return;
   }
 
   // There wasn't enough room for the data.
   // Allocate more room, and then retry.
-  ensure(ret + 1);
+  ensure(len + 1);
   ret = vsnprintf(reinterpret_cast<char*>(writableData()), length(),
                   fmt, apCopy);
   if (ret < 0) {
     throw std::runtime_error("error formatting printf() data");
   }
-  if (size_t(ret) >= length()) {
+  len = size_t(ret);
+  if (len >= length()) {
     // This shouldn't ever happen.
     throw std::runtime_error("unexpectedly out of buffer space on second "
                              "vsnprintf() attmept");
   }
-  append(ret);
+  append(len);
 }
 
 }}  // folly::io
diff --git a/folly/io/async/AsyncSSLSocket.cpp b/folly/io/async/AsyncSSLSocket.cpp
index 80660352..60f297c2 100644
--- a/folly/io/async/AsyncSSLSocket.cpp
+++ b/folly/io/async/AsyncSSLSocket.cpp
@@ -1313,7 +1313,7 @@ AsyncSSLSocket::performRead(void** buf, size_t* buflen, size_t* offset) {
       if (zero_return(error, bytes)) {
         return ReadResult(bytes);
       }
-      long errError = ERR_get_error();
+      auto errError = ERR_get_error();
       VLOG(6) << "AsyncSSLSocket(fd=" << fd_ << ", "
               << "state=" << state_ << ", "
               << "sslState=" << sslState_ << ", "
diff --git a/folly/io/async/ssl/OpenSSLUtils.cpp b/folly/io/async/ssl/OpenSSLUtils.cpp
index 71ac6287..232fcc41 100644
--- a/folly/io/async/ssl/OpenSSLUtils.cpp
+++ b/folly/io/async/ssl/OpenSSLUtils.cpp
@@ -127,7 +127,7 @@ bool OpenSSLUtils::validatePeerCertNames(X509* cert,
     if ((addr4 != nullptr || addr6 != nullptr) && name->type == GEN_IPADD) {
       // Extra const-ness for paranoia
       unsigned char const* const rawIpStr = name->d.iPAddress->data;
-      int const rawIpLen = name->d.iPAddress->length;
+      size_t const rawIpLen = size_t(name->d.iPAddress->length);
 
       if (rawIpLen == 4 && addr4 != nullptr) {
         if (::memcmp(rawIpStr, &addr4->sin_addr, rawIpLen) == 0) {
@@ -260,7 +260,11 @@ int OpenSSLUtils::getBioFd(BIO* b, int* fd) {
 
 void OpenSSLUtils::setBioFd(BIO* b, int fd, int flags) {
 #ifdef _WIN32
-  SOCKET sock = portability::sockets::fd_to_socket(fd);
+  SOCKET socket = portability::sockets::fd_to_socket(fd);
+  // Internally OpenSSL uses this as an int for reasons completely
+  // beyond any form of sanity, so we do the cast ourselves to avoid
+  // the warnings that would be generated.
+  int sock = int(socket);
 #else
   int sock = fd;
 #endif
diff --git a/folly/io/test/IOBufCursorTest.cpp b/folly/io/test/IOBufCursorTest.cpp
index cae9c496..2dc1766c 100644
--- a/folly/io/test/IOBufCursorTest.cpp
+++ b/folly/io/test/IOBufCursorTest.cpp
@@ -451,8 +451,8 @@ TEST(IOBuf, Appender) {
   append(head, "hello");
 
   Appender app(head.get(), 10);
-  uint32_t cap = head->capacity();
-  uint32_t len1 = app.length();
+  auto cap = head->capacity();
+  auto len1 = app.length();
   EXPECT_EQ(cap - 5, len1);
   app.ensure(len1);  // won't grow
   EXPECT_EQ(len1, app.length());
diff --git a/folly/io/test/IOBufQueueTest.cpp b/folly/io/test/IOBufQueueTest.cpp
index 9fc35120..bc2a8901 100644
--- a/folly/io/test/IOBufQueueTest.cpp
+++ b/folly/io/test/IOBufQueueTest.cpp
@@ -43,8 +43,7 @@ struct Initializer {
 };
 Initializer initializer;
 
-unique_ptr<IOBuf>
-stringToIOBuf(const char* s, uint32_t len) {
+unique_ptr<IOBuf> stringToIOBuf(const char* s, size_t len) {
   unique_ptr<IOBuf> buf = IOBuf::create(len);
   memcpy(buf->writableTail(), s, len);
   buf->append(len);
diff --git a/folly/json.cpp b/folly/json.cpp
index 4ecb264f..48ef486e 100644
--- a/folly/json.cpp
+++ b/folly/json.cpp
@@ -178,25 +178,20 @@ private:
  serialization_opts const& opts_;
 };
 
-  //////////////////////////////////////////////////////////////////////
-
-  struct ParseError : std::runtime_error {
-    explicit ParseError(int line)
-      : std::runtime_error(to<std::string>("json parse error on line ", line))
-    {}
-
-    explicit ParseError(int line, std::string const& context,
-        std::string const& expected)
-      : std::runtime_error(to<std::string>("json parse error on line ", line,
-          !context.empty() ? to<std::string>(" near `", context, '\'')
-                          : "",
-          ": ", expected))
-    {}
-
-    explicit ParseError(std::string const& msg)
-      : std::runtime_error("json parse error: " + msg)
-    {}
-  };
+//////////////////////////////////////////////////////////////////////
+
+struct ParseError : std::runtime_error {
+  explicit ParseError(
+      unsigned int line,
+      std::string const& context,
+      std::string const& expected)
+      : std::runtime_error(to<std::string>(
+            "json parse error on line ",
+            line,
+            !context.empty() ? to<std::string>(" near `", context, '\'') : "",
+            ": ",
+            expected)) {}
+};
 
 // Wraps our input buffer with some helper functions.
 struct Input {
diff --git a/folly/portability/Stdio.cpp b/folly/portability/Stdio.cpp
index 1e363ee0..4b3c664b 100755
--- a/folly/portability/Stdio.cpp
+++ b/folly/portability/Stdio.cpp
@@ -27,14 +27,16 @@ int dprintf(int fd, const char* fmt, ...) {
   va_start(args, fmt);
   SCOPE_EXIT { va_end(args); };
 
-  int len = vsnprintf(nullptr, 0, fmt, args);
-  if (len <= 0) {
+  int ret = vsnprintf(nullptr, 0, fmt, args);
+  if (ret <= 0) {
     return -1;
   }
+  size_t len = size_t(ret);
   char* buf = new char[len + 1];
   SCOPE_EXIT { delete[] buf; };
-  if (vsnprintf(buf, len + 1, fmt, args) == len && write(fd, buf, len) == len) {
-    return len;
+  if (size_t(vsnprintf(buf, len + 1, fmt, args)) == len &&
+      write(fd, buf, len) == ssize_t(len)) {
+    return ret;
   }
 
   return -1;
@@ -53,8 +55,8 @@ int vasprintf(char** dest, const char* format, va_list ap) {
   if (len <= 0) {
     return -1;
   }
-  char* buf = *dest = (char*)malloc(len + 1);
-  if (vsnprintf(buf, len + 1, format, ap) == len) {
+  char* buf = *dest = (char*)malloc(size_t(len + 1));
+  if (vsnprintf(buf, size_t(len + 1), format, ap) == len) {
     return len;
   }
   free(buf);
diff --git a/folly/stats/BucketedTimeSeries-defs.h b/folly/stats/BucketedTimeSeries-defs.h
index ddb77569..a24aff86 100644
--- a/folly/stats/BucketedTimeSeries-defs.h
+++ b/folly/stats/BucketedTimeSeries-defs.h
@@ -52,15 +52,15 @@ template <typename VT, typename CT>
 bool BucketedTimeSeries<VT, CT>::addValue(
     TimePoint now,
     const ValueType& val,
-    int64_t times) {
-  return addValueAggregated(now, val * times, times);
+    uint64_t times) {
+  return addValueAggregated(now, val * ValueType(times), times);
 }
 
 template <typename VT, typename CT>
 bool BucketedTimeSeries<VT, CT>::addValueAggregated(
     TimePoint now,
     const ValueType& total,
-    int64_t nsamples) {
+    uint64_t nsamples) {
   if (isAllTime()) {
     if (UNLIKELY(empty())) {
       firstTime_ = now;
diff --git a/folly/stats/BucketedTimeSeries.h b/folly/stats/BucketedTimeSeries.h
index 0b531d8f..14468d61 100644
--- a/folly/stats/BucketedTimeSeries.h
+++ b/folly/stats/BucketedTimeSeries.h
@@ -104,13 +104,13 @@ class BucketedTimeSeries {
   /*
    * Adds the value 'val' the given number of 'times' at time 'now'
    */
-  bool addValue(TimePoint now, const ValueType& val, int64_t times);
+  bool addValue(TimePoint now, const ValueType& val, uint64_t times);
 
   /*
    * Adds the value 'total' as the sum of 'nsamples' samples
    */
   bool
-  addValueAggregated(TimePoint now, const ValueType& total, int64_t nsamples);
+  addValueAggregated(TimePoint now, const ValueType& total, uint64_t nsamples);
 
   /*
    * Updates the container to the specified time, doing all the necessary
@@ -413,11 +413,11 @@ class BucketedTimeSeries {
   bool addValue(Duration now, const ValueType& val) {
     return addValueAggregated(TimePoint(now), val, 1);
   }
-  bool addValue(Duration now, const ValueType& val, int64_t times) {
+  bool addValue(Duration now, const ValueType& val, uint64_t times) {
     return addValueAggregated(TimePoint(now), val * ValueType(times), times);
   }
   bool
-  addValueAggregated(Duration now, const ValueType& total, int64_t nsamples) {
+  addValueAggregated(Duration now, const ValueType& total, uint64_t nsamples) {
     return addValueAggregated(TimePoint(now), total, nsamples);
   }
   size_t update(Duration now) {
diff --git a/folly/stats/MultiLevelTimeSeries.h b/folly/stats/MultiLevelTimeSeries.h
index 8a84dd1c..10e41e59 100644
--- a/folly/stats/MultiLevelTimeSeries.h
+++ b/folly/stats/MultiLevelTimeSeries.h
@@ -432,7 +432,7 @@ class MultiLevelTimeSeries {
   // or flush() is called.
   TimePoint cachedTime_;
   ValueType cachedSum_;
-  int cachedCount_;
+  uint64_t cachedCount_;
 };
 
 } // folly
diff --git a/folly/stats/TimeseriesHistogram.h b/folly/stats/TimeseriesHistogram.h
index dd286b92..5b6bc8fb 100644
--- a/folly/stats/TimeseriesHistogram.h
+++ b/folly/stats/TimeseriesHistogram.h
@@ -135,7 +135,7 @@ class TimeseriesHistogram {
   }
 
   /* Total sum of values at the given timeseries level (all buckets). */
-  ValueType sum(int level) const {
+  ValueType sum(size_t level) const {
     ValueType total = ValueType();
     for (size_t b = 0; b < buckets_.getNumBuckets(); ++b) {
       total += buckets_.getByIndex(b).sum(level);
@@ -154,7 +154,7 @@ class TimeseriesHistogram {
 
   /* Average of values at the given timeseries level (all buckets). */
   template <typename ReturnType = double>
-  ReturnType avg(int level) const {
+  ReturnType avg(size_t level) const {
     auto total = ValueType();
     uint64_t nsamples = 0;
     computeAvgData(&total, &nsamples, level);
diff --git a/folly/test/DeterministicSchedule.cpp b/folly/test/DeterministicSchedule.cpp
index a3b0e44c..87ecb762 100644
--- a/folly/test/DeterministicSchedule.cpp
+++ b/folly/test/DeterministicSchedule.cpp
@@ -43,7 +43,7 @@ static std::unordered_map<detail::Futex<DeterministicAtomic>*,
 static std::mutex futexLock;
 
 DeterministicSchedule::DeterministicSchedule(
-    const std::function<int(int)>& scheduler)
+    const std::function<size_t(size_t)>& scheduler)
     : scheduler_(scheduler), nextThreadId_(1), step_(0) {
   assert(tls_sem == nullptr);
   assert(tls_sched == nullptr);
@@ -63,16 +63,16 @@ DeterministicSchedule::~DeterministicSchedule() {
   beforeThreadExit();
 }
 
-std::function<int(int)> DeterministicSchedule::uniform(long seed) {
+std::function<size_t(size_t)> DeterministicSchedule::uniform(uint64_t seed) {
   auto rand = std::make_shared<std::ranlux48>(seed);
   return [rand](size_t numActive) {
-    auto dist = std::uniform_int_distribution<int>(0, numActive - 1);
+    auto dist = std::uniform_int_distribution<size_t>(0, numActive - 1);
     return dist(*rand);
   };
 }
 
 struct UniformSubset {
-  UniformSubset(long seed, int subsetSize, int stepsBetweenSelect)
+  UniformSubset(uint64_t seed, size_t subsetSize, size_t stepsBetweenSelect)
       : uniform_(DeterministicSchedule::uniform(seed)),
         subsetSize_(subsetSize),
         stepsBetweenSelect_(stepsBetweenSelect),
@@ -88,13 +88,13 @@ struct UniformSubset {
   }
 
  private:
-  std::function<int(int)> uniform_;
+  std::function<size_t(size_t)> uniform_;
   const size_t subsetSize_;
-  const int stepsBetweenSelect_;
+  const size_t stepsBetweenSelect_;
 
-  int stepsLeft_;
+  size_t stepsLeft_;
   // only the first subsetSize_ is properly randomized
-  std::vector<int> perm_;
+  std::vector<size_t> perm_;
 
   void adjustPermSize(size_t numActive) {
     if (perm_.size() > numActive) {
@@ -112,15 +112,14 @@ struct UniformSubset {
 
   void shufflePrefix() {
     for (size_t i = 0; i < std::min(perm_.size() - 1, subsetSize_); ++i) {
-      int j = uniform_(perm_.size() - i) + i;
+      size_t j = uniform_(perm_.size() - i) + i;
       std::swap(perm_[i], perm_[j]);
     }
   }
 };
 
-std::function<int(int)> DeterministicSchedule::uniformSubset(long seed,
-                                                             int n,
-                                                             int m) {
+std::function<size_t(size_t)>
+DeterministicSchedule::uniformSubset(uint64_t seed, size_t n, size_t m) {
   auto gen = std::make_shared<UniformSubset>(seed, n, m);
   return [=](size_t numActive) { return (*gen)(numActive); };
 }
@@ -148,7 +147,7 @@ void DeterministicSchedule::afterSharedAccess(bool success) {
   sem_post(sched->sems_[sched->scheduler_(sched->sems_.size())]);
 }
 
-int DeterministicSchedule::getRandNumber(int n) {
+size_t DeterministicSchedule::getRandNumber(size_t n) {
   if (tls_sched) {
     return tls_sched->scheduler_(n);
   }
diff --git a/folly/test/DeterministicSchedule.h b/folly/test/DeterministicSchedule.h
index fbcee274..e4a6f95d 100644
--- a/folly/test/DeterministicSchedule.h
+++ b/folly/test/DeterministicSchedule.h
@@ -83,7 +83,8 @@ class DeterministicSchedule : boost::noncopyable {
    * DeterministicSchedule::thread on a thread participating in this
    * schedule) to participate in a deterministic schedule.
    */
-  explicit DeterministicSchedule(const std::function<int(int)>& scheduler);
+  explicit DeterministicSchedule(
+      const std::function<size_t(size_t)>& scheduler);
 
   /** Completes the schedule. */
   ~DeterministicSchedule();
@@ -95,7 +96,7 @@ class DeterministicSchedule : boost::noncopyable {
    * inter-thread communication are random variables following a poisson
    * distribution.
    */
-  static std::function<int(int)> uniform(long seed);
+  static std::function<size_t(size_t)> uniform(uint64_t seed);
 
   /**
    * Returns a scheduling function that chooses a subset of the active
@@ -103,9 +104,8 @@ class DeterministicSchedule : boost::noncopyable {
    * runnable thread.  The subset is chosen with size n, and the choice
    * is made every m steps.
    */
-  static std::function<int(int)> uniformSubset(long seed,
-                                               int n = 2,
-                                               int m = 64);
+  static std::function<size_t(size_t)>
+  uniformSubset(uint64_t seed, size_t n = 2, size_t m = 64);
 
   /** Obtains permission for the current thread to perform inter-thread
    *  communication. */
@@ -166,7 +166,7 @@ class DeterministicSchedule : boost::noncopyable {
 
   /** Used scheduler_ to get a random number b/w [0, n). If tls_sched is
    *  not set-up it falls back to std::rand() */
-  static int getRandNumber(int n);
+  static size_t getRandNumber(size_t n);
 
   /** Deterministic implemencation of getcpu */
   static int getcpu(unsigned* cpu, unsigned* node, void* unused);
@@ -194,7 +194,7 @@ class DeterministicSchedule : boost::noncopyable {
   static thread_local AuxAct tls_aux_act;
   static AuxChk aux_chk;
 
-  std::function<int(int)> scheduler_;
+  std::function<size_t(size_t)> scheduler_;
   std::vector<sem_t*> sems_;
   std::unordered_set<std::thread::id> active_;
   unsigned nextThreadId_;
diff --git a/folly/test/IndexedMemPoolTest.cpp b/folly/test/IndexedMemPoolTest.cpp
index 109c63f5..69e83124 100644
--- a/folly/test/IndexedMemPoolTest.cpp
+++ b/folly/test/IndexedMemPoolTest.cpp
@@ -50,7 +50,7 @@ TEST(IndexedMemPool, unique_ptr) {
 
 TEST(IndexedMemPool, no_starvation) {
   const int count = 1000;
-  const int poolSize = 100;
+  const uint32_t poolSize = 100;
 
   typedef DeterministicSchedule Sched;
   Sched sched(Sched::uniform(0));
@@ -157,16 +157,16 @@ TEST(IndexedMemPool, locate_elem) {
 }
 
 struct NonTrivialStruct {
-  static FOLLY_TLS int count;
+  static FOLLY_TLS size_t count;
 
-  int elem_;
+  size_t elem_;
 
   NonTrivialStruct() {
     elem_ = 0;
     ++count;
   }
 
-  NonTrivialStruct(std::unique_ptr<std::string>&& arg1, int arg2) {
+  NonTrivialStruct(std::unique_ptr<std::string>&& arg1, size_t arg2) {
     elem_ = arg1->length() + arg2;
     ++count;
   }
@@ -176,7 +176,7 @@ struct NonTrivialStruct {
   }
 };
 
-FOLLY_TLS int NonTrivialStruct::count;
+FOLLY_TLS size_t NonTrivialStruct::count;
 
 TEST(IndexedMemPool, eager_recycle) {
   typedef IndexedMemPool<NonTrivialStruct> Pool;
diff --git a/folly/test/MergeTest.cpp b/folly/test/MergeTest.cpp
index 16e23e1c..bfd5c2a1 100644
--- a/folly/test/MergeTest.cpp
+++ b/folly/test/MergeTest.cpp
@@ -30,7 +30,7 @@ TEST(MergeTest, NonOverlapping) {
                b.begin(), b.end(),
                std::back_inserter(c));
   EXPECT_EQ(8, c.size());
-  for (int i = 0; i < 8; ++i) {
+  for (size_t i = 0; i < 8; ++i) {
     EXPECT_EQ(i, c[i]);
   }
 }