From: Mike Kolupaev <kolmike@fb.com>
Date: Tue, 23 Dec 2014 12:28:32 +0000 (-0800)
Subject: folly::AtomicHashMap: fixed race between erase() and find()
X-Git-Tag: v0.22.0~74
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=b4746252b468b726482d491994d9be8905de13a4;p=folly.git

folly::AtomicHashMap: fixed race between erase() and find()

Summary: advancePastEmpty() was called from the constructor of every iterator, but it only makes sense for begin(). For find() it is harmful: if the key has just been erased, find() would skip past the now-empty slot and return an iterator to the next occupied element instead of end(). I suspect the same race was possible for insert(), but I haven't tried to reproduce it.
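
To make the failure mode concrete, here is a minimal, self-contained sketch of the pattern this diff moves to (hypothetical, simplified names, not the real AtomicHashArray/AtomicHashMap API): the iterator constructor no longer skips empty slots, only begin() does, so find() on a just-erased key can only yield end(), never a neighbouring element.

#include <array>
#include <cstddef>
#include <iostream>

// Toy open-addressed array: 0 marks an empty slot. Mirrors the shape of the
// fix: advancePastEmpty() is invoked explicitly by begin() and nowhere else.
struct TinyArray {
  static constexpr std::size_t kCapacity = 8;
  std::array<int, kCapacity> cells{};  // value-initialized to 0 (empty)

  struct iterator {
    TinyArray* a;
    std::size_t offset;

    // Skip empty cells. Only begin() calls this; find() must not, otherwise a
    // concurrent erase of the key would make find() "slide" to the next slot.
    void advancePastEmpty() {
      while (offset < kCapacity && a->cells[offset] == 0) {
        ++offset;
      }
    }
    int& operator*() const { return a->cells[offset]; }
    bool operator==(const iterator& o) const { return offset == o.offset; }
    bool operator!=(const iterator& o) const { return offset != o.offset; }
  };

  iterator begin() {
    iterator it{this, 0};
    it.advancePastEmpty();  // the only place empty slots are skipped
    return it;
  }
  iterator end() { return iterator{this, kCapacity}; }

  // Returns an iterator at exactly `slot`, or end() if that slot is empty.
  iterator find(std::size_t slot) {
    return cells[slot] != 0 ? iterator{this, slot} : end();
  }
};

int main() {
  TinyArray a;
  a.cells[3] = 30;
  a.cells[4] = 40;
  a.cells[3] = 0;                               // pretend a concurrent erase() just emptied slot 3
  std::cout << (a.find(3) == a.end()) << '\n';  // prints 1: find() returns end(), it does not slide to slot 4
  std::cout << *a.begin() << '\n';              // prints 40: begin() still skips leading empties
}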

Test Plan: Added a test for it. It fails without these changes.
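
For example (the binary name is illustrative; use whatever your build produces for folly/test/AtomicHashMapTest.cpp):

  ./atomic_hash_map_test --gtest_filter=Ahm.erase_find_race

Without the fix, the read thread's ASSERT_EQ(k, it->second) trips when find() slides past the erased key; with it, the test passes.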

Reviewed By: delong.j@fb.com

Subscribers: folly-diffs@, lovro

FB internal diff: D1751280

Tasks: 5841499

Signature: t1:1751280:1419107193:71311ff68d92d0a4dcf1941dacdfdc23c25255cc
---

diff --git a/folly/AtomicHashArray-inl.h b/folly/AtomicHashArray-inl.h
index 95c86df3..bfc1637d 100644
--- a/folly/AtomicHashArray-inl.h
+++ b/folly/AtomicHashArray-inl.h
@@ -353,15 +353,19 @@ struct AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::aha_iterator
   explicit aha_iterator(ContT* array, size_t offset)
       : aha_(array)
       , offset_(offset)
-  {
-    advancePastEmpty();
-  }
+  {}
 
   // Returns unique index that can be used with findAt().
   // WARNING: The following function will fail silently for hashtable
   // with capacity > 2^32
   uint32_t getIndex() const { return offset_; }
 
+  void advancePastEmpty() {
+    while (offset_ < aha_->capacity_ && !isValid()) {
+      ++offset_;
+    }
+  }
+
  private:
   friend class AtomicHashArray;
   friend class boost::iterator_core_access;
@@ -379,12 +383,6 @@ struct AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::aha_iterator
     return aha_->cells_[offset_];
   }
 
-  void advancePastEmpty() {
-    while (offset_ < aha_->capacity_ && !isValid()) {
-      ++offset_;
-    }
-  }
-
   bool isValid() const {
     KeyT key = acquireLoadKey(aha_->cells_[offset_]);
     return key != aha_->kEmptyKey_  &&
diff --git a/folly/AtomicHashArray.h b/folly/AtomicHashArray.h
index 18e883a4..f00e3074 100644
--- a/folly/AtomicHashArray.h
+++ b/folly/AtomicHashArray.h
@@ -183,9 +183,18 @@ class AtomicHashArray : boost::noncopyable {
 
   bool empty() const { return size() == 0; }
 
-  iterator begin()             { return iterator(this, 0); }
+  iterator begin() {
+    iterator it(this, 0);
+    it.advancePastEmpty();
+    return it;
+  }
+  const_iterator begin() const {
+    const_iterator it(this, 0);
+    it.advancePastEmpty();
+    return it;
+  }
+
   iterator end()               { return iterator(this, capacity_); }
-  const_iterator begin() const { return const_iterator(this, 0); }
   const_iterator end() const   { return const_iterator(this, capacity_); }
 
   // See AtomicHashMap::findAt - access elements directly
diff --git a/folly/AtomicHashMap-inl.h b/folly/AtomicHashMap-inl.h
index 11167b8d..c4a0f56b 100644
--- a/folly/AtomicHashMap-inl.h
+++ b/folly/AtomicHashMap-inl.h
@@ -370,9 +370,7 @@ struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::ahm_iterator
       : ahm_(ahm)
       , subMap_(subMap)
       , subIt_(subIt)
-  {
-    checkAdvanceToNextSubmap();
-  }
+  {}
 
   friend class boost::iterator_core_access;
 
diff --git a/folly/AtomicHashMap.h b/folly/AtomicHashMap.h
index f1fd787a..a5352b00 100644
--- a/folly/AtomicHashMap.h
+++ b/folly/AtomicHashMap.h
@@ -318,17 +318,21 @@ class AtomicHashMap : boost::noncopyable {
   }
 
   iterator begin() {
-    return iterator(this, 0,
+    iterator it(this, 0,
       subMaps_[0].load(std::memory_order_relaxed)->begin());
-  }
-
-  iterator end() {
-    return iterator();
+    it.checkAdvanceToNextSubmap();
+    return it;
   }
 
   const_iterator begin() const {
-    return const_iterator(this, 0,
+    const_iterator it(this, 0,
       subMaps_[0].load(std::memory_order_relaxed)->begin());
+    it.checkAdvanceToNextSubmap();
+    return it;
+  }
+
+  iterator end() {
+    return iterator();
   }
 
   const_iterator end() const {
diff --git a/folly/test/AtomicHashMapTest.cpp b/folly/test/AtomicHashMapTest.cpp
index 16e17e14..8a88031e 100644
--- a/folly/test/AtomicHashMapTest.cpp
+++ b/folly/test/AtomicHashMapTest.cpp
@@ -618,6 +618,44 @@ TEST(Ahm, atomic_hash_array_insert_race) {
   }
 }
 
+// Repro for T#5841499. Race between erase() and find() on the same key.
+TEST(Ahm, erase_find_race) {
+  const uint64_t limit = 10000;
+  AtomicHashMap<uint64_t, uint64_t> map(limit + 10);
+  std::atomic<uint64_t> key {1};
+
+  // Invariant: all values are equal to their keys.
+  // At any moment there are one or two consecutive keys in the map.
+
+  std::thread write_thread([&]() {
+    while (true) {
+      uint64_t k = ++key;
+      if (k > limit) {
+        break;
+      }
+      map.insert(k + 1, k + 1);
+      map.erase(k);
+    }
+  });
+
+  std::thread read_thread([&]() {
+    while (true) {
+      uint64_t k = key.load();
+      if (k > limit) {
+        break;
+      }
+
+      auto it = map.find(k);
+      if (it != map.end()) {
+        ASSERT_EQ(k, it->second);
+      }
+    }
+  });
+
+  read_thread.join();
+  write_thread.join();
+}
+
 // Repro for a bug when iterator didn't skip empty submaps.
 TEST(Ahm, iterator_skips_empty_submaps) {
   AtomicHashMap<uint64_t, uint64_t>::Config config;