1 #ifndef CLIFFC_HASHTABLE_H
2 #define CLIFFC_HASHTABLE_H
9 #define MODEL_ASSERT assert
11 #include <model-assert.h>
16 #include <cdsannotate.h>
17 #include <specannotation.h>
18 #include <model_memory.h>
24 This header file declares and defines a simplified version of Cliff Click's
25 NonblockingHashMap. It contains all the necessary structures and main
26 functions. In simplified_cliffc_hashtable.cc file, it has the definition for
30 template<typename TypeK, typename TypeV>
31 class cliffc_hashtable;
34 Corresponding to the Object[] array in Cliff Click's Java implementation.
35 It keeps the first two slots for CHM (Hashtable control unit) and the hash
36 records (an array of hash used for fast negative key-equality check).
44 int real_size = sz * 2 + 2;
45 _data = new atomic<void*>[real_size];
46 // The control block should be initialized in resize()
47 // Init the hash record array
48 int *hashes = new int[_size];
50 for (i = 0; i < _size; i++) {
53 // Init the data to Null slot
54 for (i = 2; i < real_size; i++) {
55 _data[i].store(NULL, memory_order_relaxed);
57 _data[1].store(hashes, memory_order_relaxed);
61 int *hashes = (int*) _data[1].load(memory_order_relaxed);
71 slot(bool prime, void *ptr) {
79 TypeK must have defined function "int hashCode()" which return the hash
80 code for the its object, and "int equals(TypeK anotherKey)" which is
81 used to judge equality.
82 TypeK and TypeV should define their own copy constructor.
89 template<typename TypeK, typename TypeV>
90 class cliffc_hashtable {
92 # The synchronization we have for the hashtable gives us the property of
93 # serializability, so we should have a sequential hashtable when we check the
94 # correctness. The key thing is to identify all the commit point.
99 CLASS = cliffc_hashtable;
106 map = new_spec_table_default(equals_key);
107 id_map = new_spec_table_default(equals_id);
111 bool equals_key(void *ptr1, void *ptr2) {
112 TypeK *key1 = (TypeK*) ptr1,
113 *key2 = (TypeK*) ptr2;
114 if (key1 == NULL || key2 == NULL)
116 return key1->equals(key2);
120 bool equals_val(void *ptr1, void *ptr2) {
123 TypeV *val1 = (TypeV*) ptr1,
124 *val2 = (TypeV*) ptr2;
125 if (val1 == NULL || val2 == NULL)
127 return val1->equals(val2);
131 bool equals_id(void *ptr1, void *ptr2) {
132 id_tag_t *id1 = (id_tag_t*) ptr1,
133 *id2 = (id_tag_t*) ptr2;
134 if (id1 == NULL || id2 == NULL)
136 return (*id1).tag == (*id2).tag;
140 # Update the tag for the current key slot if the corresponding tag
141 # is NULL, otherwise just return that tag. It will update the next
142 # available tag too if it requires a new tag for that key slot.
143 call_id_t getKeyTag(TypeK *key) {
144 if (!spec_table_contains(id_map, key)) {
145 call_id_t cur_id = current(tag);
146 spec_table_put(id_map, key, (void*) cur_id);
150 call_id_t res = (call_id_t) spec_table_get(id_map, key);
167 PutIfAbsent(COND_PutIfAbsentSucc),
169 RemoveIfMatch(COND_RemoveIfMatchSucc),
171 ReplaceIfMatch(COND_ReplaceIfMatchSucc)
174 //Write_interface -> Read_interface
182 The control structure for the hashtable
186 friend class cliffc_hashtable;
188 atomic<kvs_data*> _newkvs;
190 // Size of active K,V pairs
193 // Count of used slots
196 // The next part of the table to copy
197 atomic_int _copy_idx;
199 // Work-done reporting
200 atomic_int _copy_done;
204 _newkvs.store(NULL, memory_order_relaxed);
205 _size.store(size, memory_order_relaxed);
206 _slots.store(0, memory_order_relaxed);
208 _copy_idx.store(0, memory_order_relaxed);
209 _copy_done.store(0, memory_order_relaxed);
216 // Heuristic to decide if the table is too full
217 bool table_full(int reprobe_cnt, int len) {
219 reprobe_cnt >= REPROBE_LIMIT &&
220 _slots.load(memory_order_relaxed) >= reprobe_limit(len);
223 kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
224 model_print("resizing...\n");
225 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
229 // No copy in-progress, start one; Only double the table size
230 int oldlen = kvs->_size;
231 int sz = _size.load(memory_order_relaxed);
234 // Just follow Cliff Click's heuristic to decide the new size
235 if (sz >= (oldlen >> 2)) { // If we are 25% full
236 newsz = oldlen << 1; // Double size
237 if (sz >= (oldlen >> 1))
238 newsz = oldlen << 2; // Double double size
241 // We do not record the record timestamp
242 if (newsz <= oldlen) newsz = oldlen << 1;
243 // Do not shrink ever
244 if (newsz < oldlen) newsz = oldlen;
246 // Last check cause the 'new' below is expensive
247 newkvs = _newkvs.load(memory_order_acquire);
248 model_print("hey1\n");
249 if (newkvs != NULL) return newkvs;
251 newkvs = new kvs_data(newsz);
252 void *chm = (void*) new CHM(sz);
253 model_print("hey2\n");
254 newkvs->_data[0].store(chm, memory_order_relaxed);
256 kvs_data *cur_newkvs;
257 // Another check after the slow allocation
258 if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
260 // CAS the _newkvs to the allocated table
261 kvs_data *desired = (kvs_data*) NULL;
262 kvs_data *expected = (kvs_data*) newkvs;
263 if (!_newkvs.compare_exchange_strong(desired, expected, memory_order_release,
264 memory_order_relaxed)) {
265 // Should clean the allocated area
267 newkvs = _newkvs.load(memory_order_acquire);
272 void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
274 MODEL_ASSERT (get_chm(oldkvs) == this);
275 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
276 int oldlen = oldkvs->_size;
277 int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
279 // Just follow Cliff Click's code here
280 int panic_start = -1;
282 while (_copy_done.load(memory_order_relaxed) < oldlen) {
283 copyidx = _copy_idx.load(memory_order_relaxed);
284 if (panic_start == -1) { // No painc
285 copyidx = _copy_idx.load(memory_order_relaxed);
286 while (copyidx < (oldlen << 1) &&
287 !_copy_idx.compare_exchange_strong(copyidx, copyidx +
288 min_copy_work, memory_order_release, memory_order_relaxed))
289 copyidx = _copy_idx.load(memory_order_relaxed);
290 if (!(copyidx < (oldlen << 1)))
291 panic_start = copyidx;
294 // Now copy the chunk of work we claimed
296 for (int i = 0; i < min_copy_work; i++)
297 if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
301 copy_check_and_promote(topmap, oldkvs, workdone);
303 copyidx += min_copy_work;
304 if (!copy_all && panic_start == -1)
305 return; // We are done with the work we claim
307 copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
310 kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
311 *oldkvs, int idx, void *should_help) {
312 kvs_data *newkvs = _newkvs.load(memory_order_relaxed);
313 // We're only here cause the caller saw a Prime
314 if (copy_slot(topmap, idx, oldkvs, newkvs))
315 copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
316 return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
319 void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
320 oldkvs, int workdone) {
321 int oldlen = oldkvs->_size;
322 int copyDone = _copy_done.load(memory_order_relaxed);
325 copyDone = _copy_done.load(memory_order_relaxed);
326 if (_copy_done.compare_exchange_weak(copyDone, copyDone +
327 workdone, memory_order_relaxed, memory_order_relaxed))
332 // Promote the new table to the current table
333 if (copyDone + workdone == oldlen &&
334 topmap->_kvs.load(memory_order_relaxed) == oldkvs) {
335 kvs_data *newkvs = _newkvs.load(memory_order_relaxed);
336 topmap->_kvs.compare_exchange_strong(oldkvs, newkvs, memory_order_release,
337 memory_order_relaxed);
341 bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
344 while ((key_slot = key(oldkvs, idx)) == NULL)
345 CAS_key(oldkvs, idx, NULL, TOMBSTONE);
347 // First CAS old to Prime
348 slot *oldval = val(oldkvs, idx);
349 while (!is_prime(oldval)) {
350 slot *box = (oldval == NULL || oldval == TOMBSTONE)
351 ? TOMBPRIME : new slot(true, oldval->_ptr);
352 if (CAS_val(oldkvs, idx, oldval, box)) {
353 if (box == TOMBPRIME)
354 return 1; // Copy done
355 // Otherwise we CAS'd the box
356 oldval = box; // Record updated oldval
359 oldval = val(oldkvs, idx); // Else re-try
362 if (oldval == TOMBPRIME) return false; // Copy already completed here
364 slot *old_unboxed = new slot(false, oldval->_ptr);
365 int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
368 // Old value is exposed in the new table
369 while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
370 oldval = val(oldkvs, idx);
372 return copied_into_new;
379 static const int Default_Init_Size = 4; // Intial table size
381 static slot* const MATCH_ANY;
382 static slot* const NO_MATCH_OLD;
384 static slot* const TOMBPRIME;
385 static slot* const TOMBSTONE;
387 static const int REPROBE_LIMIT = 10; // Forces a table-resize
389 atomic<kvs_data*> _kvs;
393 // Should initialize the CHM for the construction of the table
394 // For other CHM in kvs_data, they should be initialized in resize()
395 // because the size is determined dynamically
401 kvs_data *kvs = new kvs_data(Default_Init_Size);
402 void *chm = (void*) new CHM(0);
403 kvs->_data[0].store(chm, memory_order_relaxed);
404 _kvs.store(kvs, memory_order_relaxed);
407 cliffc_hashtable(int init_size) {
408 // Should initialize the CHM for the construction of the table
409 // For other CHM in kvs_data, they should be initialized in resize()
410 // because the size is determined dynamically
417 kvs_data *kvs = new kvs_data(init_size);
418 void *chm = (void*) new CHM(0);
419 kvs->_data[0].store(chm, memory_order_relaxed);
420 _kvs.store(kvs, memory_order_relaxed);
426 @Commit_point_set: Get_Success_Point1 | Get_Success_Point2 | Get_Success_Point3
429 TypeV *_Old_Val = (TypeV*) spec_table_get(map, key);
430 //bool passed = equals_val(_Old_Val, __RET__);
433 int old = _Old_Val == NULL ? 0 : _Old_Val->_val;
434 int ret = __RET__ == NULL ? 0 : __RET__->_val;
435 model_print("Get: key: %d, _Old_Val: %d, RET: %d\n",
436 key->_val, old, ret);
439 //__RET__ == NULL ? true : equals_val(_Old_Val, __RET__)
440 equals_val(_Old_Val, __RET__)
443 TypeV* get(TypeK *key) {
444 slot *key_slot = new slot(false, key);
445 int fullhash = hash(key_slot);
446 kvs_data *kvs = _kvs.load(memory_order_relaxed);
447 slot *V = get_impl(this, kvs, key_slot, fullhash);
448 if (V == NULL) return NULL;
449 MODEL_ASSERT (!is_prime(V));
450 return (TypeV*) V->_ptr;
456 @Commit_point_set: Write_Success_Point
459 # Remember this old value at checking point
460 TypeV *_Old_Val = (TypeV*) spec_table_get(map, key);
461 spec_table_put(map, key, val);
462 //bool passed = equals_val(__RET__, _Old_Val);
465 int old = _Old_Val == NULL ? 0 : _Old_Val->_val;
466 int ret = __RET__ == NULL ? 0 : __RET__->_val;
467 model_print("Put: key: %d, val: %d, _Old_Val: %d, RET: %d\n",
468 key->_val, val->_val, old, ret);
471 equals_val(__RET__, _Old_Val)
474 TypeV* put(TypeK *key, TypeV *val) {
475 return putIfMatch(key, val, NO_MATCH_OLD);
480 @Interface: PutIfAbsent
482 Write_Success_Point | PutIfAbsent_Fail_Point
483 @Condition: !spec_table_contains(map, key)
485 COND_PutIfAbsentSucc :: __RET__ == NULL
488 void *_Old_Val = spec_table_get(map, key);
490 spec_table_put(map, key, value);
492 __COND_SAT__ ? __RET__ == NULL : equals_val(_Old_Val, __RET__)
495 TypeV* putIfAbsent(TypeK *key, TypeV *value) {
496 return putIfMatch(key, val, TOMBSTONE);
501 @Interface: RemoveAny
502 @Commit_point_set: Write_Success_Point
505 void *_Old_Val = spec_table_get(map, key);
506 spec_table_put(map, key, NULL);
508 equals_val(__RET__, _Old_Val)
511 TypeV* remove(TypeK *key) {
512 return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
517 @Interface: RemoveIfMatch
519 Write_Success_Point | RemoveIfMatch_Fail_Point
521 equals_val(spec_table_get(map, key), val)
523 COND_RemoveIfMatchSucc :: __RET__ == true
527 spec_table_put(map, key, NULL);
529 __COND_SAT__ ? __RET__ : !__RET__
532 bool remove(TypeK *key, TypeV *val) {
533 slot *val_slot = val == NULL ? NULL : new slot(false, val);
534 return putIfMatch(key, TOMBSTONE, val) == val;
540 @Interface: ReplaceAny
545 void *_Old_Val = spec_table_get(map, key);
547 equals_val(__RET__, _Old_Val)
550 TypeV* replace(TypeK *key, TypeV *val) {
551 return putIfMatch(key, val, MATCH_ANY);
556 @Interface: ReplaceIfMatch
558 Write_Success_Point | ReplaceIfMatch_Fail_Point
560 equals_val(spec_table_get(map, key), oldval)
562 COND_ReplaceIfMatchSucc :: __RET__ == true
566 spec_table_put(map, key, newval);
568 __COND_SAT__ ? __RET__ : !__RET__
571 bool replace(TypeK *key, TypeV *oldval, TypeV *newval) {
572 return putIfMatch(key, newval, oldval) == oldval;
576 static CHM* get_chm(kvs_data* kvs) {
577 CHM *res = (CHM*) kvs->_data[0].load(memory_order_relaxed);
581 static int* get_hashes(kvs_data *kvs) {
582 return (int *) kvs->_data[1].load(memory_order_relaxed);
585 // Preserve happens-before semantics on newly inserted keys
586 static inline slot* key(kvs_data *kvs, int idx) {
587 MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
588 // Corresponding to the volatile read in get_impl() and putIfMatch in
589 // Cliff Click's Java implementation
590 slot *res = (slot*) kvs->_data[idx * 2 + 2].load(memory_order_relaxed);
593 # This is a complicated potential commit point since many many functions are
595 @Potential_commit_point_define: true
596 @Label: Read_Key_Point
603 The atomic operation in val() function is a "potential" commit point,
604 which means in some case it is a real commit point while it is not for
605 some other cases. This so happens because the val() function is such a
606 fundamental function that many internal operation will call. Our
607 strategy is that we label any potential commit points and check if they
608 really are the commit points later.
610 // Preserve happens-before semantics on newly inserted values
611 static inline slot* val(kvs_data *kvs, int idx) {
612 MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
613 // Corresponding to the volatile read in get_impl() and putIfMatch in
614 // Cliff Click's Java implementation
615 slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
618 # This is a complicated potential commit point since many many functions are
620 @Potential_commit_point_define: true
621 @Label: Read_Val_Point
629 static int hash(slot *key_slot) {
630 MODEL_ASSERT(key_slot != NULL && key_slot->_ptr != NULL);
631 TypeK* key = (TypeK*) key_slot->_ptr;
632 int h = key->hashCode();
633 // Spread bits according to Cliff Click's code
634 h += (h << 15) ^ 0xffffcd7d;
638 h += (h << 2) + (h << 14);
639 return h ^ (h >> 16);
642 // Heuristic to decide if reprobed too many times.
643 // Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
644 // put it triggers a table resize. Several places MUST have exact agreement.
645 static int reprobe_limit(int len) {
646 return REPROBE_LIMIT + (len >> 2);
649 static inline bool is_prime(slot *val) {
650 return (val != NULL) && val->_prime;
653 // Check for key equality. Try direct pointer comparison first (fast
654 // negative test) and then the full 'equals' call
655 static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
657 // Caller should've checked this.
658 MODEL_ASSERT (K != NULL);
659 TypeK* key_ptr = (TypeK*) key_slot->_ptr;
662 ((hashes[hash] == 0 || hashes[hash] == fullhash) &&
664 key_ptr->equals(K->_ptr));
667 static bool valeq(slot *val_slot1, slot *val_slot2) {
668 MODEL_ASSERT (val_slot1 != NULL);
669 TypeK* ptr1 = (TypeV*) val_slot1->_ptr;
670 if (val_slot2 == NULL || ptr1 == NULL) return false;
671 return ptr1->equals(val_slot2->_ptr);
674 // Together with key() preserve the happens-before relationship on newly
676 static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
677 return kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
678 desired, memory_order_relaxed, memory_order_relaxed);
682 Same as the val() function, we only label the CAS operation as the
683 potential commit point.
685 // Together with val() preserve the happens-before relationship on newly
687 static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
689 bool res = kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
690 desired, memory_order_release, memory_order_relaxed);
692 # If it is a successful put instead of a copy or any other internal
693 # operations, expected != NULL
695 @Potential_commit_point_define: res
696 @Label: Write_Val_Point
702 slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
704 int len = kvs->_size;
705 CHM *chm = get_chm(kvs);
706 int *hashes = get_hashes(kvs);
708 int idx = fullhash & (len - 1);
711 slot *K = key(kvs, idx);
714 @Commit_point_define: K == NULL
715 @Potential_commit_point_label: Read_Key_Point
716 @Label: Get_Success_Point_1
719 slot *V = val(kvs, idx);
723 model_print("Key is null\n");
724 return NULL; // A miss
727 if (keyeq(K, key_slot, hashes, idx, fullhash)) {
728 // Key hit! Check if table-resize in progress
732 @Commit_point_define: true
733 @Potential_commit_point_label: Read_Val_Point
734 @Label: Get_Success_Point_2
737 return (V == TOMBSTONE) ? NULL : V; // Return this value
739 // Otherwise, finish the copy & retry in the new table
740 return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
741 idx, key_slot), key_slot, fullhash);
744 if (++reprobe_cnt >= REPROBE_LIMIT ||
745 key_slot == TOMBSTONE) {
746 // Retry in new table
747 // Atomic read can be here
748 kvs_data *newkvs = chm->_newkvs.load(memory_order_relaxed);
751 @Commit_point_define_check: newkvs == NULL
752 @Label: Get_Success_Point_3
755 return newkvs == NULL ? NULL : get_impl(topmap,
756 topmap->help_copy(newkvs), key_slot, fullhash);
759 idx = (idx + 1) & (len - 1); // Reprobe by 1
763 // A wrapper of the essential function putIfMatch()
764 TypeV* putIfMatch(TypeK *key, TypeV *value, slot *old_val) {
765 // TODO: Should throw an exception rather return NULL
766 if (old_val == NULL) {
769 slot *key_slot = new slot(false, key);
771 slot *value_slot = new slot(false, value);
772 kvs_data *kvs = _kvs.load(memory_order_relaxed);
773 slot *res = putIfMatch(this, kvs, key_slot, value_slot, old_val);
774 // Only when copy_slot() call putIfMatch() will it return NULL
775 MODEL_ASSERT (res != NULL);
776 MODEL_ASSERT (!is_prime(res));
777 return res == TOMBSTONE ? NULL : (TypeV*) res->_ptr;
781 Put, Remove, PutIfAbsent, etc will call this function. Return the old
782 value. If the returned value is equals to the expVal (or expVal is
783 NO_MATCH_OLD), then this function puts the val_slot to the table 'kvs'.
784 Only copy_slot will pass a NULL expVal, and putIfMatch only returns a
785 NULL if passed a NULL expVal.
787 static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
788 *key_slot, slot *val_slot, slot *expVal) {
789 MODEL_ASSERT (val_slot != NULL);
790 MODEL_ASSERT (!is_prime(val_slot));
791 MODEL_ASSERT (!is_prime(expVal));
793 int fullhash = hash(key_slot);
794 int len = kvs->_size;
795 CHM *chm = get_chm(kvs);
796 int *hashes = get_hashes(kvs);
797 int idx = fullhash & (len - 1);
805 while (true) { // Spin till we get a key slot
808 if (K == NULL) { // Get a free slot
809 if (val_slot == TOMBSTONE) return val_slot;
810 // Claim the null key-slot
811 if (CAS_key(kvs, idx, NULL, key_slot)) {
812 chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
813 hashes[idx] = fullhash; // Memorize full hash
816 K = key(kvs, idx); // CAS failed, get updated value
817 MODEL_ASSERT (K != NULL);
820 // Key slot not null, there exists a Key here
821 if (keyeq(K, key_slot, hashes, idx, fullhash))
824 // Notice that the logic here should be consistent with that of get.
825 // The first predicate means too many reprobes means nothing in the
827 if (++reprobe_cnt >= reprobe_limit(len) ||
828 K == TOMBSTONE) { // Found a Tombstone key, no more keys
829 newkvs = chm->resize(topmap, kvs);
830 //model_print("resize1\n");
831 // Help along an existing copy
832 if (expVal != NULL) topmap->help_copy(newkvs);
833 return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
836 idx = (idx + 1) & (len - 1); // Reprobe
837 } // End of spinning till we get a Key slot
839 if (val_slot == V) return V; // Fast cutout for no-change
841 // Here it tries to resize cause it doesn't want other threads to stop
842 // its progress (eagerly try to resize soon)
843 newkvs = chm->_newkvs.load(memory_order_relaxed);
844 if (newkvs == NULL &&
845 ((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V))) {
846 //model_print("resize2\n");
847 newkvs = chm->resize(topmap, kvs); // Force the copy to start
850 // Finish the copy and then put it in the new table
852 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
853 expVal), key_slot, val_slot, expVal);
855 // Decided to update the existing table
857 MODEL_ASSERT (!is_prime(V));
859 if (expVal != NO_MATCH_OLD &&
861 (expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
862 !(V == NULL && expVal == TOMBSTONE) &&
863 (expVal == NULL || !valeq(expVal, V))) {
866 @Commit_point_define: expVal == TOMBSTONE
867 @Potential_commit_point_label: Read_Val_Point
868 @Label: PutIfAbsent_Fail_Point
869 # This is a check for the PutIfAbsent() when the value
875 @Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
876 @Potential_commit_point_label: Read_Val_Point
877 @Label: RemoveIfMatch_Fail_Point
882 @Commit_point_define: expVal != NULL && !valeq(expVal, V)
883 @Potential_commit_point_label: Read_Val_Point
884 @Label: ReplaceIfMatch_Fail_Point
887 return V; // Do not update!
890 if (CAS_val(kvs, idx, V, val_slot)) {
893 # The only point where a successful put happens
894 @Commit_point_define: true
895 @Potential_commit_point_label: Write_Val_Point
896 @Label: Write_Success_Point
899 if (expVal != NULL) { // Not called by a table-copy
900 // CAS succeeded, should adjust size
901 // Both normal put's and table-copy calls putIfMatch, but
902 // table-copy does not increase the number of live K/V pairs
903 if ((V == NULL || V == TOMBSTONE) &&
904 val_slot != TOMBSTONE)
905 chm->_size.fetch_add(1, memory_order_relaxed);
906 if (!(V == NULL || V == TOMBSTONE) &&
907 val_slot == TOMBSTONE)
908 chm->_size.fetch_add(-1, memory_order_relaxed);
910 return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
915 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
916 idx, expVal), key_slot, val_slot, expVal);
920 // Help along an existing table-resize. This is a fast cut-out wrapper.
921 kvs_data* help_copy(kvs_data *helper) {
922 kvs_data *topkvs = _kvs.load(memory_order_relaxed);
923 CHM *topchm = get_chm(topkvs);
924 // No copy in progress
925 if (topchm->_newkvs.load(memory_order_relaxed) == NULL) return helper;
926 topchm->help_copy_impl(this, topkvs, false);