1 #ifndef CLIFFC_HASHTABLE_H
2 #define CLIFFC_HASHTABLE_H
9 #define MODEL_ASSERT assert
11 #include <model-assert.h>
16 #include <cdsannotate.h>
17 #include <specannotation.h>
18 #include <model_memory.h>
24 This header file declares and defines a simplified version of Cliff Click's
25 NonblockingHashMap. It contains all the necessary structures and main
26 functions. The simplified_cliffc_hashtable.cc file has the definition for
30 template<typename TypeK, typename TypeV>
31 class cliffc_hashtable;
34 Corresponds to the Object[] array in Cliff Click's Java implementation.
35 It keeps the first two slots for CHM (Hashtable control unit) and the hash
36 records (an array of hashes used for fast negative key-equality checks).
// kvs_data constructor body (partial view — enclosing lines elided).
// Array layout, per the visible code: _data[0] = CHM control block
// (filled in later by resize()), _data[1] = int[] full-hash cache,
// _data[2 .. real_size-1] = alternating key/value slot pairs.
44 int real_size = sz * 2 + 2;
45 _data = new atomic<void*>[real_size];
46 // The control block should be initialized in resize()
47 // Init the hash record array
48 int *hashes = new int[_size];
50 for (i = 0; i < _size; i++) {
53 // Init the data to Null slot
54 for (i = 2; i < real_size; i++) {
55 _data[i].store(NULL, memory_order_relaxed);
// Release store publishes the initialized hash array to other threads.
57 _data[1].store(hashes, memory_order_release);
// Presumably the destructor reclaiming the hash array — surrounding
// lines are elided; confirm against the full file.
61 int *hashes = (int*) _data[1].load(memory_order_relaxed);
71 slot(bool prime, void *ptr) {
79 TypeK must define the function "int hashCode()", which returns the hash
80 code for its object, and "int equals(TypeK anotherKey)", which is
81 used to judge equality.
82 TypeK and TypeV should define their own copy constructors.
89 template<typename TypeK, typename TypeV>
90 class cliffc_hashtable {
92 # The synchronization we have for the hashtable gives us the property of
93 # serializability, so we should have a sequential hashtable when we check the
94 # correctness. The key thing is to identify all the commit points.
99 CLASS = cliffc_hashtable;
106 map = new_spec_table_default(equals_key);
107 id_map = new_spec_table_default(equals_id);
111 bool equals_key(void *ptr1, void *ptr2) {
112 TypeK *key1 = (TypeK*) ptr1,
113 *key2 = (TypeK*) ptr2;
114 if (key1 == NULL || key2 == NULL)
116 return key1->equals(key2);
120 bool equals_val(void *ptr1, void *ptr2) {
121 TypeV *val1 = (TypeV*) ptr1,
122 *val2 = (TypeV*) ptr2;
123 if (val1 == NULL || val2 == NULL)
125 return val1->equals(val2);
129 bool equals_id(void *ptr1, void *ptr2) {
130 id_tag_t *id1 = (id_tag_t*) ptr1,
131 *id2 = (id_tag_t*) ptr2;
132 if (id1 == NULL || id2 == NULL)
134 return (*id1).tag == (*id2).tag;
138 # Update the tag for the current key slot if the corresponding tag
139 # is NULL, otherwise just return that tag. It will update the next
140 # available tag too if it requires a new tag for that key slot.
141 id_tag_t getKeyTag(TypeK *key) {
142 if (spec_table_contains(id_map, key)) {
143 id_tag_t *cur_tag = MODEL_MALLOC(sizeof(id_tag_t));
144 *cur_tag = current(tag);
145 spec_table_put(id_map, key, cur_tag);
149 id_tag_t *res = (id_tag_t*) spec_table_get(id_map, key);
166 PutIfAbsent(COND_PutIfAbsentSucc),
168 RemoveIfMatch(COND_RemoveIfMatchSucc),
170 ReplaceIfMatch(COND_ReplaceIfMatchSucc)
173 //Write_interface -> Read_interface
181 The control structure for the hashtable
185 friend class cliffc_hashtable;
187 atomic<kvs_data*> _newkvs;
189 // Size of active K,V pairs
192 // Count of used slots
195 // The next part of the table to copy
196 atomic_int _copy_idx;
198 // Work-done reporting
199 atomic_int _copy_done;
203 _newkvs.store(NULL, memory_order_relaxed);
204 _size.store(size, memory_order_relaxed);
205 _slots.store(0, memory_order_relaxed);
207 _copy_idx.store(0, memory_order_relaxed);
208 _copy_done.store(0, memory_order_release);
215 // Heuristic to decide if the table is too full
216 bool table_full(int reprobe_cnt, int len) {
218 reprobe_cnt >= REPROBE_LIMIT &&
219 _slots.load(memory_order_relaxed) >= reprobe_limit(len);
222 kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
223 //model_print("resizing...\n");
224 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
228 // No copy in-progress, start one; Only double the table size
229 int oldlen = kvs->_size;
230 int sz = _size.load(memory_order_relaxed);
233 // Just follow Cliff Click's heuristic to decide the new size
234 if (sz >= (oldlen >> 2)) { // If we are 25% full
235 newsz = oldlen << 1; // Double size
236 if (sz >= (oldlen >> 1))
237 newsz = oldlen << 2; // Double double size
240 // We do not record the record timestamp
241 if (newsz <= oldlen) newsz = oldlen << 1;
242 // Do not shrink ever
243 if (newsz < oldlen) newsz = oldlen;
245 // Last check cause the 'new' below is expensive
246 newkvs = _newkvs.load(memory_order_acquire);
247 if (newkvs != NULL) return newkvs;
249 newkvs = new kvs_data(newsz);
250 void *chm = (void*) new CHM(sz);
251 newkvs->_data[0].store(chm, memory_order_release);
253 kvs_data *cur_newkvs;
254 // Another check after the slow allocation
255 if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
257 // CAS the _newkvs to the allocated table
258 kvs_data *desired = (kvs_data*) NULL;
259 kvs_data *expected = (kvs_data*) newkvs;
260 if (!_newkvs.compare_exchange_strong(desired, expected, memory_order_release,
261 memory_order_release)) {
262 // Should clean the allocated area
264 newkvs = _newkvs.load(memory_order_acquire);
// Help copy the old table into the new one. Threads claim chunks of at
// most 1024 slots via CAS on _copy_idx; 'panic' mode (claim index ran
// past 2*oldlen) makes a thread copy everything linearly.
269 void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
271 MODEL_ASSERT (get_chm(oldkvs) == this);
272 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
273 int oldlen = oldkvs->_size;
274 int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
276 // Just follow Cliff Click's code here
277 int panic_start = -1;
279 while (_copy_done.load(memory_order_acquire) < oldlen) {
280 copyidx = _copy_idx.load(memory_order_acquire);
281 if (panic_start == -1) { // No panic
282 copyidx = _copy_idx.load(memory_order_acquire);
// NOTE(review): memory_order_release is not a valid *failure* ordering
// for compare_exchange in ISO C++ (the failure path is a pure load) —
// confirm whether the model checker intentionally permits it.
283 while (copyidx < (oldlen << 1) &&
284 !_copy_idx.compare_exchange_strong(copyidx, copyidx +
285 min_copy_work, memory_order_release, memory_order_release))
286 copyidx = _copy_idx.load(memory_order_relaxed);
287 if (!(copyidx < (oldlen << 1)))
288 panic_start = copyidx;
291 // Now copy the chunk of work we claimed
293 for (int i = 0; i < min_copy_work; i++)
294 if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
298 copy_check_and_promote(topmap, oldkvs, workdone);
300 copyidx += min_copy_work;
301 if (!copy_all && panic_start == -1)
302 return; // We are done with the work we claim
304 copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
// Copy one slot (the caller saw a Prime there) and report the work done;
// returns the new table, optionally helping the overall copy along when
// 'should_help' is non-NULL.
307 kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
308 *oldkvs, int idx, void *should_help) {
309 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
310 // We're only here cause the caller saw a Prime
311 if (copy_slot(topmap, idx, oldkvs, newkvs))
312 copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
313 return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
// Accumulate 'workdone' slots into _copy_done; once the whole old table
// has been copied, CAS the top-level _kvs from the old table to the new.
316 void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
317 oldkvs, int workdone) {
318 int oldlen = oldkvs->_size;
319 int copyDone = _copy_done.load(memory_order_relaxed);
322 copyDone = _copy_done.load(memory_order_relaxed);
323 if (_copy_done.compare_exchange_weak(copyDone, copyDone +
324 workdone, memory_order_relaxed, memory_order_relaxed))
329 // Promote the new table to the current table
330 if (copyDone + workdone == oldlen &&
331 topmap->_kvs.load(memory_order_acquire) == oldkvs) {
332 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
// NOTE(review): release is not a valid failure ordering for
// compare_exchange in ISO C++ — confirm model-checker semantics.
333 topmap->_kvs.compare_exchange_strong(oldkvs, newkvs, memory_order_release,
334 memory_order_release);
// Copy one K/V pair from the old table into the new one. Returns true
// only for the thread that actually moves the value (or tombstones the
// slot), so _copy_done is incremented exactly once per slot.
338 bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
// Blindly promote a NULL key to TOMBSTONE so no writer can install a
// fresh key in the dying table.
341 while ((key_slot = key(oldkvs, idx)) == NULL)
342 CAS_key(oldkvs, idx, NULL, TOMBSTONE);
344 // First CAS old to Prime
345 slot *oldval = val(oldkvs, idx);
346 while (!is_prime(oldval)) {
347 slot *box = (oldval == NULL || oldval == TOMBSTONE)
348 ? TOMBPRIME : new slot(true, oldval->_ptr);
349 if (CAS_val(oldkvs, idx, oldval, box)) {
350 if (box == TOMBPRIME)
351 return 1; // Copy done
352 // Otherwise we CAS'd the box
353 oldval = box; // Record updated oldval
356 oldval = val(oldkvs, idx); // Else re-try
359 if (oldval == TOMBPRIME) return false; // Copy already completed here
// Re-insert the unboxed (non-prime) value into the new table.
361 slot *old_unboxed = new slot(false, oldval->_ptr);
362 int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
365 // Old value is exposed in the new table
366 while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
367 oldval = val(oldkvs, idx);
369 return copied_into_new;
376 static const int Default_Init_Size = 4; // Initial table size
// Sentinel slot pointers (their definitions are elided from this view).
// Per their uses below: MATCH_ANY / NO_MATCH_OLD select putIfMatch's
// matching semantics; TOMBSTONE marks a deleted entry, and TOMBPRIME a
// deleted entry in a table that is being copied.
378 static slot* const MATCH_ANY;
379 static slot* const NO_MATCH_OLD;
381 static slot* const TOMBPRIME;
382 static slot* const TOMBSTONE;
384 static const int REPROBE_LIMIT = 10; // Forces a table-resize
// The current top-level table.
386 atomic<kvs_data*> _kvs;
// Default-constructor body (signature elided): build the initial table
// of Default_Init_Size slots and install its control block.
390 // Should initialize the CHM for the construction of the table
391 // For other CHM in kvs_data, they should be initialized in resize()
392 // because the size is determined dynamically
393 kvs_data *kvs = new kvs_data(Default_Init_Size);
394 void *chm = (void*) new CHM(0);
395 kvs->_data[0].store(chm, memory_order_relaxed);
// Release store publishes the fully-built table.
396 _kvs.store(kvs, memory_order_release);
// Construct a table with a caller-chosen initial size.
399 cliffc_hashtable(int init_size) {
400 // Should initialize the CHM for the construction of the table
401 // For other CHM in kvs_data, they should be initialized in resize()
402 // because the size is determined dynamically
409 kvs_data *kvs = new kvs_data(init_size);
410 void *chm = (void*) new CHM(0);
411 kvs->_data[0].store(chm, memory_order_relaxed);
// Release store publishes the fully-built table.
412 _kvs.store(kvs, memory_order_release);
418 @Commit_point_set: Get_Success_Point1 | Get_Success_Point2 | Get_Success_Point3
421 void *_Old_Val = spec_table_get(map, key);
423 equals_val(_Old_Val, __RET__)
// Lookup: returns the value mapped to 'key', or NULL on a miss.
426 TypeV* get(TypeK *key) {
427 slot *key_slot = new slot(false, key);
428 int fullhash = hash(key_slot);
429 kvs_data *kvs = _kvs.load(memory_order_acquire);
430 slot *V = get_impl(this, kvs, key_slot, fullhash);
431 if (V == NULL) return NULL;
// get_impl must never hand back a Prime (mid-copy) value.
432 MODEL_ASSERT (!is_prime(V));
433 return (TypeV*) V->_ptr;
439 @Commit_point_set: Write_Success_Point
442 # Remember this old value at checking point
443 void *_Old_Val = spec_table_get(map, key);
444 spec_table_put(map, key, val);
446 equals_val(__RET__, _Old_Val)
// Maps 'key' to 'val' unconditionally; returns the previous value
// (NULL if there was none).
449 TypeV* put(TypeK *key, TypeV *val) {
450 return putIfMatch(key, val, NO_MATCH_OLD);
455 @Interface: PutIfAbsent
457 Write_Success_Point | PutIfAbsent_Fail_Point
458 @Condition: !spec_table_contains(map, key)
460 COND_PutIfAbsentSucc :: __RET__ == NULL
463 void *_Old_Val = spec_table_get(map, key);
465 spec_table_put(map, key, value);
467 __COND_SAT__ ? __RET__ == NULL : equals_val(_Old_Val, __RET__)
// Maps 'key' to 'value' only when no live mapping exists; returns the
// prior value (NULL means the put succeeded).
// Fix: the parameter is named 'value' — the old body forwarded the
// undeclared identifier 'val'.
470 TypeV* putIfAbsent(TypeK *key, TypeV *value) {
471 return putIfMatch(key, value, TOMBSTONE);
476 @Interface: RemoveAny
477 @Commit_point_set: Write_Success_Point
480 void *_Old_Val = spec_table_get(map, key);
481 spec_table_put(map, key, NULL);
483 equals_val(__RET__, _Old_Val)
486 TypeV* remove(TypeK *key) {
487 return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
492 @Interface: RemoveIfMatch
494 Write_Success_Point | RemoveIfMatch_Fail_Point
496 equals_val(spec_table_get(map, key), val)
498 COND_RemoveIfMatchSucc :: __RET__ == true
502 spec_table_put(map, key, NULL);
504 __COND_SAT__ ? __RET__ : !__RET__
// Removes 'key' only if it currently maps to 'val'; returns success.
// NOTE(review): 'val_slot' is built but never used, while the raw
// TypeV* 'val' is passed and compared where the rest of this file uses
// slot* — the call likely should use val_slot; confirm against the
// full source before changing.
507 bool remove(TypeK *key, TypeV *val) {
508 slot *val_slot = val == NULL ? NULL : new slot(false, val);
509 return putIfMatch(key, TOMBSTONE, val) == val;
515 @Interface: ReplaceAny
520 void *_Old_Val = spec_table_get(map, key);
522 equals_val(__RET__, _Old_Val)
525 TypeV* replace(TypeK *key, TypeV *val) {
526 return putIfMatch(key, val, MATCH_ANY);
531 @Interface: ReplaceIfMatch
533 Write_Success_Point | ReplaceIfMatch_Fail_Point
535 equals_val(spec_table_get(map, key), oldval)
537 COND_ReplaceIfMatchSucc :: __RET__ == true
541 spec_table_put(map, key, newval);
543 __COND_SAT__ ? __RET__ : !__RET__
546 bool replace(TypeK *key, TypeV *oldval, TypeV *newval) {
547 return putIfMatch(key, newval, oldval) == oldval;
// Slot 0 of every table holds its CHM control block.
551 static CHM* get_chm(kvs_data* kvs) {
552 CHM *res = (CHM*) kvs->_data[0].load(memory_order_relaxed);
// Slot 1 of every table holds the cached full-hash array.
556 static int* get_hashes(kvs_data *kvs) {
557 return (int *) kvs->_data[1].load(memory_order_relaxed);
560 // Preserve happens-before semantics on newly inserted keys
561 static inline slot* key(kvs_data *kvs, int idx) {
562 MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
563 // Corresponding to the volatile read in get_impl() and putIfMatch in
564 // Cliff Click's Java implementation
// Keys live at offset 2*idx + 2 (slots 0/1 are the CHM + hash cache).
565 slot *res = (slot*) kvs->_data[idx * 2 + 2].load(memory_order_acquire);
570 The atomic operation in the val() function is a "potential" commit point,
571 which means in some cases it is a real commit point while in other
572 cases it is not. This happens because val() is such a
573 fundamental function that many internal operations call it. Our
574 strategy is to label any potential commit points and check later
575 whether they really are the commit points.
577 // Preserve happens-before semantics on newly inserted values
578 static inline slot* val(kvs_data *kvs, int idx) {
579 MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
580 // Corresponding to the volatile read in get_impl() and putIfMatch in
581 // Cliff Click's Java implementation
// Values live at offset 2*idx + 3, adjacent to their key at 2*idx + 2.
582 slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
585 # This is a complicated potential commit point since many many functions are
587 @Potential_commit_point_define: true
588 @Label: Read_Val_Point
// Spread the user-supplied hashCode() into a full table hash using
// Cliff Click's bit-mixing sequence (some mixing lines elided here).
596 static int hash(slot *key_slot) {
597 MODEL_ASSERT(key_slot != NULL && key_slot->_ptr != NULL);
598 TypeK* key = (TypeK*) key_slot->_ptr;
599 int h = key->hashCode();
600 // Spread bits according to Cliff Click's code
601 h += (h << 15) ^ 0xffffcd7d;
605 h += (h << 2) + (h << 14);
606 return h ^ (h >> 16);
609 // Heuristic to decide if reprobed too many times.
610 // Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
611 // put it triggers a table resize. Several places MUST have exact agreement.
612 static int reprobe_limit(int len) {
// Limit scales with the table: 10 + len/4.
613 return REPROBE_LIMIT + (len >> 2);
// A 'prime' slot marks a value that is mid-copy into the new table.
616 static inline bool is_prime(slot *val) {
617 return (val != NULL) && val->_prime;
620 // Check for key equality. Try direct pointer comparison first (fast
621 // negative test) and then the full 'equals' call
622 static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
624 // Caller should've checked this.
625 MODEL_ASSERT (K != NULL);
626 TypeK* key_ptr = (TypeK*) key_slot->_ptr;
// hashes[hash] == 0 means "not cached yet", so it cannot rule the key out.
629 ((hashes[hash] == 0 || hashes[hash] == fullhash) &&
631 key_ptr->equals(K->_ptr));
// Value equality via TypeV::equals; false when either side is missing.
// Fix: ptr1 holds a value (TypeV), not a key — the old declaration used
// TypeK* while casting from and using it as a TypeV.
634 static bool valeq(slot *val_slot1, slot *val_slot2) {
635 MODEL_ASSERT (val_slot1 != NULL);
636 TypeV* ptr1 = (TypeV*) val_slot1->_ptr;
637 if (val_slot2 == NULL || ptr1 == NULL) return false;
638 return ptr1->equals(val_slot2->_ptr);
641 // Together with key() preserve the happens-before relationship on newly
// inserted keys: CAS the key slot at offset 2*idx + 2.
643 static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
// Fix: ISO C++ forbids memory_order_release as the *failure* ordering
// of compare_exchange (the failure path is a pure load). Use relaxed,
// which matches the synchronization the failure path actually provided.
644 return kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
645 desired, memory_order_release, memory_order_relaxed);
649 Same as the val() function, we only label the CAS operation as the
650 potential commit point.
652 // Together with val() preserve the happens-before relationship on newly
654 static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
// NOTE(review): memory_order_release is not a valid *failure* ordering
// for compare_exchange in ISO C++ (the failure path is a pure load) —
// confirm whether the model checker intentionally permits it.
656 bool res = kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
657 desired, memory_order_release, memory_order_release);
659 # If it is a successful put instead of a copy or any other internal
660 # operations, expected != NULL
662 @Potential_commit_point_define: res == true
663 @Label: Write_Val_Point
669 slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
671 int len = kvs->_size;
672 CHM *chm = get_chm(kvs);
673 int *hashes = get_hashes(kvs);
675 int idx = fullhash & (len - 1);
678 slot *K = key(kvs, idx);
679 slot *V = val(kvs, idx);
682 @Commit_point_define: V == NULL
683 @Potential_commit_point_label: Read_Val_Point
684 @Label: Get_Success_Point_1
688 if (K == NULL) return NULL; // A miss
690 if (keyeq(K, key_slot, hashes, idx, fullhash)) {
691 // Key hit! Check if table-resize in progress
695 @Commit_point_define: true
696 @Potential_commit_point_label: Read_Val_Point
697 @Label: Get_Success_Point_2
700 return (V == TOMBSTONE) ? NULL : V; // Return this value
702 // Otherwise, finish the copy & retry in the new table
703 return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
704 idx, key_slot), key_slot, fullhash);
707 if (++reprobe_cnt >= REPROBE_LIMIT ||
708 key_slot == TOMBSTONE) {
709 // Retry in new table
710 // Atomic read (acquire) can be here
711 kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
714 @Commit_point_define_check: newkvs == NULL
715 @Label: Get_Success_Point_3
718 return newkvs == NULL ? NULL : get_impl(topmap,
719 topmap->help_copy(newkvs), key_slot, fullhash);
722 idx = (idx + 1) & (len - 1); // Reprobe by 1
726 // A wrapper of the essential function putIfMatch()
727 TypeV* putIfMatch(TypeK *key, TypeV *value, slot *old_val) {
728 // TODO: Should throw an exception rather return NULL
729 if (old_val == NULL) {
732 slot *key_slot = new slot(false, key);
734 slot *value_slot = new slot(false, value);
735 kvs_data *kvs = _kvs.load(memory_order_acquire);
736 slot *res = putIfMatch(this, kvs, key_slot, value_slot, old_val);
737 // Only when copy_slot() call putIfMatch() will it return NULL
738 MODEL_ASSERT (res != NULL);
739 MODEL_ASSERT (!is_prime(res));
740 return res == TOMBSTONE ? NULL : (TypeV*) res->_ptr;
744 Put, Remove, PutIfAbsent, etc. call this function. It returns the old
745 value. If the returned value is equal to expVal (or expVal is
746 NO_MATCH_OLD), then this function puts val_slot into the table 'kvs'.
747 Only copy_slot will pass a NULL expVal, and putIfMatch only returns
748 NULL if passed a NULL expVal.
750 static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
751 *key_slot, slot *val_slot, slot *expVal) {
752 MODEL_ASSERT (val_slot != NULL);
753 MODEL_ASSERT (!is_prime(val_slot));
754 MODEL_ASSERT (!is_prime(expVal));
756 int fullhash = hash(key_slot);
757 int len = kvs->_size;
758 CHM *chm = get_chm(kvs);
759 int *hashes = get_hashes(kvs);
760 int idx = fullhash & (len - 1);
768 while (true) { // Spin till we get a key slot
771 if (K == NULL) { // Get a free slot
772 if (val_slot == TOMBSTONE) return val_slot;
773 // Claim the null key-slot
774 if (CAS_key(kvs, idx, NULL, key_slot)) {
775 chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
776 hashes[idx] = fullhash; // Memorize full hash
779 K = key(kvs, idx); // CAS failed, get updated value
780 MODEL_ASSERT (K != NULL);
783 // Key slot not null, there exists a Key here
784 if (keyeq(K, key_slot, hashes, idx, fullhash))
787 // Notice that the logic here should be consistent with that of get.
788 // The first predicate means too many reprobes means nothing in the
790 if (++reprobe_cnt >= reprobe_limit(len) ||
791 K == TOMBSTONE) { // Found a Tombstone key, no more keys
792 newkvs = chm->resize(topmap, kvs);
793 // Help along an existing copy
794 if (expVal != NULL) topmap->help_copy(newkvs);
795 return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
798 idx = (idx + 1) & (len - 1); // Reprobe
799 } // End of spinning till we get a Key slot
801 if (val_slot == V) return V; // Fast cutout for no-change
803 // Here it tries to resize cause it doesn't want other threads to stop
804 // its progress (eagerly try to resize soon)
805 newkvs = chm->_newkvs.load(memory_order_acquire);
806 if (newkvs == NULL &&
807 ((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V)))
808 newkvs = chm->resize(topmap, kvs); // Force the copy to start
810 // Finish the copy and then put it in the new table
812 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
813 expVal), key_slot, val_slot, expVal);
815 // Decided to update the existing table
817 MODEL_ASSERT (!is_prime(V));
819 if (expVal != NO_MATCH_OLD &&
821 (expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
822 !(V == NULL && expVal == TOMBSTONE) &&
823 (expVal == NULL || !valeq(expVal, V))) {
826 @Commit_point_define: expVal == TOMBSTONE
827 @Potential_commit_point_label: Read_Val_Point
828 @Label: PutIfAbsent_Fail_Point
829 # This is a check for the PutIfAbsent() when the value
835 @Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
836 @Potential_commit_point_label: Read_Val_Point
837 @Label: RemoveIfMatch_Fail_Point
842 @Commit_point_define: !valeq(expVal, V)
843 @Potential_commit_point_label: Read_Val_Point
844 @Label: ReplaceIfMatch_Fail_Point
847 return V; // Do not update!
850 if (CAS_val(kvs, idx, V, val_slot)) {
853 # The only point where a successful put happens
854 @Commit_point_define: true
855 @Potential_commit_point_label: Write_Val_Point
856 @Label: Write_Success_Point
859 if (expVal != NULL) { // Not called by a table-copy
860 // CAS succeeded, should adjust size
861 // Both normal put's and table-copy calls putIfMatch, but
862 // table-copy does not increase the number of live K/V pairs
863 if ((V == NULL || V == TOMBSTONE) &&
864 val_slot != TOMBSTONE)
865 chm->_size.fetch_add(1, memory_order_relaxed);
866 if (!(V == NULL || V == TOMBSTONE) &&
867 val_slot == TOMBSTONE)
868 chm->_size.fetch_add(-1, memory_order_relaxed);
870 return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
875 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
876 idx, expVal), key_slot, val_slot, expVal);
880 // Help along an existing table-resize. This is a fast cut-out wrapper.
881 kvs_data* help_copy(kvs_data *helper) {
882 kvs_data *topkvs = _kvs.load(memory_order_acquire);
883 CHM *topchm = get_chm(topkvs);
884 // No copy in progress
885 if (topchm->_newkvs.load(memory_order_acquire) == NULL) return helper;
886 topchm->help_copy_impl(this, topkvs, false);