1 #ifndef CLIFFC_HASHTABLE_H
2 #define CLIFFC_HASHTABLE_H
9 #define MODEL_ASSERT assert
11 #include <model-assert.h>
16 #include <cdsannotate.h>
17 #include <specannotation.h>
18 #include <model_memory.h>
This header file declares and defines a simplified version of Cliff Click's
NonblockingHashMap. It contains all the necessary structures and main
functions. The simplified_cliffc_hashtable.cc file contains the definition for
// Forward declaration: kvs_data/slot below refer to the hashtable type
// before its full definition.
template<typename TypeK, typename TypeV>
class cliffc_hashtable;
Corresponding to the Object[] array in Cliff Click's Java implementation.
It keeps the first two slots for CHM (Hashtable control unit) and the hash
records (an array of hashes used for fast negative key-equality checks).
    // kvs_data constructor body (partially visible in this copy).
    // Layout: _data[0] = CHM control block (installed later by resize()),
    // _data[1] = int[] of cached hashes, then interleaved key/value
    // slots — two per bucket, hence sz * 2 + 2 entries.
    int real_size = sz * 2 + 2;
    _data = new atomic<void*>[real_size];
    // The control block should be initialized in resize()
    // Init the hash record array
    int *hashes = new int[_size];
    for (i = 0; i < _size; i++) {
    // Init the data to Null slot
    for (i = 2; i < real_size; i++) {
        _data[i].store(NULL, memory_order_relaxed);
    // Publish the hash array last with release ordering so a reader that
    // acquires _data[1] also observes the initialized slots above.
    _data[1].store(hashes, memory_order_release);
    // (Destructor, partially visible) reload the hash array so it can be
    // reclaimed; relaxed is fine — no concurrent access during teardown.
    int *hashes = (int*) _data[1].load(memory_order_relaxed);
71 slot(bool prime, void *ptr) {
TypeK must define the function "int hashCode()", which returns the hash
code for its object, and "int equals(TypeK anotherKey)", which is
used to judge equality.
TypeK and TypeV should define their own copy constructors.
// A simplified C++ port of Cliff Click's lock-free NonblockingHashMap,
// annotated for verification with the CDSChecker specification framework.
template<typename TypeK, typename TypeV>
class cliffc_hashtable {
92 # The synchronization we have for the hashtable gives us the property of
93 # serializability, so we should have a sequential hashtable when we check the
94 # correctness. The key thing is to identify all the commit point.
99 CLASS = cliffc_hashtable;
106 map = new_spec_table_default(equals_key);
107 id_map = new_spec_table_default(equals_id);
    // Spec-table helpers: equality predicates used by the checker's
    // sequential specification tables. NOTE: the early-return branches for
    // NULL arguments ('return false;') are elided in this copy.

    // Key equality — delegates to TypeK::equals.
    bool equals_key(void *ptr1, void *ptr2) {
        TypeK *key1 = (TypeK*) ptr1,
            *key2 = (TypeK*) ptr2;
        if (key1 == NULL || key2 == NULL)
        return key1->equals(key2);

    // Value equality — delegates to TypeV::equals.
    bool equals_val(void *ptr1, void *ptr2) {
        TypeV *val1 = (TypeV*) ptr1,
            *val2 = (TypeV*) ptr2;
        if (val1 == NULL || val2 == NULL)
        return val1->equals(val2);

    // Identity-tag equality — compares the raw tag fields.
    bool equals_id(void *ptr1, void *ptr2) {
        id_tag_t *id1 = (id_tag_t*) ptr1,
            *id2 = (id_tag_t*) ptr2;
        if (id1 == NULL || id2 == NULL)
        return (*id1).tag == (*id2).tag;
138 # Update the tag for the current key slot if the corresponding tag
139 # is NULL, otherwise just return that tag. It will update the next
140 # available tag too if it requires a new tag for that key slot.
    // Spec helper: map a key to a stable call id. First sight of a key
    // assigns it the current tag; later lookups return the stored tag.
    call_id_t getKeyTag(TypeK *key) {
        if (!spec_table_contains(id_map, key)) {
            call_id_t cur_id = current(tag);
            spec_table_put(id_map, key, (void*) cur_id);
        call_id_t res = (call_id_t) spec_table_get(id_map, key);
165 PutIfAbsent(COND_PutIfAbsentSucc),
167 RemoveIfMatch(COND_RemoveIfMatchSucc),
169 ReplaceIfMatch(COND_ReplaceIfMatchSucc)
172 //Write_interface -> Read_interface
    The control structure for the hashtable
        // CHM: per-table control block. Grants the hashtable access to
        // its private resize/copy state.
        friend class cliffc_hashtable;
        // The next table in the resize chain (NULL when no copy underway)
        atomic<kvs_data*> _newkvs;
        // Size of active K,V pairs
        // Count of used slots
        // The next part of the table to copy
        atomic_int _copy_idx;
        // Work-done reporting
        atomic_int _copy_done;
        // (Constructor body) relaxed stores suffice during construction;
        // the final release store publishes the initialized CHM.
        _newkvs.store(NULL, memory_order_relaxed);
        _size.store(size, memory_order_relaxed);
        _slots.store(0, memory_order_relaxed);
        _copy_idx.store(0, memory_order_relaxed);
        _copy_done.store(0, memory_order_release);
        // Heuristic to decide if the table is too full
        // True when we both reprobed past the hard limit and the used-slot
        // count exceeds the per-length reprobe bound ('return' keyword is
        // on a line elided in this copy).
        bool table_full(int reprobe_cnt, int len) {
            reprobe_cnt >= REPROBE_LIMIT &&
                _slots.load(memory_order_relaxed) >= reprobe_limit(len);
        // Install (or return the already-installed) successor table for
        // 'kvs'. Racing threads may each allocate a candidate; exactly one
        // wins the CAS on _newkvs, losers adopt the winner's table.
        kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
            //model_print("resizing...\n");
            kvs_data *newkvs = _newkvs.load(memory_order_acquire);
            // No copy in-progress, start one; Only double the table size
            int oldlen = kvs->_size;
            int sz = _size.load(memory_order_relaxed);
            // Just follow Cliff Click's heuristic to decide the new size
            if (sz >= (oldlen >> 2)) { // If we are 25% full
                newsz = oldlen << 1; // Double size
                if (sz >= (oldlen >> 1))
                    newsz = oldlen << 2; // Double double size
            // We do not record the record timestamp
            if (newsz <= oldlen) newsz = oldlen << 1;
            // Do not shrink ever
            if (newsz < oldlen) newsz = oldlen;
            // Last check cause the 'new' below is expensive
            newkvs = _newkvs.load(memory_order_acquire);
            if (newkvs != NULL) return newkvs;

            newkvs = new kvs_data(newsz);
            void *chm = (void*) new CHM(sz);
            // Release store: publish the fully-built CHM into slot 0.
            newkvs->_data[0].store(chm, memory_order_release);

            kvs_data *cur_newkvs;
            // Another check after the slow allocation
            if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
            // CAS the _newkvs to the allocated table
            // NOTE(review): the local names are swapped relative to their
            // roles — 'desired' (NULL) is passed as the *expected* value
            // and 'expected' (newkvs) as the *desired* one, so this is
            // CAS(NULL -> newkvs), which is the intended publish.
            kvs_data *desired = (kvs_data*) NULL;
            kvs_data *expected = (kvs_data*) newkvs;
            // NOTE(review): std::atomic forbids a 'release' failure
            // ordering; presumably accepted by the model checker's atomic
            // implementation — confirm.
            if (!_newkvs.compare_exchange_strong(desired, expected, memory_order_release,
                    memory_order_release)) {
                // Should clean the allocated area
                newkvs = _newkvs.load(memory_order_acquire);
        // Claim chunks of at most 1024 buckets from _copy_idx and copy
        // them into the successor table. If copy_all is false a helper
        // does one chunk and returns; otherwise it keeps going, entering
        // "panic" mode (linear full sweep) once the claim index runs past
        // 2 * oldlen.
        void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
            MODEL_ASSERT (get_chm(oldkvs) == this);
            kvs_data *newkvs = _newkvs.load(memory_order_acquire);
            int oldlen = oldkvs->_size;
            int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
            // Just follow Cliff Click's code here
            int panic_start = -1;
            while (_copy_done.load(memory_order_acquire) < oldlen) {
                copyidx = _copy_idx.load(memory_order_acquire);
                if (panic_start == -1) { // No panic
                    copyidx = _copy_idx.load(memory_order_acquire);
                    // Claim [copyidx, copyidx + min_copy_work) via CAS.
                    while (copyidx < (oldlen << 1) &&
                        !_copy_idx.compare_exchange_strong(copyidx, copyidx +
                            min_copy_work, memory_order_release, memory_order_release))
                        copyidx = _copy_idx.load(memory_order_relaxed);
                    if (!(copyidx < (oldlen << 1)))
                        panic_start = copyidx;
                // Now copy the chunk of work we claimed
                for (int i = 0; i < min_copy_work; i++)
                    if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
                copy_check_and_promote(topmap, oldkvs, workdone);
                copyidx += min_copy_work;
                if (!copy_all && panic_start == -1)
                    return; // We are done with the work we claim
            copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
        // Copy one specific bucket (the caller just observed a Prime
        // there), credit the work, and either return the new table or —
        // when should_help is non-NULL, i.e. a real caller rather than an
        // internal retry — also lend a hand with the rest of the copy.
        kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
                *oldkvs, int idx, void *should_help) {
            kvs_data *newkvs = _newkvs.load(memory_order_acquire);
            // We're only here cause the caller saw a Prime
            if (copy_slot(topmap, idx, oldkvs, newkvs))
                copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
            return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
        // Add 'workdone' newly-copied slots to _copy_done; when the count
        // reaches the old table's size, CAS the top-level _kvs pointer
        // from the old table to the successor, completing the resize.
        void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
                oldkvs, int workdone) {
            int oldlen = oldkvs->_size;
            int copyDone = _copy_done.load(memory_order_relaxed);
                copyDone = _copy_done.load(memory_order_relaxed);
                // compare_exchange_weak in a (partially elided) retry loop.
                if (_copy_done.compare_exchange_weak(copyDone, copyDone +
                        workdone, memory_order_relaxed, memory_order_relaxed))
            // Promote the new table to the current table
            if (copyDone + workdone == oldlen &&
                    topmap->_kvs.load(memory_order_acquire) == oldkvs) {
                kvs_data *newkvs = _newkvs.load(memory_order_acquire);
                // Losing this CAS is fine: someone else already promoted.
                topmap->_kvs.compare_exchange_strong(oldkvs, newkvs, memory_order_release,
                    memory_order_release);
        // Copy one key/value pair from the old table into the new one.
        // Protocol: (1) kill empty keys with TOMBSTONE, (2) box the old
        // value into a Prime so writers in the old table stall, (3) insert
        // the unboxed value into the new table, (4) stamp TOMBPRIME over
        // the old slot. Returns whether *this* thread did the transfer.
        bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
            while ((key_slot = key(oldkvs, idx)) == NULL)
                CAS_key(oldkvs, idx, NULL, TOMBSTONE);

            // First CAS old to Prime
            slot *oldval = val(oldkvs, idx);
            while (!is_prime(oldval)) {
                slot *box = (oldval == NULL || oldval == TOMBSTONE)
                    ? TOMBPRIME : new slot(true, oldval->_ptr);
                if (CAS_val(oldkvs, idx, oldval, box)) {
                    if (box == TOMBPRIME)
                        return 1; // Copy done (returned as bool 'true')
                    // Otherwise we CAS'd the box
                    oldval = box; // Record updated oldval
                oldval = val(oldkvs, idx); // Else re-try
            if (oldval == TOMBPRIME) return false; // Copy already completed here

            // Re-box without the prime bit for insertion into the new table.
            slot *old_unboxed = new slot(false, oldval->_ptr);
            int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
            // Old value is exposed in the new table
            while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
                oldval = val(oldkvs, idx);

            return copied_into_new;
    static const int Default_Init_Size = 4; // Initial table size
    // Sentinel slots (defined out-of-line): MATCH_ANY / NO_MATCH_OLD steer
    // putIfMatch's matching policy; TOMBSTONE marks deletion and TOMBPRIME
    // a deleted slot whose copy has completed.
    static slot* const MATCH_ANY;
    static slot* const NO_MATCH_OLD;
    static slot* const TOMBPRIME;
    static slot* const TOMBSTONE;
    static const int REPROBE_LIMIT = 10; // Forces a table-resize
    // The current top-level key/value table.
    atomic<kvs_data*> _kvs;
        // (Default-constructor body; the signature line is elided in this
        // copy.) Builds the initial table and its control block, then
        // publishes it with a release store.
        // Should initialize the CHM for the construction of the table
        // For other CHM in kvs_data, they should be initialized in resize()
        // because the size is determined dynamically
        kvs_data *kvs = new kvs_data(Default_Init_Size);
        void *chm = (void*) new CHM(0);
        kvs->_data[0].store(chm, memory_order_relaxed);
        _kvs.store(kvs, memory_order_release);
    // Construct a table with a caller-chosen initial size and publish it.
    cliffc_hashtable(int init_size) {
        // Should initialize the CHM for the construction of the table
        // For other CHM in kvs_data, they should be initialized in resize()
        // because the size is determined dynamically
        kvs_data *kvs = new kvs_data(init_size);
        void *chm = (void*) new CHM(0);
        kvs->_data[0].store(chm, memory_order_relaxed);
        // Release: readers that acquire _kvs see the initialized table.
        _kvs.store(kvs, memory_order_release);
422 @Commit_point_set: Get_Success_Point1 | Get_Success_Point2 | Get_Success_Point3
425 void *_Old_Val = spec_table_get(map, key);
427 equals_val(_Old_Val, __RET__)
    // Lookup: returns the value mapped to 'key', or NULL when absent
    // (get_impl translates TOMBSTONE to NULL before we unbox).
    TypeV* get(TypeK *key) {
        slot *key_slot = new slot(false, key);
        int fullhash = hash(key_slot);
        kvs_data *kvs = _kvs.load(memory_order_acquire);
        slot *V = get_impl(this, kvs, key_slot, fullhash);
        if (V == NULL) return NULL;
        // A Prime must never escape to callers — copies are finished first.
        MODEL_ASSERT (!is_prime(V));
        return (TypeV*) V->_ptr;
443 @Commit_point_set: Write_Success_Point
446 # Remember this old value at checking point
447 void *_Old_Val = spec_table_get(map, key);
448 spec_table_put(map, key, val);
450 equals_val(__RET__, _Old_Val)
    // Unconditional insert/overwrite; returns the previous value (NULL if
    // none). NO_MATCH_OLD makes putIfMatch accept any existing value.
    TypeV* put(TypeK *key, TypeV *val) {
        return putIfMatch(key, val, NO_MATCH_OLD);
459 @Interface: PutIfAbsent
461 Write_Success_Point | PutIfAbsent_Fail_Point
462 @Condition: !spec_table_contains(map, key)
464 COND_PutIfAbsentSucc :: __RET__ == NULL
467 void *_Old_Val = spec_table_get(map, key);
469 spec_table_put(map, key, value);
471 __COND_SAT__ ? __RET__ == NULL : equals_val(_Old_Val, __RET__)
474 TypeV* putIfAbsent(TypeK *key, TypeV *value) {
475 return putIfMatch(key, val, TOMBSTONE);
480 @Interface: RemoveAny
481 @Commit_point_set: Write_Success_Point
484 void *_Old_Val = spec_table_get(map, key);
485 spec_table_put(map, key, NULL);
487 equals_val(__RET__, _Old_Val)
    // Unconditional removal; returns the value that was removed (NULL if
    // the key was absent).
    // NOTE(review): TOMBSTONE (a slot*) is passed where this wrapper's
    // parameter is TypeV* — presumably tolerated only because templates
    // are checked at instantiation; confirm against the .cc definition.
    TypeV* remove(TypeK *key) {
        return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
496 @Interface: RemoveIfMatch
498 Write_Success_Point | RemoveIfMatch_Fail_Point
500 equals_val(spec_table_get(map, key), val)
502 COND_RemoveIfMatchSucc :: __RET__ == true
506 spec_table_put(map, key, NULL);
508 __COND_SAT__ ? __RET__ : !__RET__
    // Conditional removal: remove 'key' only when its current value
    // matches 'val'; returns whether the removal took effect.
    bool remove(TypeK *key, TypeV *val) {
        slot *val_slot = val == NULL ? NULL : new slot(false, val);
        // NOTE(review): 'val_slot' is created but never used — the raw
        // TypeV* 'val' is passed where putIfMatch() expects a slot* and is
        // compared against the returned value. Presumably this should pass
        // and compare 'val_slot'; confirm against the .cc definition.
        return putIfMatch(key, TOMBSTONE, val) == val;
519 @Interface: ReplaceAny
524 void *_Old_Val = spec_table_get(map, key);
526 equals_val(__RET__, _Old_Val)
    // Replace the value only when the key already has a mapping
    // (MATCH_ANY); returns the previous value.
    TypeV* replace(TypeK *key, TypeV *val) {
        return putIfMatch(key, val, MATCH_ANY);
535 @Interface: ReplaceIfMatch
537 Write_Success_Point | ReplaceIfMatch_Fail_Point
539 equals_val(spec_table_get(map, key), oldval)
541 COND_ReplaceIfMatchSucc :: __RET__ == true
545 spec_table_put(map, key, newval);
547 __COND_SAT__ ? __RET__ : !__RET__
    // Conditional replace: install 'newval' only when the current value
    // matches 'oldval'; returns whether the replacement took effect.
    // NOTE(review): 'oldval' (TypeV*) is passed where putIfMatch() expects
    // a slot* — presumably it should be wrapped in a slot first, as
    // remove(key, val) attempts to; confirm against the .cc definition.
    bool replace(TypeK *key, TypeV *oldval, TypeV *newval) {
        return putIfMatch(key, newval, oldval) == oldval;
    // Fetch the control block stashed in slot 0 of the table.
    static CHM* get_chm(kvs_data* kvs) {
        CHM *res = (CHM*) kvs->_data[0].load(memory_order_relaxed);

    // Fetch the cached-hash array stashed in slot 1 of the table.
    static int* get_hashes(kvs_data *kvs) {
        return (int *) kvs->_data[1].load(memory_order_relaxed);

    // Preserve happens-before semantics on newly inserted keys
    // Keys live at even offsets: _data[idx * 2 + 2].
    static inline slot* key(kvs_data *kvs, int idx) {
        MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
        // Corresponding to the volatile read in get_impl() and putIfMatch in
        // Cliff Click's Java implementation
        slot *res = (slot*) kvs->_data[idx * 2 + 2].load(memory_order_acquire);
574 The atomic operation in val() function is a "potential" commit point,
575 which means in some case it is a real commit point while it is not for
576 some other cases. This so happens because the val() function is such a
577 fundamental function that many internal operation will call. Our
578 strategy is that we label any potential commit points and check if they
579 really are the commit points later.
    // Preserve happens-before semantics on newly inserted values
    // Values live at odd offsets: _data[idx * 2 + 3]. The acquire load is
    // annotated elsewhere as a *potential* commit point (Read_Val_Point).
    static inline slot* val(kvs_data *kvs, int idx) {
        MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
        // Corresponding to the volatile read in get_impl() and putIfMatch in
        // Cliff Click's Java implementation
        slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
589 # This is a complicated potential commit point since many many functions are
591 @Potential_commit_point_define: true
592 @Label: Read_Val_Point
    // Compute a spread hash from TypeK::hashCode(), mixing bits per Cliff
    // Click's Java implementation (some mixing steps are elided in this
    // copy of the file).
    static int hash(slot *key_slot) {
        MODEL_ASSERT(key_slot != NULL && key_slot->_ptr != NULL);
        TypeK* key = (TypeK*) key_slot->_ptr;
        int h = key->hashCode();
        // Spread bits according to Cliff Click's code
        h += (h << 15) ^ 0xffffcd7d;
        h += (h << 2) + (h << 14);
        return h ^ (h >> 16);
    // Heuristic to decide if reprobed too many times.
    // Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
    // put it triggers a table resize. Several places MUST have exact agreement.
    static int reprobe_limit(int len) {
        return REPROBE_LIMIT + (len >> 2);

    // A Prime is a boxed value currently being copied to the new table.
    static inline bool is_prime(slot *val) {
        return (val != NULL) && val->_prime;
    // Check for key equality. Try direct pointer comparison first (fast
    // negative test) and then the full 'equals' call
    // 'hash' is the bucket index into the cached-hash array; a stored hash
    // of 0 means "not yet memorized", so only a mismatching non-zero hash
    // can short-circuit to false before calling equals().
    static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
        // Caller should've checked this.
        MODEL_ASSERT (K != NULL);
        TypeK* key_ptr = (TypeK*) key_slot->_ptr;
            ((hashes[hash] == 0 || hashes[hash] == fullhash) &&
                key_ptr->equals(K->_ptr));
638 static bool valeq(slot *val_slot1, slot *val_slot2) {
639 MODEL_ASSERT (val_slot1 != NULL);
640 TypeK* ptr1 = (TypeV*) val_slot1->_ptr;
641 if (val_slot2 == NULL || ptr1 == NULL) return false;
642 return ptr1->equals(val_slot2->_ptr);
    // Together with key() preserve the happens-before relationship on newly
    // inserted keys: release CAS on the key slot at _data[2 * idx + 2].
    // NOTE(review): std::atomic forbids a 'release' failure ordering;
    // presumably accepted by the model checker's atomics — confirm.
    static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
        return kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
            desired, memory_order_release, memory_order_release);
653 Same as the val() function, we only label the CAS operation as the
654 potential commit point.
    // Together with val() preserve the happens-before relationship on newly
    // inserted values: release CAS on the value slot at _data[2 * idx + 3].
    // A successful CAS here is annotated elsewhere as the potential commit
    // point Write_Val_Point.
    static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
        bool res = kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
            desired, memory_order_release, memory_order_release);
# If it is a successful put instead of a copy or any other internal
# operations, expected != NULL
666 @Potential_commit_point_define: res == true
667 @Label: Write_Val_Point
673 slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
675 int len = kvs->_size;
676 CHM *chm = get_chm(kvs);
677 int *hashes = get_hashes(kvs);
679 int idx = fullhash & (len - 1);
682 slot *K = key(kvs, idx);
683 slot *V = val(kvs, idx);
686 @Commit_point_define: V == NULL
687 @Potential_commit_point_label: Read_Val_Point
688 @Label: Get_Success_Point_1
692 if (K == NULL) return NULL; // A miss
694 if (keyeq(K, key_slot, hashes, idx, fullhash)) {
695 // Key hit! Check if table-resize in progress
699 @Commit_point_define: true
700 @Potential_commit_point_label: Read_Val_Point
701 @Label: Get_Success_Point_2
704 return (V == TOMBSTONE) ? NULL : V; // Return this value
706 // Otherwise, finish the copy & retry in the new table
707 return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
708 idx, key_slot), key_slot, fullhash);
711 if (++reprobe_cnt >= REPROBE_LIMIT ||
712 key_slot == TOMBSTONE) {
713 // Retry in new table
714 // Atomic read (acquire) can be here
715 kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
718 @Commit_point_define_check: newkvs == NULL
719 @Label: Get_Success_Point_3
722 return newkvs == NULL ? NULL : get_impl(topmap,
723 topmap->help_copy(newkvs), key_slot, fullhash);
726 idx = (idx + 1) & (len - 1); // Reprobe by 1
    // A wrapper of the essential function putIfMatch()
    // Boxes key/value into slots, runs the core putIfMatch against the
    // current table, and unboxes the result (TOMBSTONE -> NULL).
    TypeV* putIfMatch(TypeK *key, TypeV *value, slot *old_val) {
        // TODO: Should throw an exception rather return NULL
        if (old_val == NULL) {
        slot *key_slot = new slot(false, key);
        slot *value_slot = new slot(false, value);
        kvs_data *kvs = _kvs.load(memory_order_acquire);
        slot *res = putIfMatch(this, kvs, key_slot, value_slot, old_val);
        // Only when copy_slot() call putIfMatch() will it return NULL
        MODEL_ASSERT (res != NULL);
        MODEL_ASSERT (!is_prime(res));
        return res == TOMBSTONE ? NULL : (TypeV*) res->_ptr;
    Put, Remove, PutIfAbsent, etc will call this function. Returns the old
    value. If the returned value equals expVal (or expVal is
    NO_MATCH_OLD), then this function puts the val_slot to the table 'kvs'.
    Only copy_slot will pass a NULL expVal, and putIfMatch only returns a
    NULL if passed a NULL expVal.
    // Core state machine: (1) claim a key slot, (2) maybe trigger/chase a
    // resize, (3) decide per expVal whether to CAS the new value in.
    static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
        *key_slot, slot *val_slot, slot *expVal) {
        MODEL_ASSERT (val_slot != NULL);
        MODEL_ASSERT (!is_prime(val_slot));
        MODEL_ASSERT (!is_prime(expVal));
        int fullhash = hash(key_slot);
        int len = kvs->_size;
        CHM *chm = get_chm(kvs);
        int *hashes = get_hashes(kvs);
        int idx = fullhash & (len - 1);

        while (true) { // Spin till we get a key slot
            if (K == NULL) { // Get a free slot
                // Removing a key that is not present: nothing to do.
                if (val_slot == TOMBSTONE) return val_slot;
                // Claim the null key-slot
                if (CAS_key(kvs, idx, NULL, key_slot)) {
                    chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
                    hashes[idx] = fullhash; // Memorize full hash
                K = key(kvs, idx); // CAS failed, get updated value
                MODEL_ASSERT (K != NULL);
            // Key slot not null, there exists a Key here

            if (keyeq(K, key_slot, hashes, idx, fullhash))

            // Notice that the logic here should be consistent with that of get.
            // The first predicate means too many reprobes means nothing in the
            if (++reprobe_cnt >= reprobe_limit(len) ||
                K == TOMBSTONE) { // Found a Tombstone key, no more keys
                newkvs = chm->resize(topmap, kvs);
                // NOTE(review): debug print left in — consider removing.
                model_print("resize1\n");
                // Help along an existing copy
                if (expVal != NULL) topmap->help_copy(newkvs);
                return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);

            idx = (idx + 1) & (len - 1); // Reprobe
        } // End of spinning till we get a Key slot

        if (val_slot == V) return V; // Fast cutout for no-change

        // Here it tries to resize cause it doesn't want other threads to stop
        // its progress (eagerly try to resize soon)
        newkvs = chm->_newkvs.load(memory_order_acquire);
        if (newkvs == NULL &&
            ((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V))) {
            // NOTE(review): debug print left in — consider removing.
            model_print("resize2\n");
            newkvs = chm->resize(topmap, kvs); // Force the copy to start

        // Finish the copy and then put it in the new table
            return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
                expVal), key_slot, val_slot, expVal);

        // Decided to update the existing table
        MODEL_ASSERT (!is_prime(V));
        // Bail out (returning the current V) when the expected-value policy
        // does not match what is actually in the slot.
        if (expVal != NO_MATCH_OLD &&
            (expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
            !(V == NULL && expVal == TOMBSTONE) &&
            (expVal == NULL || !valeq(expVal, V))) {
            @Commit_point_define: expVal == TOMBSTONE
            @Potential_commit_point_label: Read_Val_Point
            @Label: PutIfAbsent_Fail_Point
            # This is a check for the PutIfAbsent() when the value
            @Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
            @Potential_commit_point_label: Read_Val_Point
            @Label: RemoveIfMatch_Fail_Point
            @Commit_point_define: expVal != NULL && !valeq(expVal, V)
            @Potential_commit_point_label: Read_Val_Point
            @Label: ReplaceIfMatch_Fail_Point
            return V; // Do not update!

        if (CAS_val(kvs, idx, V, val_slot)) {
            # The only point where a successful put happens
            @Commit_point_define: true
            @Potential_commit_point_label: Write_Val_Point
            @Label: Write_Success_Point
            if (expVal != NULL) { // Not called by a table-copy
                // CAS succeeded, should adjust size
                // Both normal put's and table-copy calls putIfMatch, but
                // table-copy does not increase the number of live K/V pairs
                if ((V == NULL || V == TOMBSTONE) &&
                    val_slot != TOMBSTONE)
                    chm->_size.fetch_add(1, memory_order_relaxed);
                if (!(V == NULL || V == TOMBSTONE) &&
                    val_slot == TOMBSTONE)
                    chm->_size.fetch_add(-1, memory_order_relaxed);
        // Report an empty prior slot as TOMBSTONE to external callers.
        return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
        // Saw a Prime: finish that copy, then retry in the new table.
        return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
            idx, expVal), key_slot, val_slot, expVal);
    // Help along an existing table-resize. This is a fast cut-out wrapper.
    // Returns 'helper' untouched when no copy is underway; otherwise lends
    // one chunk of copy work to the top-level table's CHM.
    kvs_data* help_copy(kvs_data *helper) {
        kvs_data *topkvs = _kvs.load(memory_order_acquire);
        CHM *topchm = get_chm(topkvs);
        // No copy in progress
        if (topchm->_newkvs.load(memory_order_acquire) == NULL) return helper;
        topchm->help_copy_impl(this, topkvs, false);