1 #ifndef CLIFFC_HASHTABLE_H
2 #define CLIFFC_HASHTABLE_H
9 #define MODEL_ASSERT assert
11 #include <model-assert.h>
16 #include <cdsannotate.h>
17 #include <specannotation.h>
18 #include <model_memory.h>
24 This header file declares and defines a simplified version of Cliff Click's
25 NonblockingHashMap. It contains all the necessary structures and main
26 functions. In simplified_cliffc_hashtable.cc file, it has the definition for
30 template<typename TypeK, typename TypeV>
31 class cliffc_hashtable;
34 Corresponding to the Object[] array in Cliff Click's Java implementation.
35 It keeps the first two slots for CHM (Hashtable control unit) and the hash
36 records (an array of hash used for fast negative key-equality check).
// kvs_data constructor fragment: the backing array is laid out as
// [0] = CHM control block, [1] = int hash-record array, [2..] = alternating
// key/value slots (hence sz * 2 + 2 cells).
44 int real_size = sz * 2 + 2;
45 _data = new atomic<void*>[real_size];
46 // The control block should be initialized in resize()
47 // Init the hash record array
48 int *hashes = new int[_size];
50 for (i = 0; i < _size; i++) {
53 // Init the data to Null slot
54 for (i = 2; i < real_size; i++) {
55 _data[i].store(NULL, memory_order_relaxed);
// Release-publish the hash array so a reader that acquires _data[1] also
// sees the initialized hash entries (loop body above is elided here).
57 _data[1].store(hashes, memory_order_release);
// Destructor fragment (presumably ~kvs_data): reclaim the hash array that
// was stashed in slot 1 — TODO confirm the surrounding delete[] calls.
61 int *hashes = (int*) _data[1].load(memory_order_relaxed);
// slot: a tagged value cell; 'prime' marks a value that is being migrated
// to a new table during a resize (see copy_slot()).
71 slot(bool prime, void *ptr) {
79 TypeK must have defined function "int hashCode()" which returns the hash
80 code for its object, and "int equals(TypeK anotherKey)" which is
81 used to judge equality.
82 TypeK and TypeV should define their own copy constructor.
89 template<typename TypeK, typename TypeV>
90 class cliffc_hashtable {
92 # The synchronization we have for the hashtable gives us the property of
93 # serializability, so we should have a sequential hashtable when we check the
94 correctness. The key thing is to identify all the commit points.
99 CLASS = cliffc_hashtable;
106 map = new_spec_table_default(equals_key);
107 id_map = new_spec_table_default(equals_id);
// Spec-checker key comparator: two keys are equal per TypeK::equals().
// NOTE(review): the return for the NULL branch (original line 115) is not
// visible in this view — confirm it handles the both-NULL case as intended.
111 bool equals_key(void *ptr1, void *ptr2) {
112 TypeK *key1 = (TypeK*) ptr1,
113 *key2 = (TypeK*) ptr2;
114 if (key1 == NULL || key2 == NULL)
116 return key1->equals(key2);
// Spec-checker value comparator: delegates to TypeV::equals().
// NOTE(review): the NULL-branch return (original line 126) is elided —
// confirm NULL/NULL is treated as equal where the spec requires it.
120 bool equals_val(void *ptr1, void *ptr2) {
123 TypeV *val1 = (TypeV*) ptr1,
124 *val2 = (TypeV*) ptr2;
125 if (val1 == NULL || val2 == NULL)
127 return val1->equals(val2);
// Spec-checker identity comparator for call-id tags: equal iff the tags match.
131 bool equals_id(void *ptr1, void *ptr2) {
132 id_tag_t *id1 = (id_tag_t*) ptr1,
133 *id2 = (id_tag_t*) ptr2;
134 if (id1 == NULL || id2 == NULL)
136 return (*id1).tag == (*id2).tag;
140 # Update the tag for the current key slot if the corresponding tag
141 # is NULL, otherwise just return that tag. It will update the next
142 # available tag too if it requires a new tag for that key slot.
// Returns the spec-checking call id associated with 'key', allocating and
// memoizing a fresh tag on first sight of the key (stored in id_map).
143 call_id_t getKeyTag(TypeK *key) {
144 if (!spec_table_contains(id_map, key)) {
145 call_id_t cur_id = current(tag);
146 spec_table_put(id_map, key, (void*) cur_id);
// Either the pre-existing or the freshly-inserted tag is returned.
150 call_id_t res = (call_id_t) spec_table_get(id_map, key);
167 PutIfAbsent(COND_PutIfAbsentSucc),
169 RemoveIfMatch(COND_RemoveIfMatchSucc),
171 ReplaceIfMatch(COND_ReplaceIfMatchSucc)
174 //Write_interface -> Read_interface
182 The control structure for the hashtable
186 friend class cliffc_hashtable;
// Next (larger) table during a resize; NULL means no copy in progress.
188 atomic<kvs_data*> _newkvs;
190 // Size of active K,V pairs
193 // Count of used slots
196 // The next part of the table to copy
197 atomic_int _copy_idx;
199 // Work-done reporting
200 atomic_int _copy_done;
// Constructor fragment: counters are initialized relaxed; the final
// release store on _copy_done publishes the initialized CHM to readers
// that acquire it.
204 _newkvs.store(NULL, memory_order_relaxed);
205 _size.store(size, memory_order_relaxed);
206 _slots.store(0, memory_order_relaxed);
208 _copy_idx.store(0, memory_order_relaxed);
209 _copy_done.store(0, memory_order_release);
216 // Heuristic to decide if the table is too full
// "Full" requires BOTH: the caller already reprobed REPROBE_LIMIT times
// AND the used-slot count has crossed reprobe_limit(len).
217 bool table_full(int reprobe_cnt, int len) {
219 reprobe_cnt >= REPROBE_LIMIT &&
220 _slots.load(memory_order_relaxed) >= reprobe_limit(len);
// Create (or adopt) the next, larger table and CAS it into _newkvs.
// Returns the winning new table; never returns NULL.
223 kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
224 //model_print("resizing...\n");
// Fast path: someone already installed a new table — just use it.
225 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
229 // No copy in-progress, start one; Only double the table size
230 int oldlen = kvs->_size;
231 int sz = _size.load(memory_order_relaxed);
234 // Just follow Cliff Click's heuristic to decide the new size
235 if (sz >= (oldlen >> 2)) { // If we are 25% full
236 newsz = oldlen << 1; // Double size
237 if (sz >= (oldlen >> 1))
238 newsz = oldlen << 2; // Double double size
241 // We do not record the record timestamp
242 if (newsz <= oldlen) newsz = oldlen << 1;
243 // Do not shrink ever
244 if (newsz < oldlen) newsz = oldlen;
246 // Last check cause the 'new' below is expensive
247 newkvs = _newkvs.load(memory_order_acquire)
248 if (newkvs != NULL) return newkvs;
250 newkvs = new kvs_data(newsz);
// The new table's CHM carries the current live-pair count as its size hint.
251 void *chm = (void*) new CHM(sz);
252 newkvs->_data[0].store(chm, memory_order_release);
254 kvs_data *cur_newkvs;
255 // Another check after the slow allocation
256 if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
258 // CAS the _newkvs to the allocated table
// NOTE(review): these two locals are named backwards relative to their CAS
// roles — 'desired' (NULL) is passed as the *expected* value and 'expected'
// (newkvs) as the *desired* value. Behavior is correct; rename for clarity.
// NOTE(review): a 'release' failure ordering is not a valid std::atomic
// failure order in ISO C++ — presumably accepted by the model checker's
// atomic implementation; confirm.
259 kvs_data *desired = (kvs_data*) NULL;
260 kvs_data *expected = (kvs_data*) newkvs;
261 if (!_newkvs.compare_exchange_strong(desired, expected, memory_order_release,
262 memory_order_release)) {
263 // Should clean the allocated area
// Lost the install race: adopt the winner's table instead of ours.
265 newkvs = _newkvs.load(memory_order_acquire);
// Cooperatively copy slots from 'oldkvs' into _newkvs. Each caller claims
// bounded chunks of work via _copy_idx; 'copy_all' (elided param) forces the
// caller to keep helping until the whole table is copied.
270 void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
272 MODEL_ASSERT (get_chm(oldkvs) == this);
273 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
274 int oldlen = oldkvs->_size;
// Cap per-helper work so no single thread is stuck copying a huge table.
275 int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
277 // Just follow Cliff Click's code here
278 int panic_start = -1;
280 while (_copy_done.load(memory_order_acquire) < oldlen) {
281 copyidx = _copy_idx.load(memory_order_acquire);
282 if (panic_start == -1) { // No panic
283 copyidx = _copy_idx.load(memory_order_acquire);
// Claim the chunk [copyidx, copyidx + min_copy_work) by CAS on _copy_idx.
284 while (copyidx < (oldlen << 1) &&
285 !_copy_idx.compare_exchange_strong(copyidx, copyidx +
286 min_copy_work, memory_order_release, memory_order_release))
287 copyidx = _copy_idx.load(memory_order_relaxed);
// Index ran past 2*oldlen: panic mode — linearly copy everything ourselves.
288 if (!(copyidx < (oldlen << 1)))
289 panic_start = copyidx;
292 // Now copy the chunk of work we claimed
294 for (int i = 0; i < min_copy_work; i++)
295 if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
// Report the slots we actually copied; may trigger table promotion.
299 copy_check_and_promote(topmap, oldkvs, workdone);
301 copyidx += min_copy_work;
302 if (!copy_all && panic_start == -1)
303 return; // We are done with the work we claim
305 copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
// Copy one slot that a reader/writer tripped over (it saw a Prime), then
// either return the new table or keep helping the whole copy along.
308 kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
309 *oldkvs, int idx, void *should_help) {
310 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
311 // We're only here cause the caller saw a Prime
312 if (copy_slot(topmap, idx, oldkvs, newkvs))
313 copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
// NULL should_help means "just copy this one slot"; otherwise help globally.
314 return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
// Add 'workdone' slots to the copy-complete count; once every slot of the
// old table is copied, promote _newkvs to be the top-level table.
317 void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
318 oldkvs, int workdone) {
319 int oldlen = oldkvs->_size;
320 int copyDone = _copy_done.load(memory_order_relaxed);
// CAS-loop (weak CAS, so spurious failures retry) to publish our work count.
323 copyDone = _copy_done.load(memory_order_relaxed);
324 if (_copy_done.compare_exchange_weak(copyDone, copyDone +
325 workdone, memory_order_relaxed, memory_order_relaxed))
330 // Promote the new table to the current table
// Only the thread whose contribution completes the copy — and only while
// the old table is still installed — attempts the promotion CAS.
331 if (copyDone + workdone == oldlen &&
332 topmap->_kvs.load(memory_order_acquire) == oldkvs) {
333 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
334 topmap->_kvs.compare_exchange_strong(oldkvs, newkvs, memory_order_release,
335 memory_order_release);
// Migrate one key/value pair from the old table to the new one. Returns
// true iff THIS call did the copy (so the caller can count the work).
339 bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
// An empty key slot is permanently killed with TOMBSTONE so no late writer
// can sneak a new key into the old table.
342 while ((key_slot = key(oldkvs, idx)) == NULL)
343 CAS_key(oldkvs, idx, NULL, TOMBSTONE);
345 // First CAS old to Prime
346 slot *oldval = val(oldkvs, idx);
347 while (!is_prime(oldval)) {
// Dead slots (NULL/TOMBSTONE) are boxed with the shared TOMBPRIME sentinel;
// live values get a fresh prime-tagged copy of their payload.
348 slot *box = (oldval == NULL || oldval == TOMBSTONE)
349 ? TOMBPRIME : new slot(true, oldval->_ptr);
350 if (CAS_val(oldkvs, idx, oldval, box)) {
351 if (box == TOMBPRIME)
352 return 1; // Copy done
353 // Otherwise we CAS'd the box
354 oldval = box; // Record updated oldval
357 oldval = val(oldkvs, idx); // Else re-try
360 if (oldval == TOMBPRIME) return false; // Copy already completed here
// Re-wrap the payload un-primed and insert it into the new table; a NULL
// expected value (elided arg) tells putIfMatch this is a table-copy put.
362 slot *old_unboxed = new slot(false, oldval->_ptr);
363 int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
366 // Old value is exposed in the new table
// Finally retire the old slot to TOMBPRIME so readers move to the new table.
367 while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
368 oldval = val(oldkvs, idx);
370 return copied_into_new;
377 static const int Default_Init_Size = 4; // Initial table size
// Sentinel slots (defined out-of-line): MATCH_ANY / NO_MATCH_OLD steer the
// expected-value test in putIfMatch; TOMBSTONE marks a deleted key/value and
// TOMBPRIME a dead slot observed during a table copy — confirm against the
// out-of-line definitions.
379 static slot* const MATCH_ANY;
380 static slot* const NO_MATCH_OLD;
382 static slot* const TOMBPRIME;
383 static slot* const TOMBSTONE;
385 static const int REPROBE_LIMIT = 10; // Forces a table-resize
// The current top-level table; swapped by copy_check_and_promote().
387 atomic<kvs_data*> _kvs;
391 // Should initialize the CHM for the construction of the table
392 // For other CHM in kvs_data, they should be initialized in resize()
393 // because the size is determined dynamically
// Default-constructor fragment: build the initial table, install its CHM
// relaxed, then release-publish the whole table via _kvs.
399 kvs_data *kvs = new kvs_data(Default_Init_Size);
400 void *chm = (void*) new CHM(0);
401 kvs->_data[0].store(chm, memory_order_relaxed);
402 _kvs.store(kvs, memory_order_release);
// Sized constructor: identical to the default one but with a caller-chosen
// initial capacity (callers presumably pass a power of two — the probe mask
// 'fullhash & (len - 1)' requires it; TODO confirm).
405 cliffc_hashtable(int init_size) {
406 // Should initialize the CHM for the construction of the table
407 // For other CHM in kvs_data, they should be initialized in resize()
408 // because the size is determined dynamically
415 kvs_data *kvs = new kvs_data(init_size);
416 void *chm = (void*) new CHM(0);
417 kvs->_data[0].store(chm, memory_order_relaxed);
418 _kvs.store(kvs, memory_order_release);
424 @Commit_point_set: Get_Success_Point1 | Get_Success_Point2 | Get_Success_Point3
427 void *_Old_Val = spec_table_get(map, key);
429 equals_val(_Old_Val, __RET__)
// Lookup: wrap the key in a non-prime slot and probe via get_impl().
// Returns NULL on a miss, otherwise the mapped TypeV*.
432 TypeV* get(TypeK *key) {
433 slot *key_slot = new slot(false, key);
434 int fullhash = hash(key_slot);
435 kvs_data *kvs = _kvs.load(memory_order_acquire);
436 slot *V = get_impl(this, kvs, key_slot, fullhash);
437 if (V == NULL) return NULL;
// Prime values exist only mid-copy and must never escape to callers.
438 MODEL_ASSERT (!is_prime(V));
439 return (TypeV*) V->_ptr;
445 @Commit_point_set: Write_Success_Point
448 # Remember this old value at checking point
449 void *_Old_Val = spec_table_get(map, key);
450 spec_table_put(map, key, val);
452 equals_val(__RET__, _Old_Val)
// Unconditional insert/overwrite; returns the previous value (or NULL).
455 TypeV* put(TypeK *key, TypeV *val) {
456 return putIfMatch(key, val, NO_MATCH_OLD);
461 @Interface: PutIfAbsent
463 Write_Success_Point | PutIfAbsent_Fail_Point
464 @Condition: !spec_table_contains(map, key)
466 COND_PutIfAbsentSucc :: __RET__ == NULL
469 void *_Old_Val = spec_table_get(map, key);
471 spec_table_put(map, key, value);
473 __COND_SAT__ ? __RET__ == NULL : equals_val(_Old_Val, __RET__)
// Insert only if the key has no live mapping; returns NULL on success,
// otherwise the existing value.
// BUG FIX: the parameter is named 'value' but the original body passed the
// undeclared identifier 'val', which cannot compile.
476 TypeV* putIfAbsent(TypeK *key, TypeV *value) {
477 return putIfMatch(key, value, TOMBSTONE);
482 @Interface: RemoveAny
483 @Commit_point_set: Write_Success_Point
486 void *_Old_Val = spec_table_get(map, key);
487 spec_table_put(map, key, NULL);
489 equals_val(__RET__, _Old_Val)
// Unconditional remove: writes a TOMBSTONE value; returns the old value.
// NOTE(review): TOMBSTONE (a slot*) is passed where the wrapper declares a
// TypeV* value parameter — verify this resolves/behaves as intended.
492 TypeV* remove(TypeK *key) {
493 return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
498 @Interface: RemoveIfMatch
500 Write_Success_Point | RemoveIfMatch_Fail_Point
502 equals_val(spec_table_get(map, key), val)
504 COND_RemoveIfMatchSucc :: __RET__ == true
508 spec_table_put(map, key, NULL);
510 __COND_SAT__ ? __RET__ : !__RET__
// Conditional remove: delete the mapping only if the current value matches.
513 bool remove(TypeK *key, TypeV *val) {
// NOTE(review): val_slot is constructed but never used, and the call below
// passes raw 'val' (TypeV*) where putIfMatch's wrapper takes a slot*
// expected value — val_slot looks like the intended argument; confirm.
514 slot *val_slot = val == NULL ? NULL : new slot(false, val);
515 return putIfMatch(key, TOMBSTONE, val) == val;
521 @Interface: ReplaceAny
526 void *_Old_Val = spec_table_get(map, key);
528 equals_val(__RET__, _Old_Val)
// Replace only if the key already has SOME mapping (MATCH_ANY sentinel);
// returns the previous value.
531 TypeV* replace(TypeK *key, TypeV *val) {
532 return putIfMatch(key, val, MATCH_ANY);
537 @Interface: ReplaceIfMatch
539 Write_Success_Point | ReplaceIfMatch_Fail_Point
541 equals_val(spec_table_get(map, key), oldval)
543 COND_ReplaceIfMatchSucc :: __RET__ == true
547 spec_table_put(map, key, newval);
549 __COND_SAT__ ? __RET__ : !__RET__
// Conditional replace: install newval only if the current value is oldval.
// NOTE(review): 'oldval' (TypeV*) is passed where the wrapper declares a
// slot* expected value, and the result (TypeV*) is pointer-compared with
// oldval rather than using valeq — confirm both are intended.
552 bool replace(TypeK *key, TypeV *oldval, TypeV *newval) {
553 return putIfMatch(key, newval, oldval) == oldval;
// Fetch the CHM control block stashed in array slot 0.
557 static CHM* get_chm(kvs_data* kvs) {
558 CHM *res = (CHM*) kvs->_data[0].load(memory_order_relaxed);
// Fetch the hash-record array stashed in array slot 1.
562 static int* get_hashes(kvs_data *kvs) {
563 return (int *) kvs->_data[1].load(memory_order_relaxed);
566 // Preserve happens-before semantics on newly inserted keys
// Read the key cell for bucket 'idx' (keys live at even offsets 2,4,...).
567 static inline slot* key(kvs_data *kvs, int idx) {
568 MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
569 // Corresponding to the volatile read in get_impl() and putIfMatch in
570 // Cliff Click's Java implementation
571 slot *res = (slot*) kvs->_data[idx * 2 + 2].load(memory_order_acquire);
576 The atomic operation in val() function is a "potential" commit point,
577 which means in some case it is a real commit point while it is not for
578 some other cases. This so happens because the val() function is such a
579 fundamental function that many internal operation will call. Our
580 strategy is that we label any potential commit points and check if they
581 really are the commit points later.
583 // Preserve happens-before semantics on newly inserted values
// Read the value cell for bucket 'idx' (values live at odd offsets 3,5,...).
584 static inline slot* val(kvs_data *kvs, int idx) {
585 MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
586 // Corresponding to the volatile read in get_impl() and putIfMatch in
587 // Cliff Click's Java implementation
588 slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
591 # This is a complicated potential commit point since many many functions are
593 @Potential_commit_point_define: true
594 @Label: Read_Val_Point
// Compute the spread hash for a key slot from TypeK::hashCode(); the
// bit-mixing steps follow Cliff Click's spreader (some steps elided here).
602 static int hash(slot *key_slot) {
603 MODEL_ASSERT(key_slot != NULL && key_slot->_ptr != NULL);
604 TypeK* key = (TypeK*) key_slot->_ptr;
605 int h = key->hashCode();
606 // Spread bits according to Cliff Click's code
607 h += (h << 15) ^ 0xffffcd7d;
611 h += (h << 2) + (h << 14);
612 return h ^ (h >> 16);
615 // Heuristic to decide if reprobed too many times.
616 // Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
617 // put it triggers a table resize. Several places MUST have exact agreement.
// Limit grows with the table: 10 + len/4 probes before giving up/resizing.
618 static int reprobe_limit(int len) {
619 return REPROBE_LIMIT + (len >> 2);
// True iff 'val' is a prime (mid-copy) slot; NULL is never prime.
622 static inline bool is_prime(slot *val) {
623 return (val != NULL) && val->_prime;
626 // Check for key equality. Try direct pointer comparison first (fast
627 // negative test) and then the full 'equals' call
// 'hash' here is the probe index into the hashes[] record array; a stored
// hash of 0 means "not recorded yet" so the equals() call must decide.
628 static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
630 // Caller should've checked this.
631 MODEL_ASSERT (K != NULL);
632 TypeK* key_ptr = (TypeK*) key_slot->_ptr;
635 ((hashes[hash] == 0 || hashes[hash] == fullhash) &&
637 key_ptr->equals(K->_ptr));
// Compare two value slots via TypeV::equals(); false when either payload
// is missing.
// BUG FIX: ptr1 was declared TypeK* while being initialized from a
// (TypeV*) cast — that does not compile whenever TypeK and TypeV differ,
// and equals() must be TypeV's in any case. Declare it TypeV*.
640 static bool valeq(slot *val_slot1, slot *val_slot2) {
641 MODEL_ASSERT (val_slot1 != NULL);
642 TypeV* ptr1 = (TypeV*) val_slot1->_ptr;
643 if (val_slot2 == NULL || ptr1 == NULL) return false;
644 return ptr1->equals(val_slot2->_ptr);
647 // Together with key() preserve the happens-before relationship on newly
// CAS the key cell of bucket 'idx' (even offset 2*idx+2).
// NOTE(review): 'release' is not a valid failure ordering in ISO C++ CAS —
// presumably tolerated by the model checker's atomics; confirm.
649 static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
650 return kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
651 desired, memory_order_release, memory_order_release);
655 Same as the val() function, we only label the CAS operation as the
656 potential commit point.
658 // Together with val() preserve the happens-before relationship on newly
// CAS the value cell of bucket 'idx' (odd offset 2*idx+3).
660 static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
662 bool res = kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
663 desired, memory_order_release, memory_order_release);
665 # If it is a successful put instead of a copy or any other internal
666 # operations, expected != NULL
668 @Potential_commit_point_define: res == true
669 @Label: Write_Val_Point
// Probe for 'key_slot' in table 'kvs'; returns the value slot, or NULL on a
// miss. Recurses into the next table when it trips over a copy-in-progress.
675 slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
677 int len = kvs->_size;
678 CHM *chm = get_chm(kvs);
679 int *hashes = get_hashes(kvs);
681 int idx = fullhash & (len - 1);
684 slot *K = key(kvs, idx);
685 slot *V = val(kvs, idx);
688 @Commit_point_define: K == NULL
689 @Potential_commit_point_label: Read_Val_Point
690 @Label: Get_Success_Point_1
694 if (K == NULL) return NULL; // A miss
696 if (keyeq(K, key_slot, hashes, idx, fullhash)) {
697 // Key hit! Check if table-resize in progress
701 @Commit_point_define: true
702 @Potential_commit_point_label: Read_Val_Point
703 @Label: Get_Success_Point_2
706 return (V == TOMBSTONE) ? NULL : V; // Return this value
708 // Otherwise, finish the copy & retry in the new table
709 return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
710 idx, key_slot), key_slot, fullhash);
// BUG FIX: (1) use reprobe_limit(len) like putIfMatch does — the comment on
// reprobe_limit() demands exact agreement between get and put, and the old
// bare REPROBE_LIMIT made get miss keys that put legally inserted beyond 10
// probes; (2) test the probed key K against TOMBSTONE — 'key_slot' is the
// caller's freshly-allocated probe key and can never equal TOMBSTONE, so the
// old test was dead (compare putIfMatch, which tests K == TOMBSTONE).
713 if (++reprobe_cnt >= reprobe_limit(len) ||
714 K == TOMBSTONE) {
715 // Retry in new table
716 // Atomic read (acquire) can be here
717 kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
720 @Commit_point_define_check: newkvs == NULL
721 @Label: Get_Success_Point_3
724 return newkvs == NULL ? NULL : get_impl(topmap,
725 topmap->help_copy(newkvs), key_slot, fullhash);
728 idx = (idx + 1) & (len - 1); // Reprobe by 1
732 // A wrapper of the essential function putIfMatch()
// Wraps key/value in slots and delegates; 'old_val' is the expected-value
// sentinel (NO_MATCH_OLD, MATCH_ANY, TOMBSTONE, or a concrete slot).
733 TypeV* putIfMatch(TypeK *key, TypeV *value, slot *old_val) {
734 // TODO: Should throw an exception rather return NULL
735 if (old_val == NULL) {
738 slot *key_slot = new slot(false, key);
740 slot *value_slot = new slot(false, value);
741 kvs_data *kvs = _kvs.load(memory_order_acquire);
742 slot *res = putIfMatch(this, kvs, key_slot, value_slot, old_val);
743 // Only when copy_slot() call putIfMatch() will it return NULL
744 MODEL_ASSERT (res != NULL);
745 MODEL_ASSERT (!is_prime(res));
// TOMBSTONE means "no previous mapping" — surface that as NULL.
746 return res == TOMBSTONE ? NULL : (TypeV*) res->_ptr;
750 Put, Remove, PutIfAbsent, etc will call this function. Return the old
751 value. If the returned value is equals to the expVal (or expVal is
752 NO_MATCH_OLD), then this function puts the val_slot to the table 'kvs'.
753 Only copy_slot will pass a NULL expVal, and putIfMatch only returns a
754 NULL if passed a NULL expVal.
756 static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
757 *key_slot, slot *val_slot, slot *expVal) {
758 MODEL_ASSERT (val_slot != NULL);
759 MODEL_ASSERT (!is_prime(val_slot));
760 MODEL_ASSERT (!is_prime(expVal));
762 int fullhash = hash(key_slot);
763 int len = kvs->_size;
764 CHM *chm = get_chm(kvs);
765 int *hashes = get_hashes(kvs);
766 int idx = fullhash & (len - 1);
// Phase 1: claim (or find) the key slot for this key.
774 while (true) { // Spin till we get a key slot
777 if (K == NULL) { // Get a free slot
// Removing a key that was never present: nothing to do.
778 if (val_slot == TOMBSTONE) return val_slot;
779 // Claim the null key-slot
780 if (CAS_key(kvs, idx, NULL, key_slot)) {
781 chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
782 hashes[idx] = fullhash; // Memorize full hash
785 K = key(kvs, idx); // CAS failed, get updated value
786 MODEL_ASSERT (K != NULL);
789 // Key slot not null, there exists a Key here
790 if (keyeq(K, key_slot, hashes, idx, fullhash))
793 // Notice that the logic here should be consistent with that of get.
794 // The first predicate means too many reprobes means nothing in the
796 if (++reprobe_cnt >= reprobe_limit(len) ||
797 K == TOMBSTONE) { // Found a Tombstone key, no more keys
798 newkvs = chm->resize(topmap, kvs);
// NOTE(review): leftover debug print — consider removing for production.
799 model_print("resize1\n");
800 // Help along an existing copy
801 if (expVal != NULL) topmap->help_copy(newkvs);
802 return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
805 idx = (idx + 1) & (len - 1); // Reprobe
806 } // End of spinning till we get a Key slot
// Phase 2: update the value slot (V read elided above).
808 if (val_slot == V) return V; // Fast cutout for no-change
810 // Here it tries to resize cause it doesn't want other threads to stop
811 // its progress (eagerly try to resize soon)
812 newkvs = chm->_newkvs.load(memory_order_acquire);
813 if (newkvs == NULL &&
814 ((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V))) {
// NOTE(review): leftover debug print — consider removing for production.
815 model_print("resize2\n");
816 newkvs = chm->resize(topmap, kvs); // Force the copy to start
819 // Finish the copy and then put it in the new table
821 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
822 expVal), key_slot, val_slot, expVal);
824 // Decided to update the existing table
826 MODEL_ASSERT (!is_prime(V));
// Expected-value filter: bail out (returning current V) unless expVal is
// the NO_MATCH_OLD wildcard, or MATCH_ANY with a live value present, or
// actually equal to the current value.
828 if (expVal != NO_MATCH_OLD &&
830 (expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
831 !(V == NULL && expVal == TOMBSTONE) &&
832 (expVal == NULL || !valeq(expVal, V))) {
835 @Commit_point_define: expVal == TOMBSTONE
836 @Potential_commit_point_label: Read_Val_Point
837 @Label: PutIfAbsent_Fail_Point
838 # This is a check for the PutIfAbsent() when the value
844 @Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
845 @Potential_commit_point_label: Read_Val_Point
846 @Label: RemoveIfMatch_Fail_Point
851 @Commit_point_define: expVal != NULL && !valeq(expVal, V)
852 @Potential_commit_point_label: Read_Val_Point
853 @Label: ReplaceIfMatch_Fail_Point
856 return V; // Do not update!
// The actual write: CAS the value cell from V to val_slot.
859 if (CAS_val(kvs, idx, V, val_slot)) {
862 # The only point where a successful put happens
863 @Commit_point_define: true
864 @Potential_commit_point_label: Write_Val_Point
865 @Label: Write_Success_Point
868 if (expVal != NULL) { // Not called by a table-copy
869 // CAS succeeded, should adjust size
870 // Both normal put's and table-copy calls putIfMatch, but
871 // table-copy does not increase the number of live K/V pairs
872 if ((V == NULL || V == TOMBSTONE) &&
873 val_slot != TOMBSTONE)
874 chm->_size.fetch_add(1, memory_order_relaxed);
875 if (!(V == NULL || V == TOMBSTONE) &&
876 val_slot == TOMBSTONE)
877 chm->_size.fetch_add(-1, memory_order_relaxed);
// Empty-slot insert reports TOMBSTONE ("no previous value") to real callers.
879 return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
// CAS lost to a copy (saw a Prime): finish that slot's copy and retry in
// the new table.
884 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
885 idx, expVal), key_slot, val_slot, expVal);
889 // Help along an existing table-resize. This is a fast cut-out wrapper.
// 'helper' is passed through unchanged; it is returned to the caller so the
// call composes inside expressions like get_impl(..., help_copy(newkvs), ...).
890 kvs_data* help_copy(kvs_data *helper) {
891 kvs_data *topkvs = _kvs.load(memory_order_acquire);
892 CHM *topchm = get_chm(topkvs);
893 // No copy in progress
894 if (topchm->_newkvs.load(memory_order_acquire) == NULL) return helper;
// Otherwise contribute a bounded chunk of copy work (copy_all == false).
895 topchm->help_copy_impl(this, topkvs, false);