1 #ifndef SIMPLIFIED_CLIFFC_HASHTABLE_H
2 #define SIMPLIFIED_CLIFFC_HASHTABLE_H
14 This header file declares and defines a simplified version of Cliff Click's
15 NonblockingHashMap. It contains all the necessary structures and main
16 functions. In simplified_cliffc_hashtable.cc file, it has the definition for
20 template<typename TypeK, typename TypeV>
21 class cliffc_hashtable;
24 Corresponding to the Object[] array in Cliff Click's Java implementation.
25 It keeps the first two slots for CHM (Hashtable control unit) and the hash
26 records (an array of hash used for fast negative key-equality check).
// --- kvs_data (fragment): backing array for one table generation ---
// Layout of _data: [0] = CHM control block, [1] = int[] of cached hashes,
// then two consecutive slots (key, value) per table entry.
// NOTE(review): real_size is computed from sizeof(atomic<void*>) rather
// than from the entry count; expected `_size * 2 + 2` -- confirm upstream.
34 int real_size = sizeof(atomic<void*>) * 2 + 2;
35 _data = new atomic<void*>[real_size];
36 // The control block should be initialized in resize()
37 // Init the hash record array
38 int *hashes = new int[_size];
40 for (i = 0; i < _size; i++) {
// Publish the hash array in header slot [1] (plain store; table not shared yet).
43 _data[1].store(hashes, memory_order_relaxed);
44 // Init the data to Null slot
45 for (i = 2; i < real_size; i++) {
46 _data[i].store(NULL, memory_order_relaxed);
// Destructor (fragment): reclaims the cached-hash array stored in _data[1].
51 int *hashes = (int*) _data[1].load(memory_order_relaxed);
// --- slot (fragment): one boxed key or value ---
// The ctor's `prime` flag marks a value that is being copied into a newer
// table; _ptr owns the actual TypeK/TypeV payload.
59 shared_ptr<void> _ptr;
61 slot(bool prime, shared_ptr<void> ptr) {
70 TypeK must have defined function "int hashCode()" which returns the hash
71 code for its object, and "int equals(TypeK anotherKey)" which is
72 used to judge equality.
73 TypeK and TypeV should define their own copy constructor.
74 To make the memory management safe and similar to Cliff Click's Java
75 implementation, we use shared_ptr instead of normal pointer in terms of the
76 pointers that point to TypeK and TypeV.
78 template<typename TypeK, typename TypeV>
79 class cliffc_hashtable {
// The '#'-prefixed and '@'-prefixed lines below are sequential-specification
// annotations (commit-point spec) consumed by a checker, not executable C++.
81 # The synchronization we have for the hashtable gives us the property of
82 # serializability, so we should have a sequential hashtable when we check the
83 # correctness. The key thing is to identify all the commit points.
88 CLASS = cliffc_hashtable;
// Spec state: a sequential map plus a tag map identifying key slots.
91 spec_hashtable<TypeK, TypeV*> map;
92 spec_hashtable<TypeK, Tag> id_map;
95 map = spec_hashtable<TypeK, TypeV*>();
// NOTE(review): id_map is declared as spec_hashtable<TypeK, Tag> above but
// is initialized as spec_hashtable<TypeK, TypeV*> here -- likely a typo.
96 id_map = spec_hashtable<TypeK, TypeV*>();
99 static bool equals_val(TypeV *ptr1, TypeV *ptr2) {
103 # Update the tag for the current key slot if the corresponding tag
104 # is NULL, otherwise just return that tag. It will update the next
105 # available tag too if it requires a new tag for that key slot.
106 static Tag getKeyTag(TypeK &key) {
107 if (id_map.get(key) == NULL) {
108 Tag cur_tag = tag.current();
109 id_map.put(key, cur_tag);
113 return id_map.get(key);
// Interface/commit-point condition table for the conditional operations.
129 PutIfAbsent(COND_PutIfAbsentSucc),
131 RemoveIfMatch(COND_RemoveIfMatchSucc),
133 ReplaceIfMatch(COND_ReplaceIfMatchSucc)
136 Write_interface -> Read_interface
// --- CHM: per-table control structure (size counters + copy state) ---
142 The control structure for the hashtable
146 friend class cliffc_hashtable;
// Next table generation being copied into (NULL while no resize is in
// progress); published/consumed with release/acquire.
148 atomic<kvs_data*> _newkvs;
150 // Size of active K,V pairs
153 // Count of used slots
156 // The next part of the table to copy
157 atomic_int _copy_idx;
159 // Work-done reporting
160 atomic_int _copy_done;
// CHM constructor (fragment): plain-counter initialization.
164 _size.store(size, memory_order_relaxed);
165 _slots.store(0, memory_order_relaxed);
167 _copy_idx.store(0, memory_order_relaxed);
168 _copy_done.store(0, memory_order_release);
175 // Heuristic to decide if the table is too full
176 bool table_full(int reprobe_cnt, int len) {
// Full when we reprobed too often AND used slots exceed the reprobe limit.
178 reprobe_cnt >= REPROBE_LIMIT &&
179 _slots.load(memory_order_relaxed) >= reprobe_limit(len);
// Install (or find) the next-generation table and return it. Exactly one
// thread wins the CAS below; losers are expected to discard their
// allocation and adopt the winner's table.
182 kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
183 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
187 // No copy in-progress, start one; Only double the table size
188 int oldlen = kvs->_size;
189 int sz = _size.load(memory_order_relaxed);
192 // Just follow Cliff Click's heuristic to decide the new size
193 if (sz >= (oldlen >> 2)) { // If we are 25% full
194 newsz = oldlen << 1; // Double size
195 if (sz >= (oldlen >> 1))
196 newsz = oldlen << 2; // Double double size
199 // We do not record the record timestamp
200 if (newsz <= oldlen) newsz = oldlen << 1;
201 // Do not shrink ever
202 if (newsz < oldlen) newsz = oldlen;
204 // Last check cause the 'new' below is expensive
205 newkvs = _newkvs.load(memory_order_acquire);
206 if (newkvs != NULL) return newkvs;
208 newkvs = new kvs_data(newsz);
209 void *chm = (void*) new CHM(sz);
210 newkvs->_data[0].store(chm, memory_order_relaxed);
212 kvs_data *cur_newkvs;
213 // Another check after the slow allocation
214 if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
216 // CAS the _newkvs to the allocated table
// NOTE(review): these locals are named backwards -- `desired` holds the
// expected value (NULL) and `expected` holds the desired value (newkvs).
// The CAS still performs the intended NULL -> newkvs transition.
217 kvs_data *desired = (kvs_data*) NULL;
218 kvs_data *expected = (kvs_data*) newkvs;
// NOTE(review): memory_order_release is not a legal *failure* ordering for
// compare_exchange (C++11 [atomics.types.operations]); should be relaxed
// or acquire -- confirm and fix upstream.
219 if (!_newkvs.compare_exchange_strong(desired, expected, memory_order_release,
220 memory_order_release)) {
221 // Should clean the allocated area
223 newkvs = _newkvs.load(memory_order_acquire);
// Copy a chunk of min(1024, oldlen) slots from oldkvs into _newkvs,
// claiming work via CAS on _copy_idx; with copy_all set, keep going until
// every slot has been copied.
228 void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
230 assert (get_chm(oldkvs) == this);
231 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
232 int oldlen = oldkvs->_size;
233 int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
235 // Just follow Cliff Click's code here
236 int panic_start = -1;
238 while (_copy_done.load(memory_order_acquire) < oldlen) {
239 copyidx = _copy_idx.load(memory_order_acquire);
240 if (panic_start == -1) { // No panic
241 copyidx = _copy_idx.load(memory_order_acquire);
// Claim the chunk [copyidx, copyidx + min_copy_work).
// NOTE(review): memory_order_release is not a legal failure ordering for
// compare_exchange; should be relaxed -- confirm and fix upstream.
242 while (copyidx < (oldlen << 1) &&
243 !_copy_idx.compare_exchange_strong(copyidx, copyidx +
244 min_copy_work, memory_order_release, memory_order_release))
245 copyidx = _copy_idx.load(memory_order_relaxed);
246 if (!(copyidx < (oldlen << 1)))
247 panic_start = copyidx;
250 // Now copy the chunk of work we claimed
252 for (int i = 0; i < min_copy_work; i++)
253 if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
257 copy_check_and_promote(topmap, oldkvs, workdone);
259 copyidx += min_copy_work;
260 if (!copy_all && panic_start == -1)
261 return; // We are done with the work we claim
263 copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
266 kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
267 *oldkvs, int idx, void *should_help) {
268 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
269 // We're only here cause the caller saw a Prime
270 if (copy_slot(topmap, idx, oldkvs, _newkvs))
271 copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
272 return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
// Fold 'workdone' newly-copied slots into _copy_done; once the count
// reaches oldlen, promote _newkvs to be the top-level table.
275 void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
276 oldkvs, int workdone) {
277 int oldlen = oldkvs->_size;
278 int copyDone = _copy_done.load(memory_order_relaxed);
281 copyDone = _copy_done.load(memory_order_relaxed);
282 if (_copy_done.compare_exchange_weak(copyDone, copyDone +
283 workdone, memory_order_relaxed, memory_order_relaxed))
288 // Promote the new table to the current table
289 if (copyDone + workdone == oldlen &&
290 topmap->_kvs.load(memory_order_acquire) == oldkvs)
// NOTE(review): '_newkvs' is an atomic<kvs_data*>; compare_exchange_strong
// needs the loaded kvs_data* as the desired value. Also, release is not a
// legal failure ordering for compare_exchange -- confirm and fix upstream.
291 topmap->_kvs.compare_exchange_strong(oldkvs, _newkvs, memory_order_release,
292 memory_order_release);
// Copy one key/value pair from oldkvs[idx] into the new table. Returns
// true iff this thread performed the copy (so the caller may bump
// _copy_done). Protocol: box the old value as a Prime, insert the unboxed
// value into the new table, then stomp the old slot with TOMBPRIME.
295 bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
// Blindly flip a still-NULL key to TOMBSTONE so no new value can appear
// in the old table under this slot.
298 while ((key_slot = key(oldkvs, idx)) == NULL)
299 CAS_key(oldkvs, idx, NULL, TOMBSTONE);
301 // First CAS old to Prime
302 slot *oldval = val(oldkvs, idx, NULL);
303 while (!is_prime(oldval)) {
304 slot *box = (oldval == NULL || oldval == TOMBSTONE)
305 ? TOMBPRIME : new slot(true, oldval->_ptr);
306 if (CAS_val(oldkvs, idx, oldval, box)) {
307 if (box == TOMBPRIME)
308 return 1; // Copy done
309 // Otherwise we CAS'd the box
310 oldval = box; // Record updated oldval
313 oldval = val(oldkvs, idx, NULL); // Else re-try
316 if (oldval == TOMBPRIME) return false; // Copy already completed here
// Insert the unboxed value into the new table (putIfMatch with a NULL
// expVal only succeeds if the destination slot is still empty).
318 slot *old_unboxed = new slot(false, oldval->_ptr);
319 int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
322 // Old value is exposed in the new table
323 while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
324 oldval = val(oldkvs, idx, NULL);
326 return copied_into_new;
// --- Table-wide constants and the top-level table pointer ---
333 static const int Default_Init_Size = 8; // Initial table size
// Sentinel slots: MATCH_ANY / NO_MATCH_OLD steer putIfMatch's matching
// policy; TOMBSTONE marks a deleted value, TOMBPRIME a dead+copied one.
335 static slot* const MATCH_ANY;
336 static slot* const NO_MATCH_OLD;
338 static slot* const TOMBPRIME;
339 static slot* const TOMBSTONE;
341 static const int REPROBE_LIMIT = 10; // Forces a table-resize
// Current top-level table; swapped (CAS) when a resize copy completes.
343 atomic<kvs_data*> _kvs;
// Default constructor (fragment): builds the initial table and its CHM.
347 // Should initialize the CHM for the construction of the table
348 // For other CHM in kvs_data, they should be initialized in resize()
349 // because the size is determined dynamically
350 kvs_data *kvs = new kvs_data(Default_Init_Size);
351 void *chm = (void*) new CHM(0);
352 kvs->_data[0].store(chm, memory_order_relaxed);
353 _kvs.store(kvs, memory_order_release);
356 cliffc_hashtable(int init_size) {
357 // Should initialize the CHM for the construction of the table
358 // For other CHM in kvs_data, they should be initialzed in resize()
359 // because the size is determined dynamically
360 kvs_data *kvs = new kvs_data(init_size);
361 void *chm = (void*) new CHM(0);
362 kvs->_data[0].store(chm, memory_order_relaxed);
363 _kvs.store(kvs, memory_order_release);
// Sequential-spec annotation for get(): its commit points and the check
// that the returned value equals the spec map's value for 'key'.
369 @Commit_point_set: Read_Val_Point1 | Read_Val_Point2 | Read_Val_Point3
370 @ID: __sequential.getKeyTag(key)
372 @DefineVar: TypeV *_Old_Val = __sequential.map.get(key)
374 __sequential.equals_val(_Old_Val, __RET__)
377 shared_ptr<TypeV> get(TypeK& key) {
378 void *key_ptr = (void*) new TypeK(key);
379 slot *key_slot = new slot(false, shared_ptr<void>(key_ptr));
380 int fullhash = hash(key_slot);
381 slot *V = get_impl(this, _kvs, key_slot, fullhash);
382 if (V == NULL) return NULL;
383 assert (!is_prime(V));
384 return static_pointer_cast<TypeV>(V->_ptr);
// Sequential-spec annotation for put(): the old value recorded before the
// spec-map update must equal the value this call returns.
390 @Commit_point_set: Write_Val_Point
391 @ID: __sequential.getKeyTag(key)
393 # Remember this old value at checking point
394 @DefineVar: TypeV *_Old_Val = __sequential.map.get(key)
395 @Code: __sequential.map.put(key, &val);
397 __sequential.equals_val(__RET__, _Old_Val)
// Unconditional put: maps key -> val; returns the previous value or NULL.
400 shared_ptr<TypeV> put(TypeK& key, TypeV& val) {
401 return putIfMatch(key, val, NO_MATCH_OLD);
// Sequential-spec annotation for putIfAbsent(): succeeds (returns NULL)
// only when the spec map holds no value for 'key'.
406 @Interface: PutIfAbsent
408 Write_Val_Point | PutIfAbsent_Fail_Point
409 @Condition: __sequential.map.get(key) == NULL
411 COND_PutIfAbsentSucc :: __RET__ == NULL
412 @ID: __sequential.getKeyTag(key)
414 @DefineVar: TypeV *_Old_Val = __sequential.map.get(key)
417 __sequential.map.put(key, &value);
419 __COND_SAT__ ? __RET__ == NULL : __sequential.equals_val(_Old_Val, __RET__)
422 shared_ptr<TypeV> putIfAbsent(TypeK& key, TypeV& value) {
423 return putIfMatch(key, val, TOMBSTONE);
// Sequential-spec annotation for remove(key), plus the wrapper itself.
428 @Interface: RemoveAny
429 @Commit_point_set: Write_Val_Point
430 @ID: __sequential.getKeyTag(key)
432 @DefineVar: TypeV *_Old_Val = __sequential.map.get(key)
433 @Code: __sequential.map.put(key, NULL);
435 __sequential.equals_val(__RET__, _Old_Val)
// Unconditional remove: returns the value previously mapped (or NULL).
// NOTE(review): TOMBSTONE is a slot*, but the putIfMatch wrapper takes a
// TypeV& value parameter -- this call looks like it needs the static
// slot-based putIfMatch overload; confirm.
438 shared_ptr<TypeV> remove(TypeK& key) {
439 return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
// Sequential-spec annotation for remove(key, val) (remove-if-match).
444 @Interface: RemoveIfMatch
446 Write_Val_Point | RemoveIfMatch_Fail_Point
448 __sequential.equals_val(__sequential.map.get(key), &val)
450 COND_RemoveIfMatchSucc :: __RET__ == true
451 @ID: __sequential.getKeyTag(key)
455 __sequential.map.put(key, NULL);
457 __COND_SAY__ ? __RET__ : !__RET__
// Conditional remove: delete the mapping only if the current value
// matches 'val'; returns whether the removal happened.
// NOTE(review): `val == NULL` compares a TypeV& against NULL, the slot
// ctor expects a shared_ptr<void>, and TOMBSTONE / val are passed where
// the wrapper expects (TypeV&, slot*) -- this overload looks unfinished.
460 bool remove(TypeK& key, TypeV& val) {
461 slot *val_slot = val == NULL ? NULL : new slot(false, val);
462 return putIfMatch(key, TOMBSTONE, val) == val;
// Sequential-spec annotation for replace(key, val) (replace-any).
468 @Interface: ReplaceAny
471 @ID: __sequential.getKeyTag(key)
473 @DefineVar: TypeV *_Old_Val = __sequential.map.get(key)
475 __sequential.equals_val(__RET__, _Old_Val)
// Replace only when a mapping already exists; returns the old value.
478 shared_ptr<TypeV> replace(TypeK& key, TypeV& val) {
479 return putIfMatch(key, val, MATCH_ANY);
// Sequential-spec annotation for replace(key, oldval, newval).
484 @Interface: ReplaceIfMatch
486 Write_Val_Point | ReplaceIfMatch_Fail_Point
488 __sequential.equals_val(__sequential.map.get(key), &oldval)
490 COND_ReplaceIfMatchSucc :: __RET__ == true
491 @ID: __sequential.getKeyTag(key)
495 __sequential.map.put(key, &newval);
497 __COND_SAY__ ? __RET__ : !__RET__
// Conditional replace: succeeds only if the current value equals oldval.
// NOTE(review): the wrapper returns shared_ptr<TypeV> while 'oldval' is a
// TypeV& -- the == comparison here looks type-incorrect; confirm intent.
500 bool replace(TypeK& key, TypeV& oldval, TypeV& newval) {
501 return putIfMatch(key, newval, oldval) == oldval;
// Accessors for the two header slots of a table: _data[0] holds the CHM
// control block, _data[1] the cached-hash array.
505 static CHM* get_chm(kvs_data* kvs) {
506 return (CHM*) kvs->_data[0].load(memory_order_relaxed);
509 static int* get_hashes(kvs_data *kvs) {
510 return (int *) kvs->_data[1].load(memory_order_relaxed);
513 // Preserve happens-before semantics on newly inserted keys
// Read the key slot at 'idx' (entries start at _data[2], two slots each).
514 static inline slot* key(kvs_data *kvs, int idx) {
515 assert (idx >= 0 && idx < kvs->_size);
516 // Corresponding to the volatile read in get_impl() and putIfMatch in
517 // Cliff Click's Java implementation
518 return (slot*) kvs->_data[idx * 2 + 2].load(memory_order_acquire);
// Spec note: the load in val() is a *potential* commit point -- it may or
// may not be the linearization point depending on the calling operation.
522 The atomic operation in val() function is a "potential" commit point,
523 which means in some case it is a real commit point while it is not for
524 some other cases. This so happens because the val() function is such a
525 fundamental function that many internal operation will call. Our
526 strategy is that we label any potential commit points and check if they
527 really are the commit points later.
529 // Preserve happens-before semantics on newly inserted values
// Read the value slot at 'idx' (the value lives right after its key).
// NOTE(review): several callers invoke val(kvs, idx, NULL) with a third
// argument -- there may be an overload not shown in this listing; confirm.
530 static inline slot* val(kvs_data *kvs, int idx) {
531 assert (idx >= 0 && idx < kvs->_size);
532 // Corresponding to the volatile read in get_impl() and putIfMatch in
533 // Cliff Click's Java implementation
534 slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
537 # This is a complicated potential commit point since many many functions are
539 @Potential_commit_point_define: true
540 @Label: Read_Val_Point
// Hash a key slot: calls TypeK::hashCode() then applies Cliff Click's
// bit-spreader so nearby keys don't cluster in the power-of-two table.
548 static int hash(slot *key_slot) {
549 assert(key_slot != NULL && key_slot->_ptr != NULL);
550 shared_ptr<TypeK> key = static_pointer_cast<TypeK>(key_slot->_ptr);
551 int h = key->hashCode();
552 // Spread bits according to Cliff Click's code
553 h += (h << 15) ^ 0xffffcd7d;
557 h += (h << 2) + (h << 14);
558 return h ^ (h >> 16);
561 // Heuristic to decide if reprobed too many times.
562 // Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
563 // put it triggers a table resize. Several places MUST have exact agreement.
564 static int reprobe_limit(int len) {
565 return REPROBE_LIMIT + (len >> 2);
// A slot is "prime" when it boxes a value that is being table-copied.
568 static inline bool is_prime(slot *val) {
569 return (val != NULL) && val->_prime;
572 // Check for key equality. Try direct pointer comparison first (fast
573 // negative test) and then the full 'equals' call
574 static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
576 // Caller should've checked this.
578 shared_ptr<TypeK> key_ptr = static_pointer_cast<TypeK>(key_slot->_ptr);
// Cached-hash check first: a 0 entry means "not recorded yet"; otherwise
// unequal hashes prove the keys differ without calling equals().
581 ((hashes[hash] == 0 || hashes[hash] == fullhash) &&
583 key_ptr->equals(K->_ptr));
586 static bool valeq(slot *val_slot1, slot *val_slot2) {
587 assert (val_slot1 != NULL);
588 shared_ptr<TypeK> ptr1 = static_pointer_cast<TypeV>(val_slot1->_ptr);
589 if (val_slot2 == NULL || ptr1 == NULL) return false;
590 return ptr1->equals(val_slot2->_ptr);
593 // Together with key() preserve the happens-before relationship on newly
595 static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
596 return kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
597 desired, memory_order_release, memory_order_release);
// Spec note: as with val(), only the CAS below is labeled as a potential
// commit point.
601 Same as the val() function, we only label the CAS operation as the
602 potential commit point.
604 // Together with val() preserve the happens-before relationship on newly
// CAS the value slot at 'idx' from 'expected' to 'desired'.
// NOTE(review): memory_order_release is not a legal failure ordering for
// compare_exchange; should be relaxed -- confirm and fix upstream.
606 static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
608 bool res = kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
609 desired, memory_order_release, memory_order_release);
611 # If it is a successful put instead of a copy or any other internal
612 # operations, expected != NULL
614 @Potential_commit_point_define: __ATOMIC_RET__ == true
615 @Label: Write_Val_Point
// Core lookup: probe 'kvs' for key_slot; recurse into the newer table
// when a copy is in progress or the reprobe limit is hit.
621 slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
623 int len = kvs->_size;
624 CHM *chm = get_chm(kvs);
625 int *hashes = get_hashes(kvs);
// Mask the hash into the power-of-two table; linear reprobe from here.
627 int idx = fullhash & (len - 1);
630 slot *K = key(kvs, idx);
631 slot *V = val(kvs, idx);
634 @Commit_point_define: V == NULL
635 @Potential_commit_point_label: Read_Val_Point
636 @Label: Get_Success_Point_1
640 if (V == NULL) return NULL; // A miss
642 if (keyeq(K, key_slot, hashes, idx, fullhash)) {
643 // Key hit! Check if table-resize in progress
647 @Commit_point_define: true
648 @Potential_commit_point_label: Read_Val_Point
649 @Label: Get_Success_Point_2
652 return (V == TOMBSTONE) ? NULL : V; // Return this value
654 // Otherwise, finish the copy & retry in the new table
655 return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
656 idx, key_slot), key_slot, fullhash);
// NOTE(review): putIfMatch's matching check tests the table key
// (K == TOMBSTONE); testing the probe key 'key_slot' here looks wrong --
// confirm against the Java original.
659 if (++reprobe_cnt >= REPROBE_LIMIT ||
660 key_slot == TOMBSTONE) {
661 // Retry in new table
662 // Atomic read (acquire) can be here
663 kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
666 @Commit_point_define_check: newkvs == NULL
667 @Label: Get_Success_Point_3
670 return newkvs == NULL ? NULL : get_impl(topmap,
671 topmap->help_copy(newkvs), key_slot, fullhash);
674 idx = (idx + 1) & (len - 1); // Reprobe by 1
678 // A wrapper of the essential function putIfMatch()
// Boxes key/value into slots and delegates to the static putIfMatch().
679 shared_ptr<TypeV> putIfMatch(TypeK& key, TypeV& value, slot *old_val) {
680 // TODO: Should throw an exception rather return NULL
681 if (old_val == NULL) {
684 void *key_ptr = (void*) new TypeK(key);
685 slot *key_slot = new slot(false, shared_ptr<void>(key_ptr));
687 void *val_ptr = (void*) new TypeV(value);
688 slot *value_slot = new slot(false, shared_ptr<void>(val_ptr));
// _kvs converts to kvs_data* via atomic's conversion operator (seq_cst load).
689 slot *res = putIfMatch(this, _kvs, key_slot, value_slot, old_val);
690 // Only when copy_slot() call putIfMatch() will it return NULL
691 assert (res != NULL);
692 assert (!is_prime(res));
693 return res == TOMBSTONE ? NULL : static_pointer_cast<TypeV>(res->_ptr);
// Core insert/update. Returns the previous value slot; writes val_slot
// only when the existing value matches expVal (or expVal is NO_MATCH_OLD).
697 Put, Remove, PutIfAbsent, etc will call this function. Return the old
698 value. If the returned value is equals to the expVal (or expVal is
699 NO_MATCH_OLD), then this function puts the val_slot to the table 'kvs'.
700 Only copy_slot will pass a NULL expVal, and putIfMatch only returns a
701 NULL if passed a NULL expVal.
703 static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
704 *key_slot, slot *val_slot, slot *expVal) {
705 assert (val_slot != NULL);
706 assert (!is_prime(val_slot));
707 assert (!is_prime(expVal));
709 int fullhash = hash(key_slot);
710 int len = kvs->_size;
711 CHM *chm = get_chm(kvs);
712 int *hashes = get_hashes(kvs);
713 int idx = fullhash & (len - 1);
// Phase 1: probe until we find our key or claim a NULL key slot.
721 while (true) { // Spin till we get a key slot
723 V = val(kvs, idx, NULL);
724 if (K == NULL) { // Get a free slot
// Removing a key that was never inserted: nothing to do.
725 if (val_slot == TOMBSTONE) return val_slot;
726 // Claim the null key-slot
727 if (CAS_key(kvs, idx, NULL, key_slot)) {
728 chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
729 hashes[idx] = fullhash; // Memorize full hash
732 K = key(kvs, idx); // CAS failed, get updated value
736 // Key slot not null, there exists a Key here
737 if (keyeq(K, key_slot, hashes, idx, fullhash))
740 // Notice that the logic here should be consistent with that of get.
741 // The first predicate means too many reprobes means nothing in the
743 if (++reprobe_cnt >= reprobe_limit(len) ||
744 K == TOMBSTONE) { // Found a Tombstone key, no more keys
745 newkvs = chm->resize(topmap, kvs);
746 // Help along an existing copy
747 if (expVal != NULL) topmap->help_copy(newkvs);
748 return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
751 idx = (idx + 1) & (len - 1); // Reprobe
752 } // End of spinning till we get a Key slot
// Phase 2: we own a key slot; decide whether/how to update the value.
754 if (val_slot == V) return V; // Fast cutout for no-change
756 // Here it tries to resize cause it doesn't want other threads to stop
757 // its progress (eagerly try to resize soon)
758 newkvs = chm->_newkvs.load(memory_order_acquire);
759 if (newkvs == NULL &&
760 ((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V)))
761 newkvs = chm->resize(topmap, kvs); // Force the copy to start
763 // Finish the copy and then put it in the new table
765 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
766 expVal), key_slot, val_slot, expVal);
768 // Decided to update the existing table
770 assert (!is_prime(V));
// Match policy: bail out (returning V) when expVal rules out this write.
772 if (expVal != NO_MATCH_OLD &&
774 (expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
775 !(V == NULL && expVal == TOMBSTONE) &&
776 (expVal == NULL || !valeq(expVal, V))) {
779 @Commit_point_define: expVal == TOMBSTONE
780 @Potential_commit_point_label: Read_Val_Point
781 @Label: PutIfAbsent_Fail_Point
782 # This is a check for the PutIfAbsent() when the value
788 @Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
789 @Potential_commit_point_label: Read_Val_Point
790 @Label: RemoveIfMatch_Fail_Point
795 @Commit_point_define: !valeq(expVal, V)
796 @Potential_commit_point_label: Read_Val_Point
797 @Label: ReplaceIfMatch_Fail_Point
800 return V; // Do not update!
// Try to install the new value; on success adjust the live-pair count.
803 if (CAS_val(kvs, idx, V, val_slot)) {
806 # The only point where a successful put happens
807 @Commit_point_define: true
808 @Potential_commit_point_label: Write_Val_Point
809 @Label: Write_Success_Point
812 if (expVal != NULL) { // Not called by a table-copy
813 // CAS succeeded, should adjust size
814 // Both normal put's and table-copy calls putIfMatch, but
815 // table-copy does not increase the number of live K/V pairs
816 if ((V == NULL || V == TOMBSTONE) &&
817 val_slot != TOMBSTONE)
818 chm->_size.fetch_add(1, memory_order_relaxed);
819 if (!(V == NULL || V == TOMBSTONE) &&
820 val_slot == TOMBSTONE)
821 chm->_size.fetch_add(-1, memory_order_relaxed);
// A NULL old value is reported as TOMBSTONE to external callers.
823 return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
// CAS failed: reread the value and retry (or chase an in-progress copy).
826 V = val(kvs, idx, NULL);
828 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
829 idx, expVal), key_slot, val_slot, expVal);
833 // Help along an existing table-resize. This is a fast cut-out wrapper.
// Returns 'helper' immediately when no copy is pending; otherwise
// contributes one chunk of copy work on the current top-level table.
834 kvs_data* help_copy(kvs_data *helper) {
835 kvs_data *topkvs = _kvs.load(memory_order_acquire);
836 CHM *topchm = get_chm(topkvs);
837 // No copy in progress
838 if (topchm->_newkvs.load(memory_order_acquire) == NULL) return helper;
839 topchm->help_copy_impl(this, topkvs, false);