1 #ifndef SIMPLIFIED_CLIFFC_HASHTABLE_H
2 #define SIMPLIFIED_CLIFFC_HASHTABLE_H
14 This header file declares and defines a simplified version of Cliff Click's
15 NonblockingHashMap. It contains all the necessary structures and main
16 functions. The simplified_cliffc_hashtable.cc file contains the definition for
20 template<typename TypeK, typename TypeV>
21 class cliffc_hashtable;
24 Corresponds to the Object[] array in Cliff Click's Java implementation.
25 It keeps the first two slots for the CHM (hashtable control unit) and the hash
26 records (an array of hashes used for fast negative key-equality checks).
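A sketch of the resulting slot layout, matching get_chm(), get_hashes(),
key() and val() below:
    _data[0]          CHM* (table control block)
    _data[1]          int* (array of cached full hashes)
    _data[2*i + 2]    key slot of entry i
    _data[2*i + 3]    value slot of entry i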
34 int real_size = _size * 2 + 2; // 2 header slots plus a key and a value slot per entry
35 _data = new atomic<void*>[real_size];
36 // The control block should be initialized in resize()
37 // Init the hash record array
38 int *hashes = new int[_size];
40 for (i = 0; i < _size; i++) {
43 _data[1].store(hashes, memory_order_relaxed);
44 // Init the key/value slots to NULL
45 for (i = 2; i < real_size; i++) {
46 _data[i].store(NULL, memory_order_relaxed);
51 int *hashes = (int*) _data[1].load(memory_order_relaxed);
59 shared_ptr<void> _ptr;
61 slot(bool prime, shared_ptr<void> ptr) {
70 TypeK must define the function "int hashCode()", which returns the hash
71 code for its object, and "int equals(TypeK anotherKey)", which is
72 used to check equality.
73 TypeK and TypeV should define their own copy constructors.
74 To make memory management safe and similar to Cliff Click's Java
75 implementation, we use shared_ptr instead of raw pointers for the
76 pointers that refer to TypeK and TypeV objects.
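For illustration only (not part of the original design), a minimal key type
meeting these requirements could look like the following; the name IntKey is
hypothetical:

    class IntKey {
    public:
        int _v;
        IntKey(int v) : _v(v) { }
        IntKey(const IntKey& other) : _v(other._v) { }
        int hashCode() { return _v; }
        int equals(IntKey anotherKey) { return _v == anotherKey._v; }
    };

A value type needs the same copy constructor plus an equals() method.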
78 template<typename TypeK, typename TypeV>
79 class cliffc_hashtable {
81 # The synchronization we have for the hashtable gives us the property of
82 # serializability, so we can use a sequential hashtable when we check
83 # correctness. The key thing is to identify all the commit points.
88 spec_hashtable<TypeK, TypeV*> map;
89 spec_hashtable<TypeK, Tag> id_map;
92 map = spec_hashtable<TypeK, TypeV*>();
93 id_map = spec_hashtable<TypeK, Tag>();
96 static bool equals_val(TypeV *ptr1, TypeV *ptr2) {
100 # Update the tag for the current key slot if the corresponding tag
101 # is NULL, otherwise just return that tag. It will update the next
102 # available tag too if it requires a new tag for that key slot.
103 static Tag getKeyTag(TypeK &key) {
104 if (id_map.get(key) == NULL) {
105 Tag cur_tag = tag.current();
106 id_map.put(key, cur_tag);
110 return id_map.get(key);
126 PutIfAbsent(COND_PutIfAbsentSucc),
128 RemoveIfMatch(COND_RemoveIfMatchSucc),
130 ReplaceIfMatch(COND_ReplaceIfMatchSucc)
133 Write_interface -> Read_interface
139 The control structure for the hashtable
143 friend class cliffc_hashtable;
145 atomic<kvs_data*> _newkvs;
147 // Size of active K,V pairs
150 // Count of used slots
153 // The next part of the table to copy
154 atomic_int _copy_idx;
156 // Work-done reporting
157 atomic_int _copy_done;
161 _size.store(size, memory_order_relaxed);
162 _slots.store(0, memory_order_relaxed);
164 _copy_idx.store(0, memory_order_relaxed);
165 _copy_done.store(0, memory_order_release);
172 // Heuristic to decide if the table is too full
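// Worked example (added for clarity, using the constants defined below): with
// len == 32, reprobe_limit(32) == 10 + (32 >> 2) == 18, so the table counts
// as too full once a probe chain has hit REPROBE_LIMIT (10) reprobes and at
// least 18 of the 32 key slots have already been claimed.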
173 bool table_full(int reprobe_cnt, int len) {
175 reprobe_cnt >= REPROBE_LIMIT &&
176 _slots.load(memory_order_relaxed) >= reprobe_limit(len);
179 kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
180 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
184 // No copy in-progress, so start one; first compute the new table size
185 int oldlen = kvs->_size;
186 int sz = _size.load(memory_order_relaxed);
189 // Just follow Cliff Click's heuristic to decide the new size
190 if (sz >= (oldlen >> 2)) { // If we are 25% full
191 newsz = oldlen << 1; // Double size
192 if (sz >= (oldlen >> 1))
193 newsz = oldlen << 2; // Double double size
196 // We do not record the resize timestamp (that heuristic is dropped here)
197 if (newsz <= oldlen) newsz = oldlen << 1;
198 // Do not shrink ever
199 if (newsz < oldlen) newsz = oldlen;
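// Worked example (added for clarity): with oldlen == 8 and sz == 5 live
// pairs, sz >= (8 >> 2) and sz >= (8 >> 1) both hold, so newsz == 8 << 2 == 32;
// the clamps above leave it at 32, growing the table four-fold in one step.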
201 // Last check cause the 'new' below is expensive
202 newkvs = _newkvs.load(memory_order_acquire);
203 if (newkvs != NULL) return newkvs;
205 newkvs = new kvs_data(newsz);
206 void *chm = (void*) new CHM(sz);
207 newkvs->_data[0].store(chm, memory_order_relaxed);
209 kvs_data *cur_newkvs;
210 // Another check after the slow allocation
211 if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
213 // CAS the _newkvs to the allocated table
214 kvs_data *expected = (kvs_data*) NULL;
215 kvs_data *desired = (kvs_data*) newkvs;
216 if (!_newkvs.compare_exchange_strong(expected, desired, memory_order_release,
217 memory_order_relaxed)) {
218 // Should clean the allocated area
220 newkvs = _newkvs.load(memory_order_acquire);
225 void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
227 assert (get_chm(oldkvs) == this);
228 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
229 int oldlen = oldkvs->_size;
230 int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
232 // Just follow Cliff Click's code here
233 int panic_start = -1;
235 while (_copy_done.load(memory_order_acquire) < oldlen) {
236 copyidx = _copy_idx.load(memory_order_acquire);
237 if (panic_start == -1) { // No panic
238 copyidx = _copy_idx.load(memory_order_acquire);
239 while (copyidx < (oldlen << 1) &&
240 !_copy_idx.compare_exchange_strong(copyidx, copyidx +
241 min_copy_work, memory_order_release, memory_order_relaxed))
242 copyidx = _copy_idx.load(memory_order_relaxed);
243 if (!(copyidx < (oldlen << 1)))
244 panic_start = copyidx;
247 // Now copy the chunk of work we claimed
249 for (int i = 0; i < min_copy_work; i++)
250 if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
254 copy_check_and_promote(topmap, oldkvs, workdone);
256 copyidx += min_copy_work;
257 if (!copy_all && panic_start == -1)
258 return; // We are done with the work we claimed
260 copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
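// Worked example (added for clarity): with oldlen == 2048, every claim moves
// _copy_idx forward by min_copy_work == 1024, so two helping threads can grab
// the disjoint chunks [0, 1024) and [1024, 2048) and copy them in parallel;
// copy_check_and_promote() then accumulates the per-chunk work counts.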
263 kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
264 *oldkvs, int idx, void *should_help) {
265 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
266 // We're only here cause the caller saw a Prime
267 if (copy_slot(topmap, idx, oldkvs, newkvs))
268 copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
269 return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
272 void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
273 oldkvs, int workdone) {
274 int oldlen = oldkvs->_size;
275 int copyDone = _copy_done.load(memory_order_relaxed);
278 copyDone = _copy_done.load(memory_order_relaxed);
279 if (_copy_done.compare_exchange_weak(copyDone, copyDone +
280 workdone, memory_order_relaxed, memory_order_relaxed))
285 // Promote the new table to the current table
286 if (copyDone + workdone == oldlen &&
287 topmap->_kvs.load(memory_order_acquire) == oldkvs)
288 topmap->_kvs.compare_exchange_strong(oldkvs, _newkvs.load(memory_order_acquire),
289 memory_order_release, memory_order_relaxed);
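// Note (added for clarity): this promotion CAS can succeed at most once per
// old table, because it only runs after every old slot has been copied and
// only while _kvs still points at oldkvs; later helpers simply see the
// already-promoted table.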
292 bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
295 while ((key_slot = key(oldkvs, idx)) == NULL)
296 CAS_key(oldkvs, idx, NULL, TOMBSTONE);
298 // First CAS old to Prime
299 slot *oldval = val(oldkvs, idx);
300 while (!is_prime(oldval)) {
301 slot *box = (oldval == NULL || oldval == TOMBSTONE)
302 ? TOMBPRIME : new slot(true, oldval->_ptr);
303 if (CAS_val(oldkvs, idx, oldval, box)) {
304 if (box == TOMBPRIME)
305 return 1; // Copy done
306 // Otherwise we CAS'd the box
307 oldval = box; // Record updated oldval
310 oldval = val(oldkvs, idx); // Else re-try
313 if (oldval == TOMBPRIME) return false; // Copy already completed here
315 slot *old_unboxed = new slot(false, oldval->_ptr);
316 int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
319 // Old value is exposed in the new table
320 while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
321 oldval = val(oldkvs, idx);
323 return copied_into_new;
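// Summary of the copy state machine above (added for clarity):
//   empty key slot          -> TOMBSTONE key (blocks late inserts)
//   live value V            -> boxed Prime V' (new slot(true, V->_ptr))
//   NULL / TOMBSTONE value  -> TOMBPRIME (nothing to copy)
//   boxed V'                -> value copied into the new table, then the old
//                              slot is CAS'd to TOMBPRIME
// A reader that sees a Prime knows the live value now lives in the new table.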
330 static const int Default_Init_Size = 8; // Initial table size
332 static slot* const MATCH_ANY;
333 static slot* const NO_MATCH_OLD;
335 static slot* const TOMBPRIME;
336 static slot* const TOMBSTONE;
338 static const int REPROBE_LIMIT = 10; // Forces a table-resize
340 atomic<kvs_data*> _kvs;
344 // Initialize the CHM when constructing the table.
345 // The CHMs of other kvs_data instances are initialized in resize()
346 // because their size is determined dynamically.
347 kvs_data *kvs = new kvs_data(Default_Init_Size);
348 void *chm = (void*) new CHM(0);
349 kvs->_data[0].store(chm, memory_order_relaxed);
350 _kvs.store(kvs, memory_order_release);
353 cliffc_hashtable(int init_size) {
354 // Initialize the CHM when constructing the table.
355 // The CHMs of other kvs_data instances are initialized in resize()
356 // because their size is determined dynamically.
357 kvs_data *kvs = new kvs_data(init_size);
358 void *chm = (void*) new CHM(0);
359 kvs->_data[0].store(chm, memory_order_relaxed);
360 _kvs.store(kvs, memory_order_release);
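// Usage sketch (added for illustration; KeyType and ValType are hypothetical
// types meeting the requirements documented above; sizes should be powers of
// two because slot indices are computed with 'fullhash & (len - 1)'):
//
//   cliffc_hashtable<KeyType, ValType> table(16);
//   KeyType k(1);
//   ValType v(42);
//   table.put(k, v);                        // insert or overwrite
//   shared_ptr<ValType> cur = table.get(k); // empty shared_ptr on a miss
//   table.remove(k);                        // logically deletes k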
366 @Commit_point_set: Read_Val_Point1 | Read_Val_Point2 | Read_Val_Point3
367 @ID: __sequential.getKeyTag(key)
369 @DefineVar: TypeV *_Old_Val = __sequential.map.get(key)
371 __sequential.equals_val(_Old_Val, __RET__)
374 shared_ptr<TypeV> get(TypeK& key) {
375 void *key_ptr = (void*) new TypeK(key);
376 slot *key_slot = new slot(false, shared_ptr<void>(key_ptr));
377 int fullhash = hash(key_slot);
378 slot *V = get_impl(this, _kvs, key_slot, fullhash);
379 if (V == NULL) return NULL;
380 assert (!is_prime(V));
381 return static_pointer_cast<TypeV>(V->_ptr);
387 @Commit_point_set: Write_Val_Point
388 @ID: __sequential.getKeyTag(key)
390 # Remember this old value at checking point
391 @DefineVar: TypeV *_Old_Val = __sequential.map.get(key)
392 @Code: __sequential.map.put(key, &val);
394 __sequential.equals_val(__RET__, _Old_Val)
397 shared_ptr<TypeV> put(TypeK& key, TypeV& val) {
398 return putIfMatch(key, val, NO_MATCH_OLD);
403 @Interface: PutIfAbsent
405 Write_Val_Point | PutIfAbsent_Fail_Point
406 @Condition: __sequential.map.get(key) == NULL
408 COND_PutIfAbsentSucc :: __RET__ == NULL
409 @ID: __sequential.getKeyTag(key)
411 @DefineVar: TypeV *_Old_Val = __sequential.map.get(key)
414 __sequential.map.put(key, &value);
416 __COND_SAY__ ? __RET__ == NULL : __sequential.equals_val(_Old_Val, __RET__)
419 shared_ptr<TypeV> putIfAbsent(TypeK& key, TypeV& value) {
420 return putIfMatch(key, value, TOMBSTONE);
425 @Interface: RemoveAny
426 @Commit_point_set: Write_Val_Point
427 @ID: __sequential.getKeyTag(key)
429 @DefineVar: TypeV *_Old_Val = __sequential.map.get(key)
430 @Code: __sequential.map.put(key, NULL);
432 __sequential.equals_val(__RET__, _Old_Val)
435 shared_ptr<TypeV> remove(TypeK& key) {
436 slot *key_slot = new slot(false, shared_ptr<void>((void*) new TypeK(key)));
slot *res = putIfMatch(this, _kvs, key_slot, TOMBSTONE, NO_MATCH_OLD);
return res == TOMBSTONE ? NULL : static_pointer_cast<TypeV>(res->_ptr);
441 @Interface: RemoveIfMatch
443 Write_Val_Point | RemoveIfMatch_Fail_Point
445 __sequential.equals_val(__sequential.map.get(key), &val)
447 COND_RemoveIfMatchSucc :: __RET__ == true
448 @ID: __sequential.getKeyTag(key)
452 __sequential.map.put(key, NULL);
454 __COND_SAY__ ? __RET__ : !__RET__
457 bool remove(TypeK& key, TypeV& val) {
458 slot *val_slot = new slot(false, shared_ptr<void>((void*) new TypeV(val)));
459 slot *key_slot = new slot(false, shared_ptr<void>((void*) new TypeK(key)));
slot *res = putIfMatch(this, _kvs, key_slot, TOMBSTONE, val_slot);
return res != NULL && res != TOMBSTONE && valeq(res, val_slot); // Removed iff the old value matched val
465 @Interface: ReplaceAny
468 @ID: __sequential.getKeyTag(key)
470 @DefineVar: TypeV *_Old_Val = __sequential.map.get(key)
472 __sequential.equals_val(__RET__, _Old_Val)
475 shared_ptr<TypeV> replace(TypeK& key, TypeV& val) {
476 return putIfMatch(key, val, MATCH_ANY);
481 @Interface: ReplaceIfMatch
483 Write_Val_Point | ReplaceIfMatch_Fail_Point
485 __sequential.equals_val(__sequential.map.get(key), &oldval)
487 COND_ReplaceIfMatchSucc :: __RET__ == true
488 @ID: __sequential.getKeyTag(key)
492 __sequential.map.put(key, &newval);
494 __COND_SAY__ ? __RET__ : !__RET__
497 bool replace(TypeK& key, TypeV& oldval, TypeV& newval) {
498 slot *exp_slot = new slot(false, shared_ptr<void>((void*) new TypeV(oldval)));
shared_ptr<TypeV> old_ret = putIfMatch(key, newval, exp_slot);
return old_ret != NULL && old_ret->equals(oldval); // Replaced iff the old value matched oldval
502 static CHM* get_chm(kvs_data* kvs) {
503 return (CHM*) kvs->_data[0].load(memory_order_relaxed);
506 static int* get_hashes(kvs_data *kvs) {
507 return (int *) kvs->_data[1].load(memory_order_relaxed);
510 // Preserve happens-before semantics on newly inserted keys
511 static inline slot* key(kvs_data *kvs, int idx) {
512 assert (idx >= 0 && idx < kvs->_size);
513 // Corresponding to the volatile read in get_impl() and putIfMatch in
514 // Cliff Click's Java implementation
515 return (slot*) kvs->_data[idx * 2 + 2].load(memory_order_acquire);
519 The atomic operation in the val() function is a "potential" commit point,
520 which means that in some cases it is a real commit point while in other
521 cases it is not. This happens because val() is such a
522 fundamental function that many internal operations call it. Our
523 strategy is to label every potential commit point and check later
524 whether it really is a commit point.
526 // Preserve happens-before semantics on newly inserted values
527 static inline slot* val(kvs_data *kvs, int idx) {
528 assert (idx >= 0 && idx < kvs->_size);
529 // Corresponding to the volatile read in get_impl() and putIfMatch in
530 // Cliff Click's Java implementation
531 slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
534 # This is a complicated potential commit point since many functions are
536 @Potential_commit_point_define: true
537 @Label: Read_Val_Point
545 static int hash(slot *key_slot) {
546 assert(key_slot != NULL && key_slot->_ptr != NULL);
547 shared_ptr<TypeK> key = static_pointer_cast<TypeK>(key_slot->_ptr);
548 int h = key->hashCode();
549 // Spread bits according to Cliff Click's code
550 h += (h << 15) ^ 0xffffcd7d;
554 h += (h << 2) + (h << 14);
555 return h ^ (h >> 16);
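// Note (added for clarity): table lengths are expected to be powers of two
// (the default size is 8 and resize() only doubles or quadruples), so callers
// map the spread hash to a slot index with 'fullhash & (len - 1)'; for
// len == 8 that keeps just the low three bits of the spread hash.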
558 // Heuristic to decide if reprobed too many times.
559 // Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
560 // put it triggers a table resize. Several places MUST have exact agreement.
561 static int reprobe_limit(int len) {
562 return REPROBE_LIMIT + (len >> 2);
565 static inline bool is_prime(slot *val) {
566 return (val != NULL) && val->_prime;
569 // Check for key equality. Try direct pointer comparison first (fast
570 // negative test) and then the full 'equals' call
571 static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
573 // Caller should've checked this.
575 shared_ptr<TypeK> key_ptr = static_pointer_cast<TypeK>(key_slot->_ptr);
578 ((hashes[hash] == 0 || hashes[hash] == fullhash) &&
580 key_ptr->equals(*static_pointer_cast<TypeK>(K->_ptr)));
583 static bool valeq(slot *val_slot1, slot *val_slot2) {
584 assert (val_slot1 != NULL);
585 shared_ptr<TypeV> ptr1 = static_pointer_cast<TypeV>(val_slot1->_ptr);
586 if (val_slot2 == NULL || ptr1 == NULL || val_slot2->_ptr == NULL) return false;
587 return ptr1->equals(*static_pointer_cast<TypeV>(val_slot2->_ptr));
590 // Together with key() preserve the happens-before relationship on newly
592 static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
593 return kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
594 desired, memory_order_release, memory_order_relaxed);
598 As with the val() function, we only label the CAS operation as the
599 potential commit point.
601 // Together with val() preserve the happens-before relationship on newly
603 static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
605 bool res = kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
606 desired, memory_order_release, memory_order_relaxed);
608 # If it is a successful put instead of a copy or any other internal
609 # operations, expected != NULL
611 @Potential_commit_point_define: __ATOMIC_RET__ == true
612 @Label: Write_Val_Point
618 slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
620 int len = kvs->_size;
621 CHM *chm = get_chm(kvs);
622 int *hashes = get_hashes(kvs);
624 int idx = fullhash & (len - 1);
627 slot *K = key(kvs, idx);
628 slot *V = val(kvs, idx);
631 @Commit_point_define: V == NULL
632 @Potential_commit_point_label: Read_Val_Point
633 @Label: Get_Success_Point_1
637 if (V == NULL) return NULL; // A miss
639 if (keyeq(K, key_slot, hashes, idx, fullhash)) {
640 // Key hit! Check if table-resize in progress
644 @Commit_point_define: true
645 @Potential_commit_point_label: Read_Val_Point
646 @Label: Get_Success_Point_2
649 return (V == TOMBSTONE) ? NULL : V; // Return this value
651 // Otherwise, finish the copy & retry in the new table
652 return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
653 idx, key_slot), key_slot, fullhash);
656 if (++reprobe_cnt >= REPROBE_LIMIT ||
657 key_slot == TOMBSTONE) {
658 // Retry in new table
659 // Atomic read (acquire) can be here
660 kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
663 @Commit_point_define_check: newkvs == NULL
664 @Label: Get_Success_Point_3
667 return newkvs == NULL ? NULL : get_impl(topmap,
668 topmap->help_copy(newkvs), key_slot, fullhash);
671 idx = (idx + 1) & (len - 1); // Reprobe by 1
675 // A wrapper of the essential function putIfMatch()
676 shared_ptr<TypeV> putIfMatch(TypeK& key, TypeV& value, slot *old_val) {
677 // TODO: Should throw an exception rather than return NULL
678 if (old_val == NULL) {
681 void *key_ptr = (void*) new TypeK(key);
682 slot *key_slot = new slot(false, shared_ptr<void>(key_ptr));
684 void *val_ptr = (void*) new TypeV(value);
685 slot *value_slot = new slot(false, shared_ptr<void>(val_ptr));
686 slot *res = putIfMatch(this, _kvs, key_slot, value_slot, old_val);
687 // Only when copy_slot() calls putIfMatch() will it return NULL
688 assert (res != NULL);
689 assert (!is_prime(res));
690 return res == TOMBSTONE ? NULL : static_pointer_cast<TypeV>(res->_ptr);
694 Put, Remove, PutIfAbsent, etc will call this function. Return the old
695 value. If the returned value is equal to expVal (or expVal is
696 NO_MATCH_OLD), then this function puts val_slot into the table 'kvs'.
697 Only copy_slot will pass a NULL expVal, and putIfMatch only returns a
698 NULL if passed a NULL expVal.
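As a quick reference (added for clarity), the public operations above map onto
this function as follows:
    put(key, val)             expVal = NO_MATCH_OLD  (always write)
    putIfAbsent(key, value)   expVal = TOMBSTONE     (write only if absent)
    remove(key)               val_slot = TOMBSTONE, expVal = NO_MATCH_OLD
    replace(key, val)         expVal = MATCH_ANY     (write only if present)
Only copy_slot() passes expVal = NULL, which is why only that caller can see a
NULL return value.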
700 static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
701 *key_slot, slot *val_slot, slot *expVal) {
702 assert (val_slot != NULL);
703 assert (!is_prime(val_slot));
704 assert (!is_prime(expVal));
706 int fullhash = hash(key_slot);
707 int len = kvs->_size;
708 CHM *chm = get_chm(kvs);
709 int *hashes = get_hashes(kvs);
710 int idx = fullhash & (len - 1);
718 while (true) { // Spin till we get a key slot
720 V = val(kvs, idx);
721 if (K == NULL) { // Get a free slot
722 if (val_slot == TOMBSTONE) return val_slot;
723 // Claim the null key-slot
724 if (CAS_key(kvs, idx, NULL, key_slot)) {
725 chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
726 hashes[idx] = fullhash; // Memorize full hash
729 K = key(kvs, idx); // CAS failed, get updated value
733 // Key slot not null, there exists a Key here
734 if (keyeq(K, key_slot, hashes, idx, fullhash))
737 // Notice that the logic here should be consistent with that of get.
738 // The first predicate means that too many reprobes indicate nothing in the
740 if (++reprobe_cnt >= reprobe_limit(len) ||
741 K == TOMBSTONE) { // Found a Tombstone key, no more keys
742 newkvs = chm->resize(topmap, kvs);
743 // Help along an existing copy
744 if (expVal != NULL) topmap->help_copy(newkvs);
745 return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
748 idx = (idx + 1) & (len - 1); // Reprobe
749 } // End of spinning till we get a Key slot
751 if (val_slot == V) return V; // Fast cutout for no-change
753 // Here it tries to resize cause it doesn't want other threads to stop
754 // its progress (eagerly try to resize soon)
755 newkvs = chm->_newkvs.load(memory_order_acquire);
756 if (newkvs == NULL &&
757 ((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V)))
758 newkvs = chm->resize(topmap, kvs); // Force the copy to start
760 // Finish the copy and then put it in the new table
762 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
763 expVal), key_slot, val_slot, expVal);
765 // Decided to update the existing table
767 assert (!is_prime(V));
769 if (expVal != NO_MATCH_OLD &&
771 (expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
772 !(V == NULL && expVal == TOMBSTONE) &&
773 (expVal == NULL || !valeq(expVal, V))) {
776 @Commit_point_define: expVal == TOMBSTONE
777 @Potential_commit_point_label: Read_Val_Point
778 @Label: PutIfAbsent_Fail_Point
779 # This is a check for the PutIfAbsent() when the value
785 @Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
786 @Potential_commit_point_label: Read_Val_Point
787 @Label: RemoveIfMatch_Fail_Point
792 @Commit_point_define: !valeq(expVal, V)
793 @Potential_commit_point_label: Read_Val_Point
794 @Label: ReplaceIfMatch_Fail_Point
797 return V; // Do not update!
800 if (CAS_val(kvs, idx, V, val_slot)) {
803 # The only point where a successful put happens
804 @Commit_point_define: true
805 @Potential_commit_point_label: Write_Val_Point
806 @Label: Write_Success_Point
809 if (expVal != NULL) { // Not called by a table-copy
810 // CAS succeeded, should adjust size
811 // Both normal puts and table-copy call putIfMatch, but
812 // table-copy does not increase the number of live K/V pairs
813 if ((V == NULL || V == TOMBSTONE) &&
814 val_slot != TOMBSTONE)
815 chm->_size.fetch_add(1, memory_order_relaxed);
816 if (!(V == NULL || V == TOMBSTONE) &&
817 val_slot == TOMBSTONE)
818 chm->_size.fetch_add(-1, memory_order_relaxed);
820 return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
823 V = val(kvs, idx);
825 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
826 idx, expVal), key_slot, val_slot, expVal);
830 // Help along an existing table-resize. This is a fast cut-out wrapper.
831 kvs_data* help_copy(kvs_data *helper) {
832 kvs_data *topkvs = _kvs.load(memory_order_acquire);
833 CHM *topchm = get_chm(topkvs);
834 // No copy in progress
835 if (topchm->_newkvs.load(memory_order_acquire) == NULL) return helper;
836 topchm->help_copy_impl(this, topkvs, false);