1 #ifndef CLIFFC_HASHTABLE_H
2 #define CLIFFC_HASHTABLE_H
10 #define MODEL_ASSERT assert
12 #include <model-assert.h>
19 This header file declares and defines a simplified version of Cliff Click's
NonblockingHashMap. It contains all the necessary structures and main
21 functions. In simplified_cliffc_hashtable.cc file, it has the definition for
25 template<typename TypeK, typename TypeV>
26 class cliffc_hashtable;
Corresponding to the Object[] array in Cliff Click's Java implementation.
30 It keeps the first two slots for CHM (Hashtable control unit) and the hash
31 records (an array of hash used for fast negative key-equality check).
39 int real_size = sz * 2 + 2;
40 _data = new atomic<void*>[real_size];
41 // The control block should be initialized in resize()
42 // Init the hash record array
43 int *hashes = new int[_size];
45 for (i = 0; i < _size; i++) {
48 // Init the data to Null slot
49 for (i = 2; i < real_size; i++) {
50 _data[i].store(NULL, memory_order_relaxed);
52 _data[1].store(hashes, memory_order_relaxed);
56 int *hashes = (int*) _data[1].load(memory_order_relaxed);
66 slot(bool prime, void *ptr) {
TypeK must have defined function "int hashCode()" which returns the hash
code for its object, and "int equals(TypeK anotherKey)" which is
76 used to judge equality.
77 TypeK and TypeV should define their own copy constructor.
84 template<typename TypeK, typename TypeV>
85 class cliffc_hashtable {
87 # The synchronization we have for the hashtable gives us the property of
88 # serializability, so we should have a sequential hashtable when we check the
# correctness. The key thing is to identify all the commit points.
94 CLASS = cliffc_hashtable;
101 map = new_spec_table_default(equals_key);
102 id_map = new_spec_table_default(equals_id);
106 bool equals_key(void *ptr1, void *ptr2) {
107 TypeK *key1 = (TypeK*) ptr1,
108 *key2 = (TypeK*) ptr2;
109 if (key1 == NULL || key2 == NULL)
111 return key1->equals(key2);
115 bool equals_val(void *ptr1, void *ptr2) {
118 TypeV *val1 = (TypeV*) ptr1,
119 *val2 = (TypeV*) ptr2;
120 if (val1 == NULL || val2 == NULL)
122 return val1->equals(val2);
126 bool equals_id(void *ptr1, void *ptr2) {
127 id_tag_t *id1 = (id_tag_t*) ptr1,
128 *id2 = (id_tag_t*) ptr2;
129 if (id1 == NULL || id2 == NULL)
131 return (*id1).tag == (*id2).tag;
135 # Update the tag for the current key slot if the corresponding tag
136 # is NULL, otherwise just return that tag. It will update the next
137 # available tag too if it requires a new tag for that key slot.
138 call_id_t getKeyTag(TypeK *key) {
139 if (!spec_table_contains(id_map, key)) {
140 call_id_t cur_id = current(tag);
141 spec_table_put(id_map, key, (void*) cur_id);
145 call_id_t res = (call_id_t) spec_table_get(id_map, key);
157 The control structure for the hashtable
161 friend class cliffc_hashtable;
163 atomic<kvs_data*> _newkvs;
165 // Size of active K,V pairs
168 // Count of used slots
171 // The next part of the table to copy
172 atomic_int _copy_idx;
174 // Work-done reporting
175 atomic_int _copy_done;
179 _newkvs.store(NULL, memory_order_relaxed);
180 _size.store(size, memory_order_relaxed);
181 _slots.store(0, memory_order_relaxed);
183 _copy_idx.store(0, memory_order_relaxed);
184 _copy_done.store(0, memory_order_relaxed);
191 // Heuristic to decide if the table is too full
192 bool table_full(int reprobe_cnt, int len) {
194 reprobe_cnt >= REPROBE_LIMIT &&
195 _slots.load(memory_order_relaxed) >= reprobe_limit(len);
198 kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
199 //model_print("resizing...\n");
200 /**** FIXME: miss ****/
201 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
205 // No copy in-progress, start one; Only double the table size
206 int oldlen = kvs->_size;
207 int sz = _size.load(memory_order_relaxed);
210 // Just follow Cliff Click's heuristic to decide the new size
211 if (sz >= (oldlen >> 2)) { // If we are 25% full
212 newsz = oldlen << 1; // Double size
213 if (sz >= (oldlen >> 1))
214 newsz = oldlen << 2; // Double double size
217 // We do not record the record timestamp
218 if (newsz <= oldlen) newsz = oldlen << 1;
219 // Do not shrink ever
220 if (newsz < oldlen) newsz = oldlen;
222 // Last check cause the 'new' below is expensive
223 /**** FIXME: miss ****/
224 newkvs = _newkvs.load(memory_order_acquire);
225 //model_print("hey1\n");
226 if (newkvs != NULL) return newkvs;
228 newkvs = new kvs_data(newsz);
229 void *chm = (void*) new CHM(sz);
230 //model_print("hey2\n");
231 newkvs->_data[0].store(chm, memory_order_relaxed);
233 kvs_data *cur_newkvs;
234 // Another check after the slow allocation
235 /**** FIXME: miss ****/
236 if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
238 // CAS the _newkvs to the allocated table
239 kvs_data *desired = (kvs_data*) NULL;
240 kvs_data *expected = (kvs_data*) newkvs;
241 /**** FIXME: miss ****/
242 //model_print("release in resize!\n");
243 if (!_newkvs.compare_exchange_strong(desired, expected, memory_order_release,
244 memory_order_relaxed)) {
245 // Should clean the allocated area
247 /**** FIXME: miss ****/
248 newkvs = _newkvs.load(memory_order_acquire);
253 void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
255 MODEL_ASSERT (get_chm(oldkvs) == this);
256 /**** FIXME: miss ****/
257 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
258 int oldlen = oldkvs->_size;
259 int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
261 // Just follow Cliff Click's code here
262 int panic_start = -1;
264 while (_copy_done.load(memory_order_relaxed) < oldlen) {
265 copyidx = _copy_idx.load(memory_order_relaxed);
266 if (panic_start == -1) { // No painc
267 copyidx = _copy_idx.load(memory_order_relaxed);
268 while (copyidx < (oldlen << 1) &&
269 !_copy_idx.compare_exchange_strong(copyidx, copyidx +
270 min_copy_work, memory_order_relaxed, memory_order_relaxed))
271 copyidx = _copy_idx.load(memory_order_relaxed);
272 if (!(copyidx < (oldlen << 1)))
273 panic_start = copyidx;
276 // Now copy the chunk of work we claimed
278 for (int i = 0; i < min_copy_work; i++)
279 if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
283 copy_check_and_promote(topmap, oldkvs, workdone);
285 copyidx += min_copy_work;
286 if (!copy_all && panic_start == -1)
287 return; // We are done with the work we claim
289 copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
292 kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
293 *oldkvs, int idx, void *should_help) {
294 /**** FIXME: miss ****/
295 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
296 // We're only here cause the caller saw a Prime
297 if (copy_slot(topmap, idx, oldkvs, newkvs))
298 copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
299 return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
302 void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
303 oldkvs, int workdone) {
304 int oldlen = oldkvs->_size;
305 int copyDone = _copy_done.load(memory_order_relaxed);
308 copyDone = _copy_done.load(memory_order_relaxed);
309 if (_copy_done.compare_exchange_weak(copyDone, copyDone +
310 workdone, memory_order_relaxed, memory_order_relaxed))
315 // Promote the new table to the current table
316 if (copyDone + workdone == oldlen &&
317 topmap->_kvs.load(memory_order_relaxed) == oldkvs) {
318 /**** FIXME: miss ****/
319 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
320 /**** CDSChecker error ****/
321 topmap->_kvs.compare_exchange_strong(oldkvs, newkvs, memory_order_release,
322 memory_order_relaxed);
326 bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
329 while ((key_slot = key(oldkvs, idx)) == NULL)
330 CAS_key(oldkvs, idx, NULL, TOMBSTONE);
332 // First CAS old to Prime
333 slot *oldval = val(oldkvs, idx);
334 while (!is_prime(oldval)) {
335 slot *box = (oldval == NULL || oldval == TOMBSTONE)
336 ? TOMBPRIME : new slot(true, oldval->_ptr);
337 if (CAS_val(oldkvs, idx, oldval, box)) {
338 if (box == TOMBPRIME)
339 return 1; // Copy done
340 // Otherwise we CAS'd the box
341 oldval = box; // Record updated oldval
344 oldval = val(oldkvs, idx); // Else re-try
347 if (oldval == TOMBPRIME) return false; // Copy already completed here
349 slot *old_unboxed = new slot(false, oldval->_ptr);
350 int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
353 // Old value is exposed in the new table
354 while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
355 oldval = val(oldkvs, idx);
357 return copied_into_new;
364 static const int Default_Init_Size = 4; // Intial table size
366 static slot* const MATCH_ANY;
367 static slot* const NO_MATCH_OLD;
369 static slot* const TOMBPRIME;
370 static slot* const TOMBSTONE;
372 static const int REPROBE_LIMIT = 10; // Forces a table-resize
374 atomic<kvs_data*> _kvs;
378 // Should initialize the CHM for the construction of the table
379 // For other CHM in kvs_data, they should be initialzed in resize()
380 // because the size is determined dynamically
386 kvs_data *kvs = new kvs_data(Default_Init_Size);
387 void *chm = (void*) new CHM(0);
388 kvs->_data[0].store(chm, memory_order_relaxed);
389 _kvs.store(kvs, memory_order_relaxed);
392 cliffc_hashtable(int init_size) {
393 // Should initialize the CHM for the construction of the table
394 // For other CHM in kvs_data, they should be initialzed in resize()
395 // because the size is determined dynamically
402 kvs_data *kvs = new kvs_data(init_size);
403 void *chm = (void*) new CHM(0);
404 kvs->_data[0].store(chm, memory_order_relaxed);
405 _kvs.store(kvs, memory_order_relaxed);
411 //@Commit_point_set: Get_Point1 | Get_Point2 | Get_ReadKVS | Get_ReadNewKVS | Get_Clear
412 @Commit_point_set: Get_Point1 | Get_Point2 | Get_Clear
413 //@Commit_point_set: Get_Point1 | Get_Point2 | Get_Point3
416 TypeV *_Old_Val = (TypeV*) spec_table_get(map, key);
417 //bool passed = equals_val(_Old_Val, __RET__);
420 int old = _Old_Val == NULL ? 0 : _Old_Val->_val;
421 int ret = __RET__ == NULL ? 0 : __RET__->_val;
422 //model_print("Get: key: %d, _Old_Val: %d, RET: %d\n",
423 //key->_val, old, ret);
426 //__RET__ == NULL ? true : equals_val(_Old_Val, __RET__)
427 equals_val(_Old_Val, __RET__)
430 TypeV* get(TypeK *key) {
431 slot *key_slot = new slot(false, key);
432 int fullhash = hash(key_slot);
433 /**** CDSChecker error ****/
434 kvs_data *kvs = _kvs.load(memory_order_acquire);
437 @Commit_point_define_check: true
441 slot *V = get_impl(this, kvs, key_slot, fullhash);
442 if (V == NULL) return NULL;
443 MODEL_ASSERT (!is_prime(V));
444 return (TypeV*) V->_ptr;
450 //@Commit_point_set: Put_Point | Put_ReadKVS | Put_ReadNewKVS | Put_WriteKey
451 @Commit_point_set: Put_Point | Put_WriteKey
454 # Remember this old value at checking point
455 TypeV *_Old_Val = (TypeV*) spec_table_get(map, key);
456 spec_table_put(map, key, val);
457 //bool passed = equals_val(__RET__, _Old_Val);
460 int old = _Old_Val == NULL ? 0 : _Old_Val->_val;
461 int ret = __RET__ == NULL ? 0 : __RET__->_val;
462 //model_print("Put: key: %d, val: %d, _Old_Val: %d, RET: %d\n",
463 // key->_val, val->_val, old, ret);
466 equals_val(__RET__, _Old_Val)
469 TypeV* put(TypeK *key, TypeV *val) {
470 return putIfMatch(key, val, NO_MATCH_OLD);
475 @Interface: PutIfAbsent
477 Write_Success_Point | PutIfAbsent_Fail_Point
478 @Condition: !spec_table_contains(map, key)
480 COND_PutIfAbsentSucc :: __RET__ == NULL
483 void *_Old_Val = spec_table_get(map, key);
485 spec_table_put(map, key, value);
487 __COND_SAT__ ? __RET__ == NULL : equals_val(_Old_Val, __RET__)
490 TypeV* putIfAbsent(TypeK *key, TypeV *value) {
491 return putIfMatch(key, val, TOMBSTONE);
496 @Interface: RemoveAny
497 @Commit_point_set: Write_Success_Point
500 void *_Old_Val = spec_table_get(map, key);
501 spec_table_put(map, key, NULL);
503 equals_val(__RET__, _Old_Val)
506 TypeV* remove(TypeK *key) {
507 return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
512 @Interface: RemoveIfMatch
514 Write_Success_Point | RemoveIfMatch_Fail_Point
516 equals_val(spec_table_get(map, key), val)
518 COND_RemoveIfMatchSucc :: __RET__ == true
522 spec_table_put(map, key, NULL);
524 __COND_SAT__ ? __RET__ : !__RET__
527 bool remove(TypeK *key, TypeV *val) {
528 slot *val_slot = val == NULL ? NULL : new slot(false, val);
529 return putIfMatch(key, TOMBSTONE, val) == val;
535 @Interface: ReplaceAny
540 void *_Old_Val = spec_table_get(map, key);
542 equals_val(__RET__, _Old_Val)
545 TypeV* replace(TypeK *key, TypeV *val) {
546 return putIfMatch(key, val, MATCH_ANY);
551 @Interface: ReplaceIfMatch
553 Write_Success_Point | ReplaceIfMatch_Fail_Point
555 equals_val(spec_table_get(map, key), oldval)
557 COND_ReplaceIfMatchSucc :: __RET__ == true
561 spec_table_put(map, key, newval);
563 __COND_SAT__ ? __RET__ : !__RET__
566 bool replace(TypeK *key, TypeV *oldval, TypeV *newval) {
567 return putIfMatch(key, newval, oldval) == oldval;
571 static CHM* get_chm(kvs_data* kvs) {
572 CHM *res = (CHM*) kvs->_data[0].load(memory_order_relaxed);
576 static int* get_hashes(kvs_data *kvs) {
577 return (int *) kvs->_data[1].load(memory_order_relaxed);
580 // Preserve happens-before semantics on newly inserted keys
581 static inline slot* key(kvs_data *kvs, int idx) {
582 MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
583 // Corresponding to the volatile read in get_impl() and putIfMatch in
584 // Cliff Click's Java implementation
585 slot *res = (slot*) kvs->_data[idx * 2 + 2].load(memory_order_relaxed);
588 # This is a complicated potential commit point since many many functions are
590 @Potential_commit_point_define: true
591 @Label: Read_Key_Point
598 The atomic operation in val() function is a "potential" commit point,
599 which means in some case it is a real commit point while it is not for
600 some other cases. This so happens because the val() function is such a
601 fundamental function that many internal operation will call. Our
602 strategy is that we label any potential commit points and check if they
603 really are the commit points later.
605 // Preserve happens-before semantics on newly inserted values
606 static inline slot* val(kvs_data *kvs, int idx) {
607 MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
608 // Corresponding to the volatile read in get_impl() and putIfMatch in
609 // Cliff Click's Java implementation
610 /**** CDSChecker error & hb violation ****/
611 slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
614 # This is a complicated potential commit point since many many functions are
616 @Potential_commit_point_define: true
617 @Label: Read_Val_Point
625 static int hash(slot *key_slot) {
626 MODEL_ASSERT(key_slot != NULL && key_slot->_ptr != NULL);
627 TypeK* key = (TypeK*) key_slot->_ptr;
628 int h = key->hashCode();
629 // Spread bits according to Cliff Click's code
630 h += (h << 15) ^ 0xffffcd7d;
634 h += (h << 2) + (h << 14);
635 return h ^ (h >> 16);
638 // Heuristic to decide if reprobed too many times.
639 // Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
640 // put it triggers a table resize. Several places MUST have exact agreement.
641 static int reprobe_limit(int len) {
642 return REPROBE_LIMIT + (len >> 2);
645 static inline bool is_prime(slot *val) {
646 return (val != NULL) && val->_prime;
649 // Check for key equality. Try direct pointer comparison first (fast
650 // negative teset) and then the full 'equals' call
651 static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
653 // Caller should've checked this.
654 MODEL_ASSERT (K != NULL);
655 TypeK* key_ptr = (TypeK*) key_slot->_ptr;
658 ((hashes[hash] == 0 || hashes[hash] == fullhash) &&
660 key_ptr->equals(K->_ptr));
663 static bool valeq(slot *val_slot1, slot *val_slot2) {
664 MODEL_ASSERT (val_slot1 != NULL);
665 TypeK* ptr1 = (TypeV*) val_slot1->_ptr;
666 if (val_slot2 == NULL || ptr1 == NULL) return false;
667 return ptr1->equals(val_slot2->_ptr);
670 // Together with key() preserve the happens-before relationship on newly
672 static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
673 bool res = kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
674 desired, memory_order_relaxed, memory_order_relaxed);
676 # If it is a successful put instead of a copy or any other internal
677 # operantions, expected != NULL
679 @Potential_commit_point_define: res
680 @Label: Write_Key_Point
687 Same as the val() function, we only label the CAS operation as the
688 potential commit point.
690 // Together with val() preserve the happens-before relationship on newly
692 static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
694 /**** CDSChecker error & HB violation ****/
695 bool res = kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
696 desired, memory_order_acq_rel, memory_order_relaxed);
698 # If it is a successful put instead of a copy or any other internal
699 # operantions, expected != NULL
701 @Potential_commit_point_define: res
702 @Label: Write_Val_Point
708 slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
710 int len = kvs->_size;
711 CHM *chm = get_chm(kvs);
712 int *hashes = get_hashes(kvs);
714 int idx = fullhash & (len - 1);
717 slot *K = key(kvs, idx);
720 @Commit_point_define: K == NULL
721 @Potential_commit_point_label: Read_Key_Point
725 slot *V = val(kvs, idx);
728 //model_print("Key is null\n");
729 return NULL; // A miss
732 if (keyeq(K, key_slot, hashes, idx, fullhash)) {
733 // Key hit! Check if table-resize in progress
737 @Commit_point_clear: true
744 @Commit_point_define: true
745 @Potential_commit_point_label: Read_Val_Point
749 return (V == TOMBSTONE) ? NULL : V; // Return this value
751 // Otherwise, finish the copy & retry in the new table
752 return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
753 idx, key_slot), key_slot, fullhash);
756 if (++reprobe_cnt >= REPROBE_LIMIT ||
757 key_slot == TOMBSTONE) {
758 // Retry in new table
759 // Atomic read can be here
760 /**** FIXME: miss ****/
761 kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
764 @Commit_point_define_check: true
765 @Label: Get_ReadNewKVS
768 return newkvs == NULL ? NULL : get_impl(topmap,
769 topmap->help_copy(newkvs), key_slot, fullhash);
772 idx = (idx + 1) & (len - 1); // Reprobe by 1
776 // A wrapper of the essential function putIfMatch()
777 TypeV* putIfMatch(TypeK *key, TypeV *value, slot *old_val) {
778 // TODO: Should throw an exception rather return NULL
779 if (old_val == NULL) {
782 slot *key_slot = new slot(false, key);
784 slot *value_slot = new slot(false, value);
785 /**** FIXME: miss ****/
786 kvs_data *kvs = _kvs.load(memory_order_acquire);
789 @Commit_point_define_check: true
793 slot *res = putIfMatch(this, kvs, key_slot, value_slot, old_val);
794 // Only when copy_slot() call putIfMatch() will it return NULL
795 MODEL_ASSERT (res != NULL);
796 MODEL_ASSERT (!is_prime(res));
797 return res == TOMBSTONE ? NULL : (TypeV*) res->_ptr;
801 Put, Remove, PutIfAbsent, etc will call this function. Return the old
802 value. If the returned value is equals to the expVal (or expVal is
803 NO_MATCH_OLD), then this function puts the val_slot to the table 'kvs'.
804 Only copy_slot will pass a NULL expVal, and putIfMatch only returns a
805 NULL if passed a NULL expVal.
807 static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
808 *key_slot, slot *val_slot, slot *expVal) {
809 MODEL_ASSERT (val_slot != NULL);
810 MODEL_ASSERT (!is_prime(val_slot));
811 MODEL_ASSERT (!is_prime(expVal));
813 int fullhash = hash(key_slot);
814 int len = kvs->_size;
815 CHM *chm = get_chm(kvs);
816 int *hashes = get_hashes(kvs);
817 int idx = fullhash & (len - 1);
825 while (true) { // Spin till we get a key slot
828 if (K == NULL) { // Get a free slot
829 if (val_slot == TOMBSTONE) return val_slot;
830 // Claim the null key-slot
831 if (CAS_key(kvs, idx, NULL, key_slot)) {
834 @Commit_point_define: true
835 @Potential_commit_point_label: Write_Key_Point
839 chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
840 hashes[idx] = fullhash; // Memorize full hash
843 K = key(kvs, idx); // CAS failed, get updated value
844 MODEL_ASSERT (K != NULL);
847 // Key slot not null, there exists a Key here
848 if (keyeq(K, key_slot, hashes, idx, fullhash))
851 // Notice that the logic here should be consistent with that of get.
852 // The first predicate means too many reprobes means nothing in the
854 if (++reprobe_cnt >= reprobe_limit(len) ||
855 K == TOMBSTONE) { // Found a Tombstone key, no more keys
856 newkvs = chm->resize(topmap, kvs);
857 //model_print("resize1\n");
858 // Help along an existing copy
859 if (expVal != NULL) topmap->help_copy(newkvs);
860 return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
863 idx = (idx + 1) & (len - 1); // Reprobe
864 } // End of spinning till we get a Key slot
866 if (val_slot == V) return V; // Fast cutout for no-change
868 // Here it tries to resize cause it doesn't want other threads to stop
869 // its progress (eagerly try to resize soon)
870 /**** FIXME: miss ****/
871 newkvs = chm->_newkvs.load(memory_order_acquire);
874 @Commit_point_define_check: true
875 @Label: Put_ReadNewKVS
878 if (newkvs == NULL &&
879 ((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V))) {
880 //model_print("resize2\n");
881 newkvs = chm->resize(topmap, kvs); // Force the copy to start
884 // Finish the copy and then put it in the new table
886 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
887 expVal), key_slot, val_slot, expVal);
889 // Decided to update the existing table
891 MODEL_ASSERT (!is_prime(V));
893 if (expVal != NO_MATCH_OLD &&
895 (expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
896 !(V == NULL && expVal == TOMBSTONE) &&
897 (expVal == NULL || !valeq(expVal, V))) {
900 @Commit_point_define: expVal == TOMBSTONE
901 @Potential_commit_point_label: Read_Val_Point
902 @Label: PutIfAbsent_Fail_Point
903 # This is a check for the PutIfAbsent() when the value
909 @Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
910 @Potential_commit_point_label: Read_Val_Point
911 @Label: RemoveIfMatch_Fail_Point
916 @Commit_point_define: expVal != NULL && !valeq(expVal, V)
917 @Potential_commit_point_label: Read_Val_Point
918 @Label: ReplaceIfMatch_Fail_Point
921 return V; // Do not update!
924 if (CAS_val(kvs, idx, V, val_slot)) {
927 # The only point where a successful put happens
928 @Commit_point_define: true
929 @Potential_commit_point_label: Write_Val_Point
933 if (expVal != NULL) { // Not called by a table-copy
934 // CAS succeeded, should adjust size
935 // Both normal put's and table-copy calls putIfMatch, but
936 // table-copy does not increase the number of live K/V pairs
937 if ((V == NULL || V == TOMBSTONE) &&
938 val_slot != TOMBSTONE)
939 chm->_size.fetch_add(1, memory_order_relaxed);
940 if (!(V == NULL || V == TOMBSTONE) &&
941 val_slot == TOMBSTONE)
942 chm->_size.fetch_add(-1, memory_order_relaxed);
944 return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
949 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
950 idx, expVal), key_slot, val_slot, expVal);
954 // Help along an existing table-resize. This is a fast cut-out wrapper.
955 kvs_data* help_copy(kvs_data *helper) {
956 /**** FIXME: miss ****/
957 kvs_data *topkvs = _kvs.load(memory_order_acquire);
958 CHM *topchm = get_chm(topkvs);
959 // No cpy in progress
960 if (topchm->_newkvs.load(memory_order_relaxed) == NULL) return helper;
961 topchm->help_copy_impl(this, topkvs, false);