1 #ifndef CLIFFC_HASHTABLE_H
2 #define CLIFFC_HASHTABLE_H
10 #define MODEL_ASSERT assert
12 #include <model-assert.h>
17 #include <cdsannotate.h>
18 #include <specannotation.h>
19 #include <model_memory.h>
25 This header file declares and defines a simplified version of Cliff Click's
26 NonblockingHashMap. It contains all the necessary structures and main
27 functions. In simplified_cliffc_hashtable.cc file, it has the definition for
// Forward declaration so the helper structures (kvs_data, slot, CHM)
// can refer to the table type before its full definition below.
31 template<typename TypeK, typename TypeV>
32 class cliffc_hashtable;
35 Corresponding to the Object[] array in Cliff Click's Java implementation.
36 It keeps the first two slots for CHM (Hashtable control unit) and the hash
37 records (an array of hash used for fast negative key-equality check).
// kvs_data constructor fragment. Layout of _data (established by usage
// elsewhere in this file): _data[0] = CHM control block, _data[1] = int[]
// of cached full hashes, then alternating slots ([2k+2] = key, [2k+3] = value).
45 int real_size = sz * 2 + 2;
46 _data = new atomic<void*>[real_size];
47 // The control block should be initialized in resize()
48 // Init the hash record array
// NOTE(review): the loop body zeroing `hashes` is elided in this listing --
// confirm against the full source that each entry is initialized.
49 int *hashes = new int[_size];
51 for (i = 0; i < _size; i++) {
54 // Init the data to Null slot
55 for (i = 2; i < real_size; i++) {
56 _data[i].store(NULL, memory_order_relaxed);
// Publish the hash array in slot 1; relaxed suffices here because the
// table itself is published later with stronger ordering (see resize()).
58 _data[1].store(hashes, memory_order_relaxed);
// Destructor fragment: reload the hash array pointer so it can be freed.
62 int *hashes = (int*) _data[1].load(memory_order_relaxed);
// slot: (prime flag, payload) pair. copy_slot() below boxes values with
// prime=true while they are being migrated to the new table.
72 slot(bool prime, void *ptr) {
80 TypeK must have defined function "int hashCode()" which returns the hash
81 code for its object, and "int equals(TypeK anotherKey)" which is
82 used to judge equality.
83 TypeK and TypeV should define their own copy constructor.
// The lock-free hashtable itself: a simplified port of Cliff Click's
// NonBlockingHashMap, annotated for CDSChecker model checking.
90 template<typename TypeK, typename TypeV>
91 class cliffc_hashtable {
// CDSChecker specification fragment: the sequential spec tables (map for
// key->value, id_map for key->call-id tags) are created here and consulted
// by the @Commit_point annotations throughout this file.
93 # The synchronization we have for the hashtable gives us the property of
94 # serializability, so we should have a sequential hashtable when we check the
95 # correctness. The key thing is to identify all the commit point.
100 CLASS = cliffc_hashtable;
107 map = new_spec_table_default(equals_key);
108 id_map = new_spec_table_default(equals_id);
111 // model_print("cleaning up\n");
// Key-equality predicate used by the sequential spec table `map`.
113 bool equals_key(void *ptr1, void *ptr2) {
114 TypeK *key1 = (TypeK*) ptr1, *key2 = (TypeK*) ptr2;
115 if (key1 == NULL || key2 == NULL)
117 return key1->equals(key2);
// Value-equality predicate used by the spec-side correctness checks.
121 bool equals_val(void *ptr1, void *ptr2) {
124 TypeV *val1 = (TypeV*) ptr1, *val2 = (TypeV*) ptr2;
125 if (val1 == NULL || val2 == NULL)
127 return val1->equals(val2);
// Tag (call-id) equality for the id_map spec table.
131 bool equals_id(void *ptr1, void *ptr2) {
132 id_tag_t *id1 = (id_tag_t*) ptr1, *id2 = (id_tag_t*) ptr2;
133 if (id1 == NULL || id2 == NULL)
135 return (*id1).tag == (*id2).tag;
139 # Update the tag for the current key slot if the corresponding tag
140 # is NULL, otherwise just return that tag. It will update the next
141 # available tag too if it requires a new tag for that key slot.
// Lazily assigns a call id to `key` in id_map, then returns that id.
142 call_id_t getKeyTag(TypeK *key) {
143 if (!spec_table_contains(id_map, key)) {
144 call_id_t cur_id = current(tag);
145 spec_table_put(id_map, key, (void*) cur_id);
149 call_id_t res = (call_id_t) spec_table_get(id_map, key);
// CHM: per-table control structure. One lives in _data[0] of each kvs_data
// and coordinates the cooperative resize/copy of that table.
161 The control structure for the hashtable
165 friend class cliffc_hashtable;
// The next (larger) table being copied into; NULL when no resize is active.
167 atomic<kvs_data*> _newkvs;
169 // Size of active K,V pairs
172 // Count of used slots
175 // The next part of the table to copy
176 atomic_int _copy_idx;
178 // Work-done reporting
179 atomic_int _copy_done;
// Constructor fragment: all counters start relaxed; no table exists yet.
183 _newkvs.store(NULL, memory_order_relaxed);
184 _size.store(size, memory_order_relaxed);
185 _slots.store(0, memory_order_relaxed);
187 _copy_idx.store(0, memory_order_relaxed);
188 _copy_done.store(0, memory_order_relaxed);
195 // Heuristic to decide if the table is too full
// True only when a probe chain hit REPROBE_LIMIT *and* used slots exceed
// the reprobe limit for this length -- both must hold to trigger a resize.
196 bool table_full(int reprobe_cnt, int len) {
198 reprobe_cnt >= REPROBE_LIMIT &&
199 _slots.load(memory_order_relaxed) >= reprobe_limit(len);
// Start (or join) a resize: allocate the next table and race to install it
// in _newkvs. The acquire loads of _newkvs pair with the release CAS below,
// so a thread that sees a non-NULL _newkvs also sees the table's contents.
202 kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
203 //model_print("resizing...\n");
204 /**** FIXME: miss ****/
205 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
209 // No copy in-progress, start one; Only double the table size
210 int oldlen = kvs->_size;
211 int sz = _size.load(memory_order_relaxed);
214 // Just follow Cliff Click's heuristic to decide the new size
215 if (sz >= (oldlen >> 2)) { // If we are 25% full
216 newsz = oldlen << 1; // Double size
217 if (sz >= (oldlen >> 1))
218 newsz = oldlen << 2; // Double double size
221 // We do not record the record timestamp
222 if (newsz <= oldlen) newsz = oldlen << 1;
223 // Do not shrink ever
224 if (newsz < oldlen) newsz = oldlen;
226 // Last check cause the 'new' below is expensive
227 /**** FIXME: miss ****/
228 newkvs = _newkvs.load(memory_order_acquire);
229 //model_print("hey1\n");
230 if (newkvs != NULL) return newkvs;
232 newkvs = new kvs_data(newsz);
// The new table's CHM is sized from the current live-pair count.
233 void *chm = (void*) new CHM(sz);
234 //model_print("hey2\n");
235 newkvs->_data[0].store(chm, memory_order_relaxed);
237 kvs_data *cur_newkvs;
238 // Another check after the slow allocation
239 /**** FIXME: miss ****/
240 if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
242 // CAS the _newkvs to the allocated table
// NOTE(review): the names below are swapped relative to their CAS roles --
// `desired` (NULL) is passed as the *expected* value and `expected`
// (newkvs) as the *desired* value. The call itself behaves correctly;
// only the naming is misleading.
243 kvs_data *desired = (kvs_data*) NULL;
244 kvs_data *expected = (kvs_data*) newkvs;
245 /**** FIXME: miss ****/
246 //model_print("release in resize!\n");
247 if (!_newkvs.compare_exchange_strong(desired, expected, memory_order_release,
248 memory_order_relaxed)) {
249 // Should clean the allocated area
// Lost the race: adopt the winner's table instead of ours.
251 /**** FIXME: miss ****/
252 newkvs = _newkvs.load(memory_order_acquire)
// Cooperatively copy chunks of the old table into _newkvs. Each helper
// claims min_copy_work slots at a time via CAS on _copy_idx; "panic" mode
// (claim index ran past 2*oldlen) makes this thread copy everything.
257 void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
259 MODEL_ASSERT (get_chm(oldkvs) == this);
260 /**** FIXME: miss ****/
261 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
262 int oldlen = oldkvs->_size;
// Chunk size: at most 1024 slots per claim.
263 int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
265 // Just follow Cliff Click's code here
266 int panic_start = -1;
268 while (_copy_done.load(memory_order_relaxed) < oldlen) {
269 copyidx = _copy_idx.load(memory_order_relaxed);
270 if (panic_start == -1) { // No panic
271 copyidx = _copy_idx.load(memory_order_relaxed);
272 while (copyidx < (oldlen << 1) &&
273 !_copy_idx.compare_exchange_strong(copyidx, copyidx +
274 min_copy_work, memory_order_relaxed, memory_order_relaxed))
275 copyidx = _copy_idx.load(memory_order_relaxed);
276 if (!(copyidx < (oldlen << 1)))
277 panic_start = copyidx;
280 // Now copy the chunk of work we claimed
// Index wraps with & (oldlen - 1); workdone accounting is in the elided
// lines and reported below.
282 for (int i = 0; i < min_copy_work; i++)
283 if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
287 copy_check_and_promote(topmap, oldkvs, workdone);
289 copyidx += min_copy_work;
// Non-panicked helpers may stop after one chunk unless asked to copy all.
290 if (!copy_all && panic_start == -1)
291 return; // We are done with the work we claim
293 copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
// Copy exactly one slot (the one a reader/writer tripped over because it
// saw a Prime), report the work, and optionally help finish the copy.
296 kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
297 *oldkvs, int idx, void *should_help) {
298 /**** FIXME: miss ****/
299 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
300 // We're only here cause the caller saw a Prime
301 if (copy_slot(topmap, idx, oldkvs, newkvs))
302 copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
// NULL should_help means "just hand back the new table"; otherwise pitch in.
303 return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
// Fold `workdone` copied slots into _copy_done; when every slot of the old
// table is copied, promote _newkvs to be the top-level table.
306 void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
307 oldkvs, int workdone) {
308 int oldlen = oldkvs->_size;
309 int copyDone = _copy_done.load(memory_order_relaxed);
// CAS-retry loop (retry structure partly elided in this listing).
312 copyDone = _copy_done.load(memory_order_relaxed);
313 if (_copy_done.compare_exchange_weak(copyDone, copyDone +
314 workdone, memory_order_relaxed, memory_order_relaxed))
319 // Promote the new table to the current table
320 if (copyDone + workdone == oldlen &&
321 topmap->_kvs.load(memory_order_relaxed) == oldkvs) {
322 /**** FIXME: miss ****/
323 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
324 /**** CDSChecker error ****/
// Release so readers that acquire _kvs see the fully-copied new table.
325 topmap->_kvs.compare_exchange_strong(oldkvs, newkvs, memory_order_release,
326 memory_order_relaxed);
// Copy one key/value pair from oldkvs[idx] into the new table. Returns
// nonzero when this thread performed the copy. Several structural lines
// (declarations/braces) are elided in this listing.
330 bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
// Close empty key slots with TOMBSTONE so no new key can claim them.
333 while ((key_slot = key(oldkvs, idx)) == NULL)
334 CAS_key(oldkvs, idx, NULL, TOMBSTONE);
336 // First CAS old to Prime
337 slot *oldval = val(oldkvs, idx);
338 while (!is_prime(oldval)) {
// Dead slots box straight to TOMBPRIME; live values get a prime-flagged box.
339 slot *box = (oldval == NULL || oldval == TOMBSTONE)
340 ? TOMBPRIME : new slot(true, oldval->_ptr);
341 if (CAS_val(oldkvs, idx, oldval, box)) {
342 if (box == TOMBPRIME)
343 return 1; // Copy done
344 // Otherwise we CAS'd the box
345 oldval = box; // Record updated oldval
348 oldval = val(oldkvs, idx); // Else re-try
351 if (oldval == TOMBPRIME) return false; // Copy already completed here
// Copy the unboxed value into the new table via putIfMatch (the expected
// value argument is in the elided lines -- presumably NULL, the
// copy-only sentinel; confirm against the full source).
353 slot *old_unboxed = new slot(false, oldval->_ptr);
354 int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
357 // Old value is exposed in the new table
// Finally kill the old slot with TOMBPRIME so readers move on.
358 while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
359 oldval = val(oldkvs, idx);
361 return copied_into_new;
368 static const int Default_Init_Size = 4; // Initial table size
// Sentinel slots for putIfMatch's expected-value argument:
// MATCH_ANY = replace whatever live value exists; NO_MATCH_OLD = always put.
370 static slot* const MATCH_ANY;
371 static slot* const NO_MATCH_OLD;
// Dead-slot markers: TOMBSTONE = deleted; TOMBPRIME = deleted + mid-copy.
373 static slot* const TOMBPRIME;
374 static slot* const TOMBSTONE;
376 static const int REPROBE_LIMIT = 10; // Forces a table-resize
// The current top-level table; swapped by copy_check_and_promote().
378 atomic<kvs_data*> _kvs;
// Default constructor fragment: build the initial table and its CHM.
// NOTE(review): index math elsewhere uses `fullhash & (len - 1)`, which
// assumes table sizes are powers of two -- confirm init_size is constrained
// accordingly by callers.
382 // Should initialize the CHM for the construction of the table
383 // For other CHM in kvs_data, they should be initialized in resize()
384 // because the size is determined dynamically
390 kvs_data *kvs = new kvs_data(Default_Init_Size);
391 void *chm = (void*) new CHM(0);
392 kvs->_data[0].store(chm, memory_order_relaxed);
393 _kvs.store(kvs, memory_order_relaxed);
// Sized constructor: identical setup with a caller-chosen initial size.
396 cliffc_hashtable(int init_size) {
397 // Should initialize the CHM for the construction of the table
398 // For other CHM in kvs_data, they should be initialized in resize()
399 // because the size is determined dynamically
406 kvs_data *kvs = new kvs_data(init_size);
407 void *chm = (void*) new CHM(0);
408 kvs->_data[0].store(chm, memory_order_relaxed);
409 _kvs.store(kvs, memory_order_relaxed);
// Spec annotations for get(): the commit point is one of the labeled reads,
// and the postcondition compares the return with the sequential spec map.
415 //@Commit_point_set: Get_Point1 | Get_Point2 | Get_ReadKVS | Get_ReadNewKVS | Get_Clear
416 @Commit_point_set: Get_Point1 | Get_Point2 | Get_Clear
417 //@Commit_point_set: Get_Point1 | Get_Point2 | Get_Point3
420 TypeV *_Old_Val = (TypeV*) spec_table_get(map, key);
421 //bool passed = equals_val(_Old_Val, __RET__);
422 //bool passed = false;
424 //int old = _Old_Val == NULL ? 0 : _Old_Val->_val;
425 //int ret = __RET__ == NULL ? 0 : __RET__->_val;
426 //model_print("Get: key: %d, _Old_Val: %d, RET: %d\n",
427 //key->_val, old, ret);
430 //__RET__ == NULL ? true : equals_val(_Old_Val, __RET__)
431 equals_val(_Old_Val, __RET__)
// Public lookup: wrap the key in a slot, read the current table, delegate
// to get_impl. Returns NULL on a miss.
434 TypeV* get(TypeK *key) {
435 slot *key_slot = new slot(false, key);
436 int fullhash = hash(key_slot);
437 /**** CDSChecker error ****/
// Acquire pairs with the release promotion in copy_check_and_promote().
438 kvs_data *kvs = _kvs.load(memory_order_acquire);
441 @Commit_point_define_check: true
445 slot *V = get_impl(this, kvs, key_slot, fullhash);
446 if (V == NULL) return NULL;
// get_impl must never hand a mid-copy (prime) slot back to the caller.
447 MODEL_ASSERT (!is_prime(V));
448 return (TypeV*) V->_ptr;
// Spec annotations for put(): record the old value, update the spec map,
// and check the return equals the previous mapping.
454 //@Commit_point_set: Put_Point | Put_ReadKVS | Put_ReadNewKVS | Put_WriteKey
455 @Commit_point_set: Put_Point | Put_WriteKey
458 # Remember this old value at checking point
459 TypeV *_Old_Val = (TypeV*) spec_table_get(map, key);
460 spec_table_put(map, key, val);
461 //bool passed = equals_val(__RET__, _Old_Val);
462 //bool passed = false;
464 //int old = _Old_Val == NULL ? 0 : _Old_Val->_val;
465 //int ret = __RET__ == NULL ? 0 : __RET__->_val;
466 //model_print("Put: key: %d, val: %d, _Old_Val: %d, RET: %d\n",
467 // key->_val, val->_val, old, ret);
470 equals_val(__RET__, _Old_Val)
// Unconditional put: NO_MATCH_OLD makes putIfMatch always install val.
// Returns the previous value (NULL if none).
473 TypeV* put(TypeK *key, TypeV *val) {
474 return putIfMatch(key, val, NO_MATCH_OLD);
// Spec annotations for putIfAbsent(): succeeds (returns NULL) only when the
// spec map has no live entry for the key.
479 @Interface: PutIfAbsent
481 Write_Success_Point | PutIfAbsent_Fail_Point
482 @Condition: !spec_table_contains(map, key)
484 COND_PutIfAbsentSucc :: __RET__ == NULL
487 void *_Old_Val = spec_table_get(map, key);
489 spec_table_put(map, key, value);
491 __COND_SAT__ ? __RET__ == NULL : equals_val(_Old_Val, __RET__)
494 TypeV* putIfAbsent(TypeK *key, TypeV *value) {
495 return putIfMatch(key, val, TOMBSTONE);
// Spec annotations + implementation of unconditional remove.
500 @Interface: RemoveAny
501 @Commit_point_set: Write_Success_Point
504 void *_Old_Val = spec_table_get(map, key);
505 spec_table_put(map, key, NULL);
507 equals_val(__RET__, _Old_Val)
// Delete by writing TOMBSTONE over any existing value; returns the old one.
510 TypeV* remove(TypeK *key) {
// NOTE(review): TOMBSTONE is a slot* passed where the wrapper declares a
// TypeV* value parameter -- presumably a cast exists in the elided
// original; confirm against the full source.
511 return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
// Spec annotations + implementation of conditional remove(key, val).
516 @Interface: RemoveIfMatch
518 Write_Success_Point | RemoveIfMatch_Fail_Point
520 equals_val(spec_table_get(map, key), val)
522 COND_RemoveIfMatchSucc :: __RET__ == true
526 spec_table_put(map, key, NULL);
528 __COND_SAT__ ? __RET__ : !__RET__
// Remove only if the current value matches `val`; returns success.
531 bool remove(TypeK *key, TypeV *val) {
// NOTE(review): val_slot is built but never used below -- the call passes
// the raw `val` pointer instead. Confirm against the full source whether
// `putIfMatch(key, TOMBSTONE, val_slot) == val_slot` was intended.
532 slot *val_slot = val == NULL ? NULL : new slot(false, val);
533 return putIfMatch(key, TOMBSTONE, val) == val;
// Spec annotations + implementation of unconditional replace.
539 @Interface: ReplaceAny
544 void *_Old_Val = spec_table_get(map, key);
546 equals_val(__RET__, _Old_Val)
// Replace only when some live value already exists (MATCH_ANY sentinel);
// returns the previous value.
549 TypeV* replace(TypeK *key, TypeV *val) {
550 return putIfMatch(key, val, MATCH_ANY);
// Spec annotations + implementation of conditional replace.
555 @Interface: ReplaceIfMatch
557 Write_Success_Point | ReplaceIfMatch_Fail_Point
559 equals_val(spec_table_get(map, key), oldval)
561 COND_ReplaceIfMatchSucc :: __RET__ == true
565 spec_table_put(map, key, newval);
567 __COND_SAT__ ? __RET__ : !__RET__
// Replace only if the current value matches oldval; returns success.
570 bool replace(TypeK *key, TypeV *oldval, TypeV *newval) {
// NOTE(review): oldval (TypeV*) is passed where the wrapper declares a
// slot* expected-value parameter -- confirm the elided original's casts.
571 return putIfMatch(key, newval, oldval) == oldval;
// The CHM control block lives in slot 0 of the backing array.
575 static CHM* get_chm(kvs_data* kvs) {
576 CHM *res = (CHM*) kvs->_data[0].load(memory_order_relaxed);
580 static int* get_hashes(kvs_data *kvs) {
581 return (int *) kvs->_data[1].load(memory_order_relaxed);
584 // Preserve happens-before semantics on newly inserted keys
// Read the key slot at `idx`: keys live at even offsets 2*idx + 2.
585 static inline slot* key(kvs_data *kvs, int idx) {
586 MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
587 // Corresponding to the volatile read in get_impl() and putIfMatch in
588 // Cliff Click's Java implementation
589 slot *res = (slot*) kvs->_data[idx * 2 + 2].load(memory_order_relaxed);
592 # This is a complicated potential commit point since many many functions are
594 @Potential_commit_point_define: true
595 @Label: Read_Key_Point
// Note on the annotation below: this read is only *potentially* a commit
// point -- the labels let the spec checker decide per call site.
602 The atomic operation in val() function is a "potential" commit point,
603 which means in some case it is a real commit point while it is not for
604 some other cases. This so happens because the val() function is such a
605 fundamental function that many internal operation will call. Our
606 strategy is that we label any potential commit points and check if they
607 really are the commit points later.
609 // Preserve happens-before semantics on newly inserted values
// Read the value slot at `idx`: values live at odd offsets 2*idx + 3.
// Acquire pairs with the release/acq_rel CAS in CAS_val().
610 static inline slot* val(kvs_data *kvs, int idx) {
611 MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
612 // Corresponding to the volatile read in get_impl() and putIfMatch in
613 // Cliff Click's Java implementation
614 /**** CDSChecker error & hb violation ****/
615 slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
618 # This is a complicated potential commit point since many many functions are
620 @Potential_commit_point_define: true
621 @Label: Read_Val_Point
// Compute and bit-spread the key's hash (some spread steps are elided in
// this listing; they follow Cliff Click's original mixing sequence).
629 static int hash(slot *key_slot) {
630 MODEL_ASSERT(key_slot != NULL && key_slot->_ptr != NULL);
631 TypeK* key = (TypeK*) key_slot->_ptr;
632 int h = key->hashCode();
633 // Spread bits according to Cliff Click's code
634 h += (h << 15) ^ 0xffffcd7d;
638 h += (h << 2) + (h << 14);
639 return h ^ (h >> 16);
642 // Heuristic to decide if reprobed too many times.
643 // Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
644 // put it triggers a table resize. Several places MUST have exact agreement.
645 static int reprobe_limit(int len) {
646 return REPROBE_LIMIT + (len >> 2);
649 static inline bool is_prime(slot *val) {
650 return (val != NULL) && val->_prime;
653 // Check for key equality. Try direct pointer comparison first (fast
654 // negative test) and then the full 'equals' call
// Note: `hash` here is the table index, used to consult the cached
// full-hash array (0 = unset) before paying for the equals() call.
655 static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
657 // Caller should've checked this.
658 MODEL_ASSERT (K != NULL);
659 TypeK* key_ptr = (TypeK*) key_slot->_ptr;
662 ((hashes[hash] == 0 || hashes[hash] == fullhash) &&
664 key_ptr->equals(K->_ptr));
667 static bool valeq(slot *val_slot1, slot *val_slot2) {
668 MODEL_ASSERT (val_slot1 != NULL);
669 TypeK* ptr1 = (TypeV*) val_slot1->_ptr;
670 if (val_slot2 == NULL || ptr1 == NULL) return false;
671 return ptr1->equals(val_slot2->_ptr);
674 // Together with key() preserve the happens-before relationship on newly
// Claim/replace the key slot at `idx` (offset 2*idx + 2). Relaxed on both
// orders; the value-side CAS carries the publication ordering.
676 static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
677 bool res = kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
678 desired, memory_order_relaxed, memory_order_relaxed);
680 # If it is a successful put instead of a copy or any other internal
681 # operantions, expected != NULL
683 @Potential_commit_point_define: res
684 @Label: Write_Key_Point
694 // Together with val() preserve the happens-before relationship on newly
696 static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
698 /**** CDSChecker error & HB violation ****/
699 bool res = kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
700 desired, memory_order_acq_rel, memory_order_relaxed);
702 # If it is a successful put instead of a copy or any other internal
703 # operantions, expected != NULL
705 @Potential_commit_point_define: res
706 @Label: Write_Val_Point
// Core lookup: linear reprobe from the hashed index. Returns the value
// slot (TOMBSTONE mapped to NULL), or NULL on a miss; falls through to the
// new table when a copy is in progress.
712 slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
714 int len = kvs->_size;
715 CHM *chm = get_chm(kvs);
716 int *hashes = get_hashes(kvs);
// Power-of-two mask: first probe index.
718 int idx = fullhash & (len - 1);
721 slot *K = key(kvs, idx);
724 @Commit_point_define: K == NULL
725 @Potential_commit_point_label: Read_Key_Point
729 slot *V = val(kvs, idx);
732 //model_print("Key is null\n");
733 return NULL; // A miss
736 if (keyeq(K, key_slot, hashes, idx, fullhash)) {
737 // Key hit! Check if table-resize in progress
741 @Commit_point_clear: true
748 @Commit_point_define: true
749 @Potential_commit_point_label: Read_Val_Point
753 return (V == TOMBSTONE) ? NULL : V; // Return this value
755 // Otherwise, finish the copy & retry in the new table
756 return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
757 idx, key_slot), key_slot, fullhash);
// NOTE(review): two suspected transcription slips vs the Java original:
// (1) `key_slot == TOMBSTONE` compares the *searched* key, not the probed
//     key K, with the tombstone sentinel -- Java NBHM tests the table key;
// (2) the bound uses REPROBE_LIMIT while putIfMatch uses
//     reprobe_limit(len), despite the "exact agreement" warning above.
// Confirm both against the full source before changing.
760 if (++reprobe_cnt >= REPROBE_LIMIT ||
761 key_slot == TOMBSTONE) {
762 // Retry in new table
763 // Atomic read can be here
764 /**** FIXME: miss ****/
765 kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
768 @Commit_point_define_check: true
769 @Label: Get_ReadNewKVS
772 return newkvs == NULL ? NULL : get_impl(topmap,
773 topmap->help_copy(newkvs), key_slot, fullhash);
776 idx = (idx + 1) & (len - 1); // Reprobe by 1
780 // A wrapper of the essential function putIfMatch()
// Boxes key/value into slots, reads the current table, and unwraps the
// returned slot (TOMBSTONE -> NULL) for the caller.
781 TypeV* putIfMatch(TypeK *key, TypeV *value, slot *old_val) {
782 // TODO: Should throw an exception rather return NULL
783 if (old_val == NULL) {
786 slot *key_slot = new slot(false, key);
788 slot *value_slot = new slot(false, value);
789 /**** FIXME: miss ****/
// Acquire pairs with the release promotion of _kvs.
790 kvs_data *kvs = _kvs.load(memory_order_acquire);
793 @Commit_point_define_check: true
797 slot *res = putIfMatch(this, kvs, key_slot, value_slot, old_val);
798 // Only when copy_slot() call putIfMatch() will it return NULL
799 MODEL_ASSERT (res != NULL);
800 MODEL_ASSERT (!is_prime(res));
801 return res == TOMBSTONE ? NULL : (TypeV*) res->_ptr;
805 Put, Remove, PutIfAbsent, etc will call this function. Return the old
806 value. If the returned value is equals to the expVal (or expVal is
807 NO_MATCH_OLD), then this function puts the val_slot to the table 'kvs'.
808 Only copy_slot will pass a NULL expVal, and putIfMatch only returns a
809 NULL if passed a NULL expVal.
811 static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
812 *key_slot, slot *val_slot, slot *expVal) {
813 MODEL_ASSERT (val_slot != NULL);
814 MODEL_ASSERT (!is_prime(val_slot));
815 MODEL_ASSERT (!is_prime(expVal));
817 int fullhash = hash(key_slot);
818 int len = kvs->_size;
819 CHM *chm = get_chm(kvs);
820 int *hashes = get_hashes(kvs);
821 int idx = fullhash & (len - 1);
829 while (true) { // Spin till we get a key slot
832 if (K == NULL) { // Get a free slot
833 if (val_slot == TOMBSTONE) return val_slot;
834 // Claim the null key-slot
835 if (CAS_key(kvs, idx, NULL, key_slot)) {
838 @Commit_point_define: true
839 @Potential_commit_point_label: Write_Key_Point
843 chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
844 hashes[idx] = fullhash; // Memorize full hash
847 K = key(kvs, idx); // CAS failed, get updated value
848 MODEL_ASSERT (K != NULL);
851 // Key slot not null, there exists a Key here
852 if (keyeq(K, key_slot, hashes, idx, fullhash))
855 // Notice that the logic here should be consistent with that of get.
856 // The first predicate means too many reprobes means nothing in the
858 if (++reprobe_cnt >= reprobe_limit(len) ||
859 K == TOMBSTONE) { // Found a Tombstone key, no more keys
860 newkvs = chm->resize(topmap, kvs);
861 //model_print("resize1\n");
862 // Help along an existing copy
863 if (expVal != NULL) topmap->help_copy(newkvs);
864 return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
867 idx = (idx + 1) & (len - 1); // Reprobe
868 } // End of spinning till we get a Key slot
870 if (val_slot == V) return V; // Fast cutout for no-change
872 // Here it tries to resize cause it doesn't want other threads to stop
873 // its progress (eagerly try to resize soon)
874 /**** FIXME: miss ****/
875 newkvs = chm->_newkvs.load(memory_order_acquire);
878 @Commit_point_define_check: true
879 @Label: Put_ReadNewKVS
882 if (newkvs == NULL &&
883 ((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V))) {
884 //model_print("resize2\n");
885 newkvs = chm->resize(topmap, kvs); // Force the copy to start
888 // Finish the copy and then put it in the new table
890 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
891 expVal), key_slot, val_slot, expVal);
893 // Decided to update the existing table
895 MODEL_ASSERT (!is_prime(V));
897 if (expVal != NO_MATCH_OLD &&
899 (expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
900 !(V == NULL && expVal == TOMBSTONE) &&
901 (expVal == NULL || !valeq(expVal, V))) {
904 @Commit_point_define: expVal == TOMBSTONE
905 @Potential_commit_point_label: Read_Val_Point
906 @Label: PutIfAbsent_Fail_Point
907 # This is a check for the PutIfAbsent() when the value
913 @Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
914 @Potential_commit_point_label: Read_Val_Point
915 @Label: RemoveIfMatch_Fail_Point
920 @Commit_point_define: expVal != NULL && !valeq(expVal, V)
921 @Potential_commit_point_label: Read_Val_Point
922 @Label: ReplaceIfMatch_Fail_Point
925 return V; // Do not update!
928 if (CAS_val(kvs, idx, V, val_slot)) {
931 # The only point where a successful put happens
932 @Commit_point_define: true
933 @Potential_commit_point_label: Write_Val_Point
937 if (expVal != NULL) { // Not called by a table-copy
938 // CAS succeeded, should adjust size
939 // Both normal put's and table-copy calls putIfMatch, but
940 // table-copy does not increase the number of live K/V pairs
941 if ((V == NULL || V == TOMBSTONE) &&
942 val_slot != TOMBSTONE)
943 chm->_size.fetch_add(1, memory_order_relaxed);
944 if (!(V == NULL || V == TOMBSTONE) &&
945 val_slot == TOMBSTONE)
946 chm->_size.fetch_add(-1, memory_order_relaxed);
948 return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
953 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
954 idx, expVal), key_slot, val_slot, expVal);
958 // Help along an existing table-resize. This is a fast cut-out wrapper.
// Returns `helper` unchanged when no resize is active; otherwise pitches
// in on the top-level table's copy. (The tail of this function runs past
// the end of this listing.)
959 kvs_data* help_copy(kvs_data *helper) {
960 /**** FIXME: miss ****/
961 kvs_data *topkvs = _kvs.load(memory_order_acquire);
962 CHM *topchm = get_chm(topkvs);
963 // No copy in progress
964 if (topchm->_newkvs.load(memory_order_relaxed) == NULL) return helper;
965 topchm->help_copy_impl(this, topkvs, false);