1 #ifndef CLIFFC_HASHTABLE_H
2 #define CLIFFC_HASHTABLE_H
10 #define MODEL_ASSERT assert
12 #include <model-assert.h>
17 #include <cdsannotate.h>
18 #include <specannotation.h>
19 #include <model_memory.h>
25 This header file declares and defines a simplified version of Cliff Click's
26 NonblockingHashMap. It contains all the necessary structures and main
27 functions. In simplified_cliffc_hashtable.cc file, it has the definition for
31 template<typename TypeK, typename TypeV>
32 class cliffc_hashtable;
35 Corresponding to the Object[] array in Cliff Click's Java implementation.
36 It keeps the first two slots for CHM (Hashtable control unit) and the hash
37 records (an array of hash used for fast negative key-equality check).
// kvs_data constructor body (fragment): the backing array holds sz*2+2
// atomic slots — _data[0] is the CHM control block (installed later, see
// comment below), _data[1] is the cached int[] of full hashes, and pair k
// occupies _data[2k+2] (key) / _data[2k+3] (value).
45 int real_size = sz * 2 + 2;
46 _data = new atomic<void*>[real_size];
47 // The control block should be initialized in resize()
48 // Init the hash record array
49 int *hashes = new int[_size];
51 for (i = 0; i < _size; i++) {
54 // Init the data to Null slot
55 for (i = 2; i < real_size; i++) {
56 _data[i].store(NULL, memory_order_relaxed);
58 _data[1].store(hashes, memory_order_relaxed);
// NOTE(review): the line below re-loads the hash array from _data[1];
// presumably this is the start of the destructor's cleanup path — confirm
// against the full file.
62 int *hashes = (int*) _data[1].load(memory_order_relaxed);
72 slot(bool prime, void *ptr) {
80 TypeK must have defined function "int hashCode()" which return the hash
81 code for the its object, and "int equals(TypeK anotherKey)" which is
82 used to judge equality.
83 TypeK and TypeV should define their own copy constructor.
90 template<typename TypeK, typename TypeV>
91 class cliffc_hashtable {
93 # The synchronization we have for the hashtable gives us the property of
94 # serializability, so we should have a sequential hashtable when we check the
95 # correctness. The key thing is to identify all the commit point.
100 CLASS = cliffc_hashtable;
107 map = new_spec_table_default(equals_key);
108 id_map = new_spec_table_default(equals_id);
// Spec-checker helper: deep key equality via TypeK::equals().
// NOTE(review): the statement taken when either key is NULL is elided
// here — TODO confirm it returns false (or pointer equality).
112 bool equals_key(void *ptr1, void *ptr2) {
113 TypeK *key1 = (TypeK*) ptr1,
114 *key2 = (TypeK*) ptr2;
115 if (key1 == NULL || key2 == NULL)
117 return key1->equals(key2);
// Spec-checker helper: deep value equality via TypeV::equals().
// NOTE(review): the statement taken when either value is NULL is elided
// here — TODO confirm it mirrors equals_key's NULL handling.
121 bool equals_val(void *ptr1, void *ptr2) {
124 TypeV *val1 = (TypeV*) ptr1,
125 *val2 = (TypeV*) ptr2;
126 if (val1 == NULL || val2 == NULL)
128 return val1->equals(val2);
// Spec-checker helper: two id_tag_t values are equal iff their 'tag'
// members match. The NULL-case statement is elided from this view.
132 bool equals_id(void *ptr1, void *ptr2) {
133 id_tag_t *id1 = (id_tag_t*) ptr1,
134 *id2 = (id_tag_t*) ptr2;
135 if (id1 == NULL || id2 == NULL)
137 return (*id1).tag == (*id2).tag;
141 # Update the tag for the current key slot if the corresponding tag
142 # is NULL, otherwise just return that tag. It will update the next
143 # available tag too if it requires a new tag for that key slot.
// Lazily assigns a spec call_id tag to a key the first time it is seen
// (taking the current value of 'tag'); later calls return the cached tag
// from id_map.
144 call_id_t getKeyTag(TypeK *key) {
145 if (!spec_table_contains(id_map, key)) {
146 call_id_t cur_id = current(tag);
147 spec_table_put(id_map, key, (void*) cur_id);
151 call_id_t res = (call_id_t) spec_table_get(id_map, key);
163 The control structure for the hashtable
// CHM — the per-table control block stored in kvs_data._data[0].
// Tracks the next table being copied into (_newkvs), live K/V count
// (_size), used key slots (_slots), and resize-copy progress
// (_copy_idx / _copy_done).
167 friend class cliffc_hashtable;
169 atomic<kvs_data*> _newkvs;
171 // Size of active K,V pairs
174 // Count of used slots
177 // The next part of the table to copy
178 atomic_int _copy_idx;
180 // Work-done reporting
181 atomic_int _copy_done;
// Constructor body (fragment): no next-table published yet; all
// counters start at zero with relaxed stores (no readers exist yet).
185 _newkvs.store(NULL, memory_order_relaxed);
186 _size.store(size, memory_order_relaxed);
187 _slots.store(0, memory_order_relaxed);
189 _copy_idx.store(0, memory_order_relaxed);
190 _copy_done.store(0, memory_order_relaxed);
197 // Heuristic to decide if the table is too full
198 bool table_full(int reprobe_cnt, int len) {
200 reprobe_cnt >= REPROBE_LIMIT &&
201 _slots.load(memory_order_relaxed) >= reprobe_limit(len);
// Allocate (or join) the next, larger table. Returns an already-installed
// _newkvs if another thread won the race; otherwise picks the new size
// (double, or quadruple when >= 50% full, never shrinking), allocates the
// table plus its CHM, and publishes it into _newkvs with a release CAS.
204 kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
205 //model_print("resizing...\n");
206 /**** FIXME: miss ****/
207 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
211 // No copy in-progress, start one; Only double the table size
212 int oldlen = kvs->_size;
213 int sz = _size.load(memory_order_relaxed);
216 // Just follow Cliff Click's heuristic to decide the new size
217 if (sz >= (oldlen >> 2)) { // If we are 25% full
218 newsz = oldlen << 1; // Double size
219 if (sz >= (oldlen >> 1))
220 newsz = oldlen << 2; // Double double size
223 // We do not record the record timestamp
224 if (newsz <= oldlen) newsz = oldlen << 1;
225 // Do not shrink ever
226 if (newsz < oldlen) newsz = oldlen;
228 // Last check cause the 'new' below is expensive
229 /**** FIXME: miss ****/
230 newkvs = _newkvs.load(memory_order_acquire);
231 //model_print("hey1\n");
232 if (newkvs != NULL) return newkvs;
234 newkvs = new kvs_data(newsz);
235 void *chm = (void*) new CHM(sz);
236 //model_print("hey2\n");
237 newkvs->_data[0].store(chm, memory_order_relaxed);
239 kvs_data *cur_newkvs;
240 // Another check after the slow allocation
241 /**** FIXME: miss ****/
242 if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
244 // CAS the _newkvs to the allocated table
// NOTE(review): 'desired' and 'expected' are named backwards — 'desired'
// holds the expected old value (NULL) and 'expected' holds the new table.
// The argument order below is correct for compare_exchange_strong
// (expected value comes first), so behavior is right: install newkvs only
// if _newkvs is still NULL. Only the names mislead.
245 kvs_data *desired = (kvs_data*) NULL;
246 kvs_data *expected = (kvs_data*) newkvs;
247 /**** FIXME: miss ****/
248 //model_print("release in resize!\n");
249 if (!_newkvs.compare_exchange_strong(desired, expected, memory_order_release,
250 memory_order_relaxed)) {
251 // Should clean the allocated area
253 /**** FIXME: miss ****/
254 newkvs = _newkvs.load(memory_order_acquire);
// Claim chunks of the old table (min_copy_work = min(oldlen, 1024) slots
// at a time via CAS on _copy_idx) and copy them into _newkvs. If the
// claim index runs past 2*oldlen we enter 'panic' mode and this thread
// copies everything linearly instead of claiming chunks.
259 void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
261 MODEL_ASSERT (get_chm(oldkvs) == this);
262 /**** FIXME: miss ****/
263 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
264 int oldlen = oldkvs->_size;
265 int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
267 // Just follow Cliff Click's code here
268 int panic_start = -1;
270 while (_copy_done.load(memory_order_relaxed) < oldlen) {
271 copyidx = _copy_idx.load(memory_order_relaxed);
272 if (panic_start == -1) { // No panic
273 copyidx = _copy_idx.load(memory_order_relaxed);
274 while (copyidx < (oldlen << 1) &&
275 !_copy_idx.compare_exchange_strong(copyidx, copyidx +
276 min_copy_work, memory_order_relaxed, memory_order_relaxed))
277 copyidx = _copy_idx.load(memory_order_relaxed);
278 if (!(copyidx < (oldlen << 1)))
279 panic_start = copyidx;
282 // Now copy the chunk of work we claimed
284 for (int i = 0; i < min_copy_work; i++)
285 if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
289 copy_check_and_promote(topmap, oldkvs, workdone);
291 copyidx += min_copy_work;
292 if (!copy_all && panic_start == -1)
293 return; // We are done with the work we claim
295 copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
// Copy the one slot the caller stumbled on (it saw a Prime), record the
// copied slot, and either return the new table directly or keep helping
// the copy when should_help is non-NULL.
298 kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
299 *oldkvs, int idx, void *should_help) {
300 /**** FIXME: miss ****/
301 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
302 // We're only here cause the caller saw a Prime
303 if (copy_slot(topmap, idx, oldkvs, newkvs))
304 copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
305 return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
// Fold 'workdone' slots into _copy_done (CAS loop, relaxed); when the
// whole old table has been copied, promote _newkvs into topmap->_kvs
// with a release CAS so readers of the new table see its contents.
308 void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
309 oldkvs, int workdone) {
310 int oldlen = oldkvs->_size;
311 int copyDone = _copy_done.load(memory_order_relaxed);
314 copyDone = _copy_done.load(memory_order_relaxed);
315 if (_copy_done.compare_exchange_weak(copyDone, copyDone +
316 workdone, memory_order_relaxed, memory_order_relaxed))
321 // Promote the new table to the current table
322 if (copyDone + workdone == oldlen &&
323 topmap->_kvs.load(memory_order_relaxed) == oldkvs) {
324 /**** FIXME: miss ****/
325 kvs_data *newkvs = _newkvs.load(memory_order_acquire);
326 /**** CDSChecker error ****/
327 topmap->_kvs.compare_exchange_strong(oldkvs, newkvs, memory_order_release,
328 memory_order_relaxed);
// Copy one key/value pair from oldkvs into the new table. An empty slot
// is closed with a TOMBSTONE key; a live value is first boxed as a Prime
// (so late writers to the old table lose), then inserted into the new
// table via putIfMatch, and finally the old slot is stamped TOMBPRIME.
// Returns true iff this call performed the copy.
332 bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
335 while ((key_slot = key(oldkvs, idx)) == NULL)
336 CAS_key(oldkvs, idx, NULL, TOMBSTONE);
338 // First CAS old to Prime
339 slot *oldval = val(oldkvs, idx);
340 while (!is_prime(oldval)) {
341 slot *box = (oldval == NULL || oldval == TOMBSTONE)
342 ? TOMBPRIME : new slot(true, oldval->_ptr);
343 if (CAS_val(oldkvs, idx, oldval, box)) {
344 if (box == TOMBPRIME)
345 return 1; // Copy done
346 // Otherwise we CAS'd the box
347 oldval = box; // Record updated oldval
350 oldval = val(oldkvs, idx); // Else re-try
353 if (oldval == TOMBPRIME) return false; // Copy already completed here
355 slot *old_unboxed = new slot(false, oldval->_ptr);
356 int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
359 // Old value is exposed in the new table
360 while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
361 oldval = val(oldkvs, idx);
363 return copied_into_new;
// Sentinel slot constants (defined out of line): MATCH_ANY /
// NO_MATCH_OLD steer putIfMatch's match policy; TOMBSTONE marks a
// deleted entry; TOMBPRIME marks a dead slot in a table being copied.
370 static const int Default_Init_Size = 4; // Initial table size
372 static slot* const MATCH_ANY;
373 static slot* const NO_MATCH_OLD;
375 static slot* const TOMBPRIME;
376 static slot* const TOMBSTONE;
378 static const int REPROBE_LIMIT = 10; // Forces a table-resize
// The current table; swapped by copy_check_and_promote() on resize.
380 atomic<kvs_data*> _kvs;
384 // Should initialize the CHM for the construction of the table
385 // For other CHM in kvs_data, they should be initialized in resize()
386 // because the size is determined dynamically
392 kvs_data *kvs = new kvs_data(Default_Init_Size);
393 void *chm = (void*) new CHM(0);
394 kvs->_data[0].store(chm, memory_order_relaxed);
395 _kvs.store(kvs, memory_order_relaxed);
// Construct a table of the requested size, wiring in its CHM control
// block (slot 0 of the new kvs_data).
// NOTE(review): init_size is used as-is, but probe math elsewhere uses
// 'fullhash & (len - 1)', which assumes a power-of-two size — confirm
// callers only pass powers of two.
398 cliffc_hashtable(int init_size) {
399 // Should initialize the CHM for the construction of the table
400 // For other CHM in kvs_data, they should be initialized in resize()
401 // because the size is determined dynamically
408 kvs_data *kvs = new kvs_data(init_size);
409 void *chm = (void*) new CHM(0);
410 kvs->_data[0].store(chm, memory_order_relaxed);
411 _kvs.store(kvs, memory_order_relaxed);
417 //@Commit_point_set: Get_Point1 | Get_Point2 | Get_ReadKVS | Get_ReadNewKVS | Get_Clear
418 @Commit_point_set: Get_Point1 | Get_Point2 | Get_Clear
419 //@Commit_point_set: Get_Point1 | Get_Point2 | Get_Point3
422 TypeV *_Old_Val = (TypeV*) spec_table_get(map, key);
423 //bool passed = equals_val(_Old_Val, __RET__);
426 int old = _Old_Val == NULL ? 0 : _Old_Val->_val;
427 int ret = __RET__ == NULL ? 0 : __RET__->_val;
428 //model_print("Get: key: %d, _Old_Val: %d, RET: %d\n",
429 //key->_val, old, ret);
432 //__RET__ == NULL ? true : equals_val(_Old_Val, __RET__)
433 equals_val(_Old_Val, __RET__)
// Public lookup. Wraps the key, takes the current table with an acquire
// load, and delegates to get_impl. Returns NULL on a miss; asserts the
// returned slot is never a Prime (get_impl finishes any pending copy).
436 TypeV* get(TypeK *key) {
437 slot *key_slot = new slot(false, key);
438 int fullhash = hash(key_slot);
439 /**** CDSChecker error ****/
440 kvs_data *kvs = _kvs.load(memory_order_acquire);
443 @Commit_point_define_check: true
447 slot *V = get_impl(this, kvs, key_slot, fullhash);
448 if (V == NULL) return NULL;
449 MODEL_ASSERT (!is_prime(V));
450 return (TypeV*) V->_ptr;
456 //@Commit_point_set: Put_Point | Put_ReadKVS | Put_ReadNewKVS | Put_WriteKey
457 @Commit_point_set: Put_Point | Put_WriteKey
460 # Remember this old value at checking point
461 TypeV *_Old_Val = (TypeV*) spec_table_get(map, key);
462 spec_table_put(map, key, val);
463 //bool passed = equals_val(__RET__, _Old_Val);
466 int old = _Old_Val == NULL ? 0 : _Old_Val->_val;
467 int ret = __RET__ == NULL ? 0 : __RET__->_val;
468 //model_print("Put: key: %d, val: %d, _Old_Val: %d, RET: %d\n",
469 // key->_val, val->_val, old, ret);
472 equals_val(__RET__, _Old_Val)
// Unconditional put (NO_MATCH_OLD = always overwrite); returns the
// previous value, or NULL if the key was absent.
475 TypeV* put(TypeK *key, TypeV *val) {
476 return putIfMatch(key, val, NO_MATCH_OLD);
481 @Interface: PutIfAbsent
483 Write_Success_Point | PutIfAbsent_Fail_Point
484 @Condition: !spec_table_contains(map, key)
486 COND_PutIfAbsentSucc :: __RET__ == NULL
489 void *_Old_Val = spec_table_get(map, key);
491 spec_table_put(map, key, value);
493 __COND_SAT__ ? __RET__ == NULL : equals_val(_Old_Val, __RET__)
// Insert only when no mapping exists (expected old value TOMBSTONE);
// returns the existing value when the key is already present, NULL when
// the insert took effect.
// FIX: the call previously used undeclared 'val'; the parameter is
// named 'value'.
496 TypeV* putIfAbsent(TypeK *key, TypeV *value) {
497 return putIfMatch(key, value, TOMBSTONE);
502 @Interface: RemoveAny
503 @Commit_point_set: Write_Success_Point
506 void *_Old_Val = spec_table_get(map, key);
507 spec_table_put(map, key, NULL);
509 equals_val(__RET__, _Old_Val)
// Unconditional remove: stores TOMBSTONE over any existing value;
// returns the removed value, or NULL if the key was absent.
512 TypeV* remove(TypeK *key) {
513 return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
518 @Interface: RemoveIfMatch
520 Write_Success_Point | RemoveIfMatch_Fail_Point
522 equals_val(spec_table_get(map, key), val)
524 COND_RemoveIfMatchSucc :: __RET__ == true
528 spec_table_put(map, key, NULL);
530 __COND_SAT__ ? __RET__ : !__RET__
// Conditional remove: only removes the entry if it currently maps to
// 'val'; returns true on success.
533 bool remove(TypeK *key, TypeV *val) {
534 slot *val_slot = val == NULL ? NULL : new slot(false, val);
// NOTE(review): 'val_slot' is built but never used, and 'val' (TypeV*)
// is passed where the wrapper putIfMatch takes a slot* match value —
// it looks like 'val_slot' was intended as the third argument. TODO
// confirm against Cliff Click's Java original before changing.
535 return putIfMatch(key, TOMBSTONE, val) == val;
541 @Interface: ReplaceAny
546 void *_Old_Val = spec_table_get(map, key);
548 equals_val(__RET__, _Old_Val)
// Replace only when the key currently maps to some value (MATCH_ANY);
// returns the previous value.
551 TypeV* replace(TypeK *key, TypeV *val) {
552 return putIfMatch(key, val, MATCH_ANY);
557 @Interface: ReplaceIfMatch
559 Write_Success_Point | ReplaceIfMatch_Fail_Point
561 equals_val(spec_table_get(map, key), oldval)
563 COND_ReplaceIfMatchSucc :: __RET__ == true
567 spec_table_put(map, key, newval);
569 __COND_SAT__ ? __RET__ : !__RET__
// Conditional replace: install newval only when the key currently maps
// to oldval; returns true on success.
// NOTE(review): 'oldval' (TypeV*) is passed where the wrapper putIfMatch
// takes a slot* match value — the un-wrapped argument looks wrong; TODO
// confirm a slot wrapper was intended, as in remove(key, val).
572 bool replace(TypeK *key, TypeV *oldval, TypeV *newval) {
573 return putIfMatch(key, newval, oldval) == oldval;
// The CHM control block lives in slot 0 of every kvs_data.
577 static CHM* get_chm(kvs_data* kvs) {
578 CHM *res = (CHM*) kvs->_data[0].load(memory_order_relaxed);
// The cached full-hash array lives in slot 1 of every kvs_data.
582 static int* get_hashes(kvs_data *kvs) {
583 return (int *) kvs->_data[1].load(memory_order_relaxed);
586 // Preserve happens-before semantics on newly inserted keys
// Read the key slot for pair idx (_data[idx*2+2]).
// NOTE(review): the load is relaxed despite the happens-before comment
// above — the CDSSpec annotation below treats this read as a potential
// commit point; confirm the intended ordering is established elsewhere
// (e.g. via CAS_key).
587 static inline slot* key(kvs_data *kvs, int idx) {
588 MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
589 // Corresponding to the volatile read in get_impl() and putIfMatch in
590 // Cliff Click's Java implementation
591 slot *res = (slot*) kvs->_data[idx * 2 + 2].load(memory_order_relaxed);
594 # This is a complicated potential commit point since many many functions are
596 @Potential_commit_point_define: true
597 @Label: Read_Key_Point
604 The atomic operation in val() function is a "potential" commit point,
605 which means in some case it is a real commit point while it is not for
606 some other cases. This so happens because the val() function is such a
607 fundamental function that many internal operation will call. Our
608 strategy is that we label any potential commit points and check if they
609 really are the commit points later.
611 // Preserve happens-before semantics on newly inserted values
// Read the value slot for pair idx (_data[idx*2+3]) with acquire
// ordering, pairing with the acq_rel CAS in CAS_val.
612 static inline slot* val(kvs_data *kvs, int idx) {
613 MODEL_ASSERT (idx >= 0 && idx < kvs->_size);
614 // Corresponding to the volatile read in get_impl() and putIfMatch in
615 // Cliff Click's Java implementation
616 /**** CDSChecker error & hb violation ****/
617 slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
620 # This is a complicated potential commit point since many many functions are
622 @Potential_commit_point_define: true
623 @Label: Read_Val_Point
// Hash of the key inside key_slot: TypeK::hashCode() followed by Cliff
// Click's bit-spreading mixer (some mixing steps are elided from this
// view).
631 static int hash(slot *key_slot) {
632 MODEL_ASSERT(key_slot != NULL && key_slot->_ptr != NULL);
633 TypeK* key = (TypeK*) key_slot->_ptr;
634 int h = key->hashCode();
635 // Spread bits according to Cliff Click's code
636 h += (h << 15) ^ 0xffffcd7d;
640 h += (h << 2) + (h << 14);
641 return h ^ (h >> 16);
644 // Heuristic to decide if reprobed too many times.
645 // Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
646 // put it triggers a table resize. Several places MUST have exact agreement.
// Limit scales with table length: 10 + len/4.
647 static int reprobe_limit(int len) {
648 return REPROBE_LIMIT + (len >> 2);
// A Prime slot marks a value that is mid-copy into the new table.
651 static inline bool is_prime(slot *val) {
652 return (val != NULL) && val->_prime;
655 // Check for key equality. Try direct pointer comparison first (fast
656 // negative test) and then the full 'equals' call
// NOTE(review): callers pass the probe index as the 'hash' parameter
// (hashes[hash] is the cached full hash for that slot) while 'fullhash'
// is the actual hash value — the parameter name is misleading.
657 static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
659 // Caller should've checked this.
660 MODEL_ASSERT (K != NULL);
661 TypeK* key_ptr = (TypeK*) key_slot->_ptr;
664 ((hashes[hash] == 0 || hashes[hash] == fullhash) &&
666 key_ptr->equals(K->_ptr));
// Deep value equality: true only when both slots carry non-NULL values
// that compare equal via TypeV::equals().
// FIX: ptr1 was declared TypeK* while being initialized from a (TypeV*)
// cast of a value payload — corrected the declared type to TypeV*.
669 static bool valeq(slot *val_slot1, slot *val_slot2) {
670 MODEL_ASSERT (val_slot1 != NULL);
671 TypeV* ptr1 = (TypeV*) val_slot1->_ptr;
672 if (val_slot2 == NULL || ptr1 == NULL) return false;
673 return ptr1->equals(val_slot2->_ptr);
676 // Together with key() preserve the happens-before relationship on newly
// CAS the key slot for pair idx (_data[2*idx+2]) from 'expected' to
// 'desired'; relaxed on both outcomes.
// NOTE(review): 'expected' is a by-value copy, so the updated value that
// compare_exchange_strong writes on failure is discarded — callers must
// re-read via key(), which they do.
678 static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
679 bool res = kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
680 desired, memory_order_relaxed, memory_order_relaxed);
682 # If it is a successful put instead of a copy or any other internal
683 # operations, expected != NULL
685 @Potential_commit_point_define: res
686 @Label: Write_Key_Point
693 Same as the val() function, we only label the CAS operation as the
694 potential commit point.
696 // Together with val() preserve the happens-before relationship on newly
// CAS the value slot for pair idx (_data[2*idx+3]) from 'expected' to
// 'desired'; acq_rel on success pairs with the acquire load in val().
698 static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
700 /**** CDSChecker error & HB violation ****/
701 bool res = kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
702 desired, memory_order_acq_rel, memory_order_relaxed);
704 # If it is a successful put instead of a copy or any other internal
705 # operations, expected != NULL
707 @Potential_commit_point_define: res
708 @Label: Write_Val_Point
// Core lookup: linear reprobe from 'fullhash & (len-1)'. A NULL key is a
// definite miss; a key hit returns the value (TOMBSTONE maps to NULL) or,
// if the value is mid-copy (Prime), finishes the copy and retries in the
// new table. Too many reprobes falls through to the new table if one
// exists.
714 slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
716 int len = kvs->_size;
717 CHM *chm = get_chm(kvs);
718 int *hashes = get_hashes(kvs);
720 int idx = fullhash & (len - 1);
723 slot *K = key(kvs, idx);
726 @Commit_point_define: K == NULL
727 @Potential_commit_point_label: Read_Key_Point
731 slot *V = val(kvs, idx);
734 //model_print("Key is null\n");
735 return NULL; // A miss
738 if (keyeq(K, key_slot, hashes, idx, fullhash)) {
739 // Key hit! Check if table-resize in progress
743 @Commit_point_clear: true
750 @Commit_point_define: true
751 @Potential_commit_point_label: Read_Val_Point
755 return (V == TOMBSTONE) ? NULL : V; // Return this value
757 // Otherwise, finish the copy & retry in the new table
758 return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
759 idx, key_slot), key_slot, fullhash);
// NOTE(review): two inconsistencies with putIfMatch's probe loop —
// (1) this uses the constant REPROBE_LIMIT while putIfMatch uses
// reprobe_limit(len); the comment on reprobe_limit() says these MUST
// agree exactly; (2) this tests 'key_slot == TOMBSTONE' (the probe key)
// where putIfMatch tests 'K == TOMBSTONE' (the slot read from the
// table). Both look suspicious — confirm against the Java original.
762 if (++reprobe_cnt >= REPROBE_LIMIT ||
763 key_slot == TOMBSTONE) {
764 // Retry in new table
765 // Atomic read can be here
766 /**** FIXME: miss ****/
767 kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
770 @Commit_point_define_check: true
771 @Label: Get_ReadNewKVS
774 return newkvs == NULL ? NULL : get_impl(topmap,
775 topmap->help_copy(newkvs), key_slot, fullhash);
778 idx = (idx + 1) & (len - 1); // Reprobe by 1
782 // A wrapper of the essential function putIfMatch()
// Wraps key/value into slots, loads the current table (acquire) and
// delegates to the static putIfMatch. Maps the internal TOMBSTONE result
// (key was absent) to NULL for callers.
783 TypeV* putIfMatch(TypeK *key, TypeV *value, slot *old_val) {
784 // TODO: Should throw an exception rather return NULL
785 if (old_val == NULL) {
788 slot *key_slot = new slot(false, key);
790 slot *value_slot = new slot(false, value);
791 /**** FIXME: miss ****/
792 kvs_data *kvs = _kvs.load(memory_order_acquire);
795 @Commit_point_define_check: true
799 slot *res = putIfMatch(this, kvs, key_slot, value_slot, old_val);
800 // Only when copy_slot() call putIfMatch() will it return NULL
801 MODEL_ASSERT (res != NULL);
802 MODEL_ASSERT (!is_prime(res));
803 return res == TOMBSTONE ? NULL : (TypeV*) res->_ptr;
807 Put, Remove, PutIfAbsent, etc will call this function. Return the old
808 value. If the returned value is equals to the expVal (or expVal is
809 NO_MATCH_OLD), then this function puts the val_slot to the table 'kvs'.
810 Only copy_slot will pass a NULL expVal, and putIfMatch only returns a
811 NULL if passed a NULL expVal.
// Core insert/update. Probes for the key (claiming a free slot with
// CAS_key when needed), defers to the new table when a resize is in
// progress, enforces the expVal match policy (NO_MATCH_OLD / MATCH_ANY /
// TOMBSTONE / exact value), and finally CASes the value slot. Returns
// the previous value slot; NULL only when called from copy_slot with a
// NULL expVal.
813 static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
814 *key_slot, slot *val_slot, slot *expVal) {
815 MODEL_ASSERT (val_slot != NULL);
816 MODEL_ASSERT (!is_prime(val_slot));
817 MODEL_ASSERT (!is_prime(expVal));
819 int fullhash = hash(key_slot);
820 int len = kvs->_size;
821 CHM *chm = get_chm(kvs);
822 int *hashes = get_hashes(kvs);
823 int idx = fullhash & (len - 1);
831 while (true) { // Spin till we get a key slot
834 if (K == NULL) { // Get a free slot
// Removing an absent key needs no slot — report "no old value".
835 if (val_slot == TOMBSTONE) return val_slot;
836 // Claim the null key-slot
837 if (CAS_key(kvs, idx, NULL, key_slot)) {
840 @Commit_point_define: true
841 @Potential_commit_point_label: Write_Key_Point
845 chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
846 hashes[idx] = fullhash; // Memorize full hash
849 K = key(kvs, idx); // CAS failed, get updated value
850 MODEL_ASSERT (K != NULL);
853 // Key slot not null, there exists a Key here
854 if (keyeq(K, key_slot, hashes, idx, fullhash))
857 // Notice that the logic here should be consistent with that of get.
858 // The first predicate means too many reprobes means nothing in the
860 if (++reprobe_cnt >= reprobe_limit(len) ||
861 K == TOMBSTONE) { // Found a Tombstone key, no more keys
862 newkvs = chm->resize(topmap, kvs);
863 //model_print("resize1\n");
864 // Help along an existing copy
865 if (expVal != NULL) topmap->help_copy(newkvs);
866 return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
869 idx = (idx + 1) & (len - 1); // Reprobe
870 } // End of spinning till we get a Key slot
872 if (val_slot == V) return V; // Fast cutout for no-change
874 // Here it tries to resize cause it doesn't want other threads to stop
875 // its progress (eagerly try to resize soon)
876 /**** FIXME: miss ****/
877 newkvs = chm->_newkvs.load(memory_order_acquire);
880 @Commit_point_define_check: true
881 @Label: Put_ReadNewKVS
884 if (newkvs == NULL &&
885 ((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V))) {
886 //model_print("resize2\n");
887 newkvs = chm->resize(topmap, kvs); // Force the copy to start
890 // Finish the copy and then put it in the new table
892 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
893 expVal), key_slot, val_slot, expVal);
895 // Decided to update the existing table
897 MODEL_ASSERT (!is_prime(V));
// Match-policy check: bail out (returning the current value) when the
// caller's expected-old-value condition does not hold.
899 if (expVal != NO_MATCH_OLD &&
901 (expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
902 !(V == NULL && expVal == TOMBSTONE) &&
903 (expVal == NULL || !valeq(expVal, V))) {
906 @Commit_point_define: expVal == TOMBSTONE
907 @Potential_commit_point_label: Read_Val_Point
908 @Label: PutIfAbsent_Fail_Point
909 # This is a check for the PutIfAbsent() when the value
915 @Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
916 @Potential_commit_point_label: Read_Val_Point
917 @Label: RemoveIfMatch_Fail_Point
922 @Commit_point_define: expVal != NULL && !valeq(expVal, V)
923 @Potential_commit_point_label: Read_Val_Point
924 @Label: ReplaceIfMatch_Fail_Point
927 return V; // Do not update!
930 if (CAS_val(kvs, idx, V, val_slot)) {
933 # The only point where a successful put happens
934 @Commit_point_define: true
935 @Potential_commit_point_label: Write_Val_Point
939 if (expVal != NULL) { // Not called by a table-copy
940 // CAS succeeded, should adjust size
941 // Both normal put's and table-copy calls putIfMatch, but
942 // table-copy does not increase the number of live K/V pairs
943 if ((V == NULL || V == TOMBSTONE) &&
944 val_slot != TOMBSTONE)
945 chm->_size.fetch_add(1, memory_order_relaxed);
946 if (!(V == NULL || V == TOMBSTONE) &&
947 val_slot == TOMBSTONE)
948 chm->_size.fetch_add(-1, memory_order_relaxed);
950 return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
// Value CAS failed: if a Prime appeared the table is being copied —
// finish this slot's copy and retry in the new table.
955 return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
956 idx, expVal), key_slot, val_slot, expVal);
960 // Help along an existing table-resize. This is a fast cut-out wrapper.
// Returns 'helper' untouched when no resize is running; otherwise
// contributes one chunk of copy work (copy_all = false) on the current
// top-level table.
961 kvs_data* help_copy(kvs_data *helper) {
962 /**** FIXME: miss ****/
963 kvs_data *topkvs = _kvs.load(memory_order_acquire);
964 CHM *topchm = get_chm(topkvs);
965 // No copy in progress
966 if (topchm->_newkvs.load(memory_order_relaxed) == NULL) return helper;
967 topchm->help_copy_impl(this, topkvs, false);