add cliff click hashtable
[model-checker-benchmarks.git] / cliffc-hashtable / cliffc_hashtable.h
1 #ifndef _CLIFFC_HASHTABLE_H
2 #define _CLIFFC_HASHTABLE_H
3
4 #include <iostream>
5 #include <atomic>
6 #include <memory>
7 #include <assert.h>
8
9 using namespace std;
10
11 /**
12         This header file declares and defines a simplified version of Cliff Click's
13         NonblockingHashMap. It contains all the necessary structures and main
14         functions. In simplified_cliffc_hashtable.cc file, it has the definition for
15         the static fields.
16 */
17
18 template<typename TypeK, typename TypeV, class Hash, class KeyEqualsTo, class ValEqualsTo>
19 class cliffc_hashtable;
20
21 /**
22         Corresponding to the Object[] array in Cliff Click's Java implementation.
23         It keeps the first two slots for CHM (Hashtable control unit) and the hash
24         records (an array of hash used for fast negative key-equality check).
25 */
struct kvs_data {
	int _size;                  // Number of K,V slot pairs in this table
	std::atomic<void*> *_data;  // [0]=CHM*, [1]=int* hashes, then K/V pairs

	/**
		Allocate a table holding sz key/value pairs.  Layout of _data:
		  _data[0]       -> CHM control block (installed by the caller/resize())
		  _data[1]       -> int[sz] of cached full hashes (0 == not recorded)
		  _data[2*i + 2] -> key slot i
		  _data[2*i + 3] -> value slot i
	*/
	kvs_data(int sz) {
		_size = sz;
		// Two header slots plus one key and one value slot per entry.
		// Fixed: the original computed "sizeof(atomic<void*>) * 2 + 2",
		// which is unrelated to sz and under-allocates for any real table.
		int real_size = sz * 2 + 2;
		_data = new std::atomic<void*>[real_size];
		// The control block (slot 0) should be initialized in resize()
		// Init the hash record array
		int *hashes = new int[sz];
		for (int i = 0; i < sz; i++)
			hashes[i] = 0;

		// Initialize the header slots
		_data[0].store(NULL, std::memory_order_relaxed);
		_data[1].store(hashes, std::memory_order_relaxed);
		// Init the key/value slots to the NULL (empty) slot
		for (int i = 2; i < real_size; i++)
			_data[i].store(NULL, std::memory_order_relaxed);
	}

	~kvs_data() {
		int *hashes = (int*) _data[1].load(std::memory_order_relaxed);
		// Fixed: arrays allocated with new[] must be freed with delete[]
		delete[] hashes;
		delete[] _data;
	}
};
57
struct slot {
	// True while this slot carries a "primed" (mid-copy) marker.
	bool _prime;
	// Payload pointer: the boxed key or value this slot wraps.
	void *_ptr;

	// Box a raw pointer, optionally marking it prime.
	slot(bool prime, void *ptr) : _prime(prime), _ptr(ptr) {}
};
68
69
70 /**
71         TypeK and TypeV should define their own copy constructor.
72 */
73 template<typename TypeK, typename TypeV, class Hash, class KeyEqualsTo, class ValEqualsTo>
74 class cliffc_hashtable {
75         /**
76                 # The synchronization we have for the hashtable gives us the property of
77                 # serializability, so we should have a sequential hashtable when we check the
78                 # correctness. The key thing is to identify all the commit points.
79         
80                 @Begin
81                 @Global_define:
82
83                         spec_hashtable<TypeK, TypeV*> __map;
84                         spec_hashtable<TypeK, Tag> __id_map;
85                         Tag __tag;
86
87                         static bool _val_equals(TypeV *ptr1, TypeV *ptr2) {
88                                 // ...
89                         }
90                         
91                         # Update the tag for the current key slot if the corresponding tag
92                         # is NULL, otherwise just return that tag. It will update the next
93                         # available tag too if it requires a new tag for that key slot.
94                         static Tag _getKeyTag(TypeK &key) {
95                                 if (__id_map.get(key) == NULL) {
96                                         Tag cur_tag = tag.current();
97                                         __id_map.put(key, cur_tag);
98                                         __tag.next();
99                                         return cur_tag;
100                                 } else {
101                                         return __id_map.get(key);
102                                 }
103                         }
104                 
105                 @Interface_cluster:
106                         Read_interface = {
107                                 Get,
108                                 PutIfAbsent,
109                                 RemoveAny,
110                                 RemoveIfMatch,
111                                 ReplaceAny,
112                                 ReplaceIfMatch
113                         }
114                         
115                         Write_interface = {
116                                 Put,
117                                 PutIfAbsent(COND_PutIfAbsentSucc),
118                                 RemoveAny,
119                                 RemoveIfMatch(COND_RemoveIfMatchSucc),
120                                 ReplaceAny,
121                                 ReplaceIfMatch(COND_ReplaceIfMatchSucc)
122                         }
123                 @Happens_before:
124                         Write_interface -> Read_interface
125                 @End
126         */
127
128 friend class CHM;
129         /**
130                 The control structure for the hashtable
131         */
132         private:
133         class CHM {
134                 friend class cliffc_hashtable;
135                 private:
136                 atomic<kvs_data*> _newkvs;
137                 
138                 // Size of active K,V pairs
139                 atomic_int _size;
140         
141                 // Count of used slots
142                 atomic_int _slots;
143                 
144                 // The next part of the table to copy
145                 atomic_int _copy_idx;
146                 
147                 // Work-done reporting
148                 atomic_int _copy_done;
149         
150                 public:
151                 CHM(int size) {
152                         _size.store(size, memory_order_relaxed);
153                         _slots.store(0, memory_order_relaxed);
154         
155                         _copy_idx.store(0, memory_order_relaxed);
156                         _copy_done.store(0, memory_order_relaxed);
157                 }
158         
159                 ~CHM() {}
160                 
161                 private:
162                         
163                 // Heuristic to decide if the table is too full
164                 bool table_full(int reprobe_cnt, int len) {
165                         return
166                                 reprobe_cnt >= REPROBE_LIMIT &&
167                                 _slots.load(memory_order_relaxed) >= reprobe_limit(len);
168                 }
169         
170                 kvs_data* resize(cliffc_hashtable *topmap, kvs_data *kvs) {
171                         kvs_data *newkvs = _newkvs.load(memory_order_acquire);
172                         if (newkvs != NULL)
173                                 return newkvs;
174         
175                         // No copy in-progress, start one; Only double the table size
176                         int oldlen = kvs->_size;
177                         int sz = _size.load(memory_order_relaxed);
178                         int newsz = sz;
179                         
180                         // Just follow Cliff Click's heuristic to decide the new size
181                         if (sz >= (oldlen >> 2)) { // If we are 25% full
182                                 newsz = oldlen << 1; // Double size
183                                 if (sz >= (oldlen >> 1))
184                                         newsz = oldlen << 2; // Double double size
185                         }
186         
187                         // We do not record the record timestamp
188                         if (newsz <= oldlen) newsz = oldlen << 1;
189                         // Do not shrink ever
190                         if (newsz < oldlen) newsz = oldlen;
191         
192                         // Last check cause the 'new' below is expensive
193                         newkvs = _newkvs.load(memory_order_acquire);
194                         if (newkvs != NULL) return newkvs;
195         
196                         newkvs = new kvs_data(newsz);
197                         void *chm = (void*) new CHM(sz);
198                         newkvs->_data[0].store(chm, memory_order_relaxed);
199         
200                         kvs_data *cur_newkvs; 
201                         // Another check after the slow allocation
202                         if ((cur_newkvs = _newkvs.load(memory_order_acquire)) != NULL)
203                                 return cur_newkvs;
204                         // CAS the _newkvs to the allocated table
205                         kvs_data *desired = (kvs_data*) NULL;
206                         kvs_data *expected = (kvs_data*) newkvs; 
207                         if (!_newkvs.compare_exchange_strong(desired, expected, memory_order_release,
208                                         memory_order_release)) {
209                                 // Should clean the allocated area
210                                 delete newkvs;
211                                 newkvs = _newkvs.load(memory_order_acquire);
212                         }
213                         return newkvs;
214                 }
215         
216                 void help_copy_impl(cliffc_hashtable *topmap, kvs_data *oldkvs,
217                         bool copy_all) {
218                         assert (get_chm(oldkvs) == this);
219                         kvs_data *newkvs = _newkvs.load(memory_order_acquire);
220                         int oldlen = oldkvs->_size;
221                         int min_copy_work = oldlen > 1024 ? 1024 : oldlen;
222                 
223                         // Just follow Cliff Click's code here
224                         int panic_start = -1;
225                         int copyidx;
226                         while (_copy_done.load(memory_order_acquire) < oldlen) {
227                                 copyidx = _copy_idx.load(memory_order_acquire);
228                                 if (panic_start == -1) { // No painc
229                                         copyidx = _copy_idx.load(memory_order_acquire);
230                                         while (copyidx < (oldlen << 1) &&
231                                                 !_copy_idx.compare_exchange_strong(copyidx, copyidx +
232                                                         min_copy_work, memory_order_release, memory_order_release))
233                                                 copyidx = _copy_idx.load(memory_order_relaxed);
234                                         if (!(copyidx < (oldlen << 1)))
235                                                 panic_start = copyidx;
236                                 }
237         
238                                 // Now copy the chunk of work we claimed
239                                 int workdone = 0;
240                                 for (int i = 0; i < min_copy_work; i++)
241                                         if (copy_slot(topmap, (copyidx + i) & (oldlen - 1), oldkvs,
242                                                 newkvs))
243                                                 workdone++;
244                                 if (workdone > 0)
245                                         copy_check_and_promote(topmap, oldkvs, workdone);
246         
247                                 copyidx += min_copy_work;
248                                 if (!copy_all && panic_start == -1)
249                                         return; // We are done with the work we claim
250                         }
251                         copy_check_and_promote(topmap, oldkvs, 0); // See if we can promote
252                 }
253         
254                 kvs_data* copy_slot_and_check(cliffc_hashtable *topmap, kvs_data
255                         *oldkvs, int idx, void *should_help) {
256                         kvs_data *newkvs = _newkvs.load(memory_order_acquire);
257                         // We're only here cause the caller saw a Prime
258                         if (copy_slot(topmap, idx, oldkvs,
259                                 _newkvs.load(memory_order_relaxed)))
260                                 copy_check_and_promote(topmap, oldkvs, 1); // Record the slot copied
261                         return (should_help == NULL) ? newkvs : topmap->help_copy(newkvs);
262                 }
263         
264                 void copy_check_and_promote(cliffc_hashtable *topmap, kvs_data*
265                         oldkvs, int workdone) {
266                         int oldlen = oldkvs->_size;
267                         int copyDone = _copy_done.load(memory_order_relaxed);
268                         if (workdone > 0) {
269                                 while (true) {
270                                         copyDone = _copy_done.load(memory_order_relaxed);
271                                         if (_copy_done.compare_exchange_weak(copyDone, copyDone +
272                                                 workdone, memory_order_relaxed, memory_order_relaxed))
273                                                 break;
274                                 }
275                         }
276         
277                         // Promote the new table to the current table
278                         if (copyDone + workdone == oldlen &&
279                                 topmap->_kvs.load(memory_order_acquire) == oldkvs)
280                                 topmap->_kvs.compare_exchange_strong(oldkvs,
281                                         _newkvs.load(memory_order_relaxed), memory_order_release,
282                                         memory_order_release);
283                 }
284         
285                 bool copy_slot(cliffc_hashtable *topmap, int idx, kvs_data *oldkvs,
286                         kvs_data *newkvs) {
287                         slot *key_slot;
288                         while ((key_slot = key(oldkvs, idx)) == NULL)
289                                 CAS_key(oldkvs, idx, NULL, TOMBSTONE);
290         
291                         // First CAS old to Prime
292                         slot *oldval = val(oldkvs, idx);
293                         while (!is_prime(oldval)) {
294                                 slot *box = (oldval == NULL || oldval == TOMBSTONE)
295                                         ? TOMBPRIME : new slot(true, oldval->_ptr);
296                                 if (CAS_val(oldkvs, idx, oldval, box)) {
297                                         if (box == TOMBPRIME)
298                                                 return 1; // Copy done
299                                         // Otherwise we CAS'd the box
300                                         oldval = box; // Record updated oldval
301                                         break;
302                                 }
303                                 oldval = val(oldkvs, idx); // Else re-try
304                         }
305         
306                         if (oldval == TOMBPRIME) return false; // Copy already completed here
307         
308                         slot *old_unboxed = new slot(false, oldval->_ptr);
309                         int copied_into_new = (putIfMatch(topmap, newkvs, key_slot, old_unboxed,
310                                 NULL) == NULL);
311         
312                         // Old value is exposed in the new table
313                         while (!CAS_val(oldkvs, idx, oldval, TOMBPRIME))
314                                 oldval = val(oldkvs, idx);
315         
316                         return copied_into_new;
317                 }
318         };
319
320         
321
322         private:
323         static const int Default_Init_Size = 8; // Initial table size (a power of two)
324
            // Sentinel slot* values; their storage is defined in the .cc file.
            // Per the putIfMatch() contract below: NO_MATCH_OLD installs the new
            // value regardless of the old one; MATCH_ANY replaces any present value.
325         static slot* const MATCH_ANY;
326         static slot* const NO_MATCH_OLD;
327
            // TOMBPRIME marks a slot whose copy to the new table is complete
            // (see copy_slot()); TOMBSTONE marks a deleted key/value.
328         static slot* const TOMBPRIME;
329         static slot* const TOMBSTONE;
330
331         static const int REPROBE_LIMIT = 10; // Forces a table-resize
332
            // User-supplied functor instances (see the template parameters)
333         static Hash hashFunc;
334         static KeyEqualsTo keyEqualsTo;
335         static ValEqualsTo valEqualsTo;
336
            // The current table; swapped by CHM::copy_check_and_promote()
            // when a resize finishes.
337         atomic<kvs_data*> _kvs;
338
339         public:
340         cliffc_hashtable() {
341                 // Should initialize the CHM for the construction of the table
342                 // For other CHM in kvs_data, they should be initialzed in resize()
343                 // because the size is determined dynamically
344                 kvs_data *kvs = new kvs_data(Default_Init_Size);
345                 void *chm = (void*) new CHM(0);
346                 kvs->_data[0].store(chm, memory_order_relaxed);
347                 _kvs.store(kvs, memory_order_release);
348         }
349
350         cliffc_hashtable(int init_size) {
351                 // Should initialize the CHM for the construction of the table
352                 // For other CHM in kvs_data, they should be initialzed in resize()
353                 // because the size is determined dynamically
354                 kvs_data *kvs = new kvs_data(init_size);
355                 void *chm = (void*) new CHM(0);
356                 kvs->_data[0].store(chm, memory_order_relaxed);
357                 _kvs.store(kvs, memory_order_release);
358         }
359
360         /**
361                 @Begin
362                 @Interface: Get
363                 @Commit_point_set: Read_Val_Point1 | Read_Val_Point2 | Read_Val_Point3
364                 @ID: _getKeyTag(key)
365                 @Action:
366                         @DefineVar: TypeV *_Old_Val = __map.get(key)
367                 @Post_check:
368                         _equals_val(_Old_Val, __RET__)
369                 @End
370         */
371         TypeV* get(TypeK& key) {
372                 void *key_ptr = (void*) new TypeK(key);
373                 slot *key_slot = new slot(false, key_ptr);
374                 int fullhash = hash(key_slot);
375                 slot *V = get_impl(this, _kvs.load(memory_order_relaxed), key_slot, fullhash);
376                 if (V == NULL) return NULL;
377                 assert (!is_prime(V));
378                 return (TypeV*) V->_ptr;
379         }
380
381         /**
382                 @Begin
383                 @Interface: Put
384                 @Commit_point_set: Write_Val_Point
385                 @ID: _getKeyTag(key)
386                 @Action:
387                         # Remember this old value at checking point
388                         @DefineVar: TypeV *_Old_Val = __map.get(key)
389                         @Code: __map.put(key, &val);
390                 @Post_check:
391                         _equals_val(__RET__, _Old_Val)
392                 @End
393         */
            // Unconditionally map key -> val; returns the previous value
            // (NULL when the key was absent).  NO_MATCH_OLD tells
            // putIfMatch() to install the new value regardless of the old.
394         TypeV* put(TypeK& key, TypeV& val) {
395                 return putIfMatch(key, val, NO_MATCH_OLD);
396         }
397
398         /**
399                 @Begin
400                 @Interface: PutIfAbsent
401                 @Commit_point_set:
402                         Write_Val_Point | PutIfAbsent_Fail_Point
403                 @Condition: __map.get(key) == NULL
404                 @HB_condition:
405                         COND_PutIfAbsentSucc :: __RET__ == NULL
406                 @ID: _getKeyTag(key)
407                 @Action:
408                         @DefineVar: TypeV *_Old_Val = __map.get(key)
409                         @Code:
410                         if (COND_SAT)
411                                 __map.put(key, &value);
412                 @Post_check:
413                         COND_SAT ? __RET__ == NULL : _equals_val(_Old_Val, __RET__) 
414                 @End
415         */
416         TypeV* putIfAbsent(TypeK& key, TypeV& value) {
417                 return putIfMatch(key, val, TOMBSTONE);
418         }
419
420         /**
421                 @Begin
422                 @Interface: RemoveAny
423                 @Commit_point_set: Write_Val_Point
424                 @ID: _getKeyTag(key)
425                 @Action:
426                         @DefineVar: TypeV *_Old_Val = __map.get(key)
427                         @Code: __map.put(key, NULL);
428                 @Post_check:
429                         _equals_val(__RET__, _Old_Val)
430                 @End
431         */
            // Remove 'key' unconditionally by writing a TOMBSTONE; returns
            // the previous value (NULL when the key was absent).
            // NOTE(review): TOMBSTONE is a slot*, but this wrapper's second
            // parameter is TypeV& -- this call cannot compile when
            // instantiated; it likely needs to route through the internal
            // slot-level putIfMatch.  Confirm against the original benchmark.
432         TypeV* remove(TypeK& key) {
433                 return putIfMatch(key, TOMBSTONE, NO_MATCH_OLD);
434         }
435
436         /**
437                 @Begin
438                 @Interface: RemoveIfMatch
439                 @Commit_point_set:
440                         Write_Val_Point | RemoveIfMatch_Fail_Point
441                 @Condition:
442                         _equals_val(__map.get(key), &val)
443                 @HB_condition:
444                         COND_RemoveIfMatchSucc :: __RET__ == true
445                 @ID: _getKeyTag(key)
446                 @Action:
447                         @Code:
448                         if (COND_SAT)
449                                 __map.put(key, NULL);
450                 @Post_check:
451                         COND_SAT ? __RET__ : !__RET__
452                 @End
453         */
            // Remove 'key' only when it currently maps to 'val'; returns true
            // on success.
            // NOTE(review): several problems visible here -- val_slot is
            // computed but never used; "val == NULL" is ill-formed unless
            // TypeV is comparable to NULL; and TOMBSTONE (slot*) / val
            // (TypeV&) do not match the wrapper's (TypeV&, slot*) parameters.
            // Presumably the intent was putIfMatch(key, TOMBSTONE, val_slot)
            // at the slot level -- confirm against the original benchmark.
454         bool remove(TypeK& key, TypeV& val) {
455                 slot *val_slot = val == NULL ? NULL : new slot(false, val);
456                 return putIfMatch(key, TOMBSTONE, val) == val;
457
458         }
459
460         /**
461                 @Begin
462                 @Interface: ReplaceAny
463                 @Commit_point_set:
464                         Write_Val_Point
465                 @ID: _getKeyTag(key)
466                 @Action:
467                         @DefineVar: TypeV *_Old_Val = __map.get(key)
468                 @Post_check:
469                         _equals_val(__RET__, _Old_Val)
470                 @End
471         */
            // Replace the value for 'key' with 'val'; MATCH_ANY makes
            // putIfMatch() accept whatever value is currently present.
            // Returns the previous value.
472         TypeV* replace(TypeK& key, TypeV& val) {
473                 return putIfMatch(key, val, MATCH_ANY);
474         }
475
476         /**
477                 @Begin
478                 @Interface: ReplaceIfMatch
479                 @Commit_point_set:
480                         Write_Val_Point | ReplaceIfMatch_Fail_Point
481                 @Condition:
482                         _equals_val(__map.get(key), &oldval)
483                 @HB_condition:
484                         COND_ReplaceIfMatchSucc :: __RET__ == true
485                 @ID: _getKeyTag(key)
486                 @Action:
487                         @Code:
488                         if (COND_SAT)
489                                 __map.put(key, &newval);
490                 @Post_check:
491                         COND_SAT ? __RET__ : !__RET__
492                 @End
493         */
            // Replace key's value with newval only when it currently equals
            // oldval; returns true on success.
            // NOTE(review): oldval (TypeV&) is passed where the wrapper
            // expects a slot*, and the result (TypeV*) is compared against
            // TypeV& -- neither compiles when instantiated.  oldval likely
            // needs to be boxed into a slot first; confirm against the
            // original benchmark.
494         bool replace(TypeK& key, TypeV& oldval, TypeV& newval) {
495                 return putIfMatch(key, newval, oldval) == oldval;
496         }
497
498         private:
499         static CHM* get_chm(kvs_data* kvs) {
500                 return (CHM*) kvs->_data[0].load(memory_order_relaxed);
501         }
502
503         static int* get_hashes(kvs_data *kvs) {
504                 return (int *) kvs->_data[1].load(memory_order_relaxed);
505         }
506         
507         // Preserve happens-before semantics on newly inserted keys
508         static inline slot* key(kvs_data *kvs, int idx) {
509                 assert (idx >= 0 && idx < kvs->_size);
510                 // Corresponding to the volatile read in get_impl() and putIfMatch in
511                 // Cliff Click's Java implementation
512                 return (slot*) kvs->_data[idx * 2 + 2].load(memory_order_acquire);
513         }
514
515         /**
516                 The atomic operation in val() function is a "potential" commit point,
517                 which means in some case it is a real commit point while it is not for
518                 some other cases. This so happens because the val() function is such a
519                 fundamental function that many internal operation will call. Our
520                 strategy is that we label any potential commit points and check if they
521                 really are the commit points later.
522         */
523         // Preserve happens-before semantics on newly inserted values
            // Read the value slot for pair idx (stored at _data[2*idx + 3]);
            // the acquire pairs with the release CAS in CAS_val().
524         static inline slot* val(kvs_data *kvs, int idx) {
525                 assert (idx >= 0 && idx < kvs->_size);
526                 // Corresponding to the volatile read in get_impl() and putIfMatch in
527                 // Cliff Click's Java implementation
528                 slot *res = (slot*) kvs->_data[idx * 2 + 3].load(memory_order_acquire);
529                 /**
530                         @Begin
531                         # This is a complicated potential commit point since many many functions are
532                         # calling val().
533                         @Potential_commit_point_define: true
534                         @Label: Read_Val_Point
535                         @End
536                 */
537                 return res;
538
539
540         }
541
            // Compute the user hash for the boxed key, then run Cliff Click's
            // bit-spreader so power-of-two masking uses all the bits.
            // NOTE(review): the right shifts act on a signed int, so behavior
            // for negative intermediate values is implementation-defined --
            // presumably acceptable for this benchmark, but confirm.
542         static int hash(slot *key_slot) {
543                 assert(key_slot != NULL && key_slot->_ptr != NULL);
544                 TypeK* key = (TypeK*) key_slot->_ptr;
545                 int h = hashFunc(*key);
546                 // Spread bits according to Cliff Click's code
547                 h += (h << 15) ^ 0xffffcd7d;
548                 h ^= (h >> 10);
549                 h += (h << 3);
550                 h ^= (h >> 6);
551                 h += (h << 2) + (h << 14);
552                 return h ^ (h >> 16);
553         }
554         
555         // Heuristic to decide if reprobed too many times. 
556         // Be careful here: Running over the limit on a 'get' acts as a 'miss'; on a
557         // put it triggers a table resize. Several places MUST have exact agreement.
558         static int reprobe_limit(int len) {
559                 return REPROBE_LIMIT + (len >> 2);
560         }
561         
562         static inline bool is_prime(slot *val) {
563                 return (val != NULL) && val->_prime;
564         }
565
566         // Check for key equality. Try direct pointer comparison first (fast
567         // negative test) and then the full 'equals' call
            // K        -- the key slot read from the table (never NULL here)
            // key_slot -- the caller's boxed probe key
            // hashes   -- the table's cached full-hash array; hashes[hash] == 0
            //             means "not recorded", so only a recorded, differing
            //             hash can short-circuit the equality call
            // hash     -- the table index being probed; fullhash -- probe's hash
568         static bool keyeq(slot *K, slot *key_slot, int *hashes, int hash,
569                 int fullhash) {
570                 // Caller should've checked this.
571                 assert (K != NULL);
572                 TypeK* key_ptr = (TypeK*) key_slot->_ptr;
573                 return
574                         K == key_slot ||
575                                 ((hashes[hash] == 0 || hashes[hash] == fullhash) &&
576                                 K != TOMBSTONE &&
577                                 keyEqualsTo(*key_ptr, *((TypeK*) K->_ptr)));
578         }
579
580         static bool valeq(slot *val_slot1, slot *val_slot2) {
581                 assert (val_slot1 != NULL);
582                 TypeK* ptr1 = (TypeV*) val_slot1->_ptr;
583                 if (val_slot2 == NULL || ptr1 == NULL) return false;
584                 return valEqualsTo(*ptr1, *((TypeV*) val_slot2->_ptr));
585         }
586         
587         // Together with key() preserve the happens-before relationship on newly
588         // inserted keys
589         static inline bool CAS_key(kvs_data *kvs, int idx, void *expected, void *desired) {
590                 return kvs->_data[2 * idx + 2].compare_exchange_strong(expected,
591                         desired, memory_order_release, memory_order_release);
592         }
593
594         /**
595                 Same as the val() function, we only label the CAS operation as the
596                 potential commit point.
597         */
598         // Together with val() preserve the happens-before relationship on newly
599         // inserted values
600         static inline bool CAS_val(kvs_data *kvs, int idx, void *expected, void
601                 *desired) {
602                 bool res =  kvs->_data[2 * idx + 3].compare_exchange_strong(expected,
603                         desired, memory_order_release, memory_order_release);
604                 /**
605                         # If it is a successful put instead of a copy or any other internal
606                         # operantions, expected != NULL
607                         @Begin
608                         @Potential_commit_point_define: __ATOMIC_RET__ == true
609                         @Label: Write_Val_Point
610                         @End
611                 */
612                 return res;
613         }
614
            // Core lookup: probe for key_slot's key in table 'kvs', following
            // into the newer table when a resize is in progress.  Returns the
            // table's value slot* (possibly TOMBSTONE) or NULL on a miss.
615         slot* get_impl(cliffc_hashtable *topmap, kvs_data *kvs, slot* key_slot, int
616                 fullhash) {
617                 int len = kvs->_size;
618                 CHM *chm = get_chm(kvs);
619                 int *hashes = get_hashes(kvs);
620
                    // Power-of-two table: the mask picks the home slot
621                 int idx = fullhash & (len - 1);
622                 int reprobe_cnt = 0;
623                 while (true) {
624                         slot *K = key(kvs, idx);
625                         slot *V = val(kvs, idx);
626                         /**
627                                 @Begin
628                                 @Commit_point_define: V == NULL
629                                 @Potential_commit_point_label: Read_Val_Point
630                                 @Label: Get_Success_Point_1
631                                 @End
632                         */
633
634                         if (V == NULL) return NULL; // A miss
635                         
636                         if (keyeq(K, key_slot, hashes, idx, fullhash)) {
637                                 // Key hit! Check if table-resize in progress
638                                 if (!is_prime(V)) {
639                                         /**
640                                                 @Begin
641                                                 @Commit_point_define: true
642                                                 @Potential_commit_point_label: Read_Val_Point
643                                                 @Label: Get_Success_Point_2
644                                                 @End
645                                         */
646                                         return (V == TOMBSTONE) ? NULL : V; // Return this value
647                                 }
648                                 // Otherwise, finish the copy & retry in the new table
649                                 return get_impl(topmap, chm->copy_slot_and_check(topmap, kvs,
650                                         idx, key_slot), key_slot, fullhash);
651                         }
652
                            // NOTE(review): "key_slot == TOMBSTONE" tests the caller's
                            // freshly boxed probe key, which can never be TOMBSTONE;
                            // Cliff Click's original tests the table's key (K) here --
                            // confirm against the reference implementation.
653                         if (++reprobe_cnt >= REPROBE_LIMIT ||
654                                 key_slot == TOMBSTONE) {
655                                 // Retry in new table
656                                 // Atomic read (acquire) can be here 
657                                 kvs_data *newkvs = chm->_newkvs.load(memory_order_acquire);
658                                 /**
659                                         @Begin
660                                         @Commit_point_define_check: newkvs == NULL
661                                         @Label: Get_Success_Point_3
662                                         @End
663                                 */
664                                 return newkvs == NULL ? NULL : get_impl(topmap,
665                                         topmap->help_copy(newkvs), key_slot, fullhash);
666                         }
667
668                         idx = (idx + 1) & (len - 1); // Reprobe by 1
669                 }
670         }
671
672         // A wrapper of the essential function putIfMatch()
            // Boxing wrapper around the slot-level putIfMatch(): copies the
            // caller's key and value into fresh slots, runs the put against
            // the current table, and unboxes the previous value (TOMBSTONE
            // maps back to NULL).  old_val selects the match policy
            // (NO_MATCH_OLD / MATCH_ANY / TOMBSTONE / a concrete value slot).
673         TypeV* putIfMatch(TypeK& key, TypeV& value, slot *old_val) {
674                 // TODO: Should throw an exception rather return NULL
675                 if (old_val == NULL) {
676                         return NULL;
677                 }
678                 void *key_ptr = (void*) new TypeK(key);
679                 slot *key_slot = new slot(false, key_ptr);
680
681                 void *val_ptr = (void*) new TypeV(value);
682                 slot *value_slot = new slot(false, val_ptr);
683                 slot *res = putIfMatch(this, _kvs.load(memory_order_relaxed), key_slot,
684                         value_slot, old_val);
685                 // Only when copy_slot() call putIfMatch() will it return NULL
686                 assert (res != NULL); 
687                 assert (!is_prime(res));
688                 return res == TOMBSTONE ? NULL : (TypeV*) res->_ptr;
689         }
690
	/**
		Core update routine: Put, Remove, PutIfAbsent, etc will call this
		function. Return the old value. If the returned value is equals to the
		expVal (or expVal is NO_MATCH_OLD), then this function puts the
		val_slot to the table 'kvs'.
		Only copy_slot will pass a NULL expVal, and putIfMatch only returns a
		NULL if passed a NULL expVal.

		topmap   - the enclosing hashtable (used to help along table copies)
		kvs      - the key/value/hash array to operate on
		key_slot - boxed key to insert/match (never a prime)
		val_slot - boxed value to store; TOMBSTONE means "remove"
		expVal   - expected old value governing the conditional update;
		           NULL only on the internal copy_slot() path
	*/
	static slot* putIfMatch(cliffc_hashtable *topmap, kvs_data *kvs, slot
		*key_slot, slot *val_slot, slot *expVal) {
		assert (val_slot != NULL);
		assert (!is_prime(val_slot));
		assert (!is_prime(expVal));

		int fullhash = hash(key_slot);
		int len = kvs->_size;
		CHM *chm = get_chm(kvs);
		int *hashes = get_hashes(kvs);
		// Table size is a power of two (assumed throughout this file —
		// masking with len-1 only works then), so this is fullhash % len.
		int idx = fullhash & (len - 1);

		// Claim a key slot
		int reprobe_cnt = 0;
		slot *K;
		slot *V;
		kvs_data *newkvs;
		
		while (true) { // Spin till we get a key slot
			K = key(kvs, idx);
			V = val(kvs, idx);
			if (K == NULL) { // Get a free slot
				// Removing a key that was never inserted: nothing to do.
				if (val_slot == TOMBSTONE) return val_slot;
				// Claim the null key-slot
				if (CAS_key(kvs, idx, NULL, key_slot)) {
					chm->_slots.fetch_add(1, memory_order_relaxed); // Inc key-slots-used count
					hashes[idx] = fullhash; // Memorize full hash
					break;
				}
				K = key(kvs, idx); // CAS failed, get updated value
				assert (K != NULL);
			}

			// Key slot not null, there exists a Key here
			if (keyeq(K, key_slot, hashes, idx, fullhash))
				break; // Got it
			
			// Notice that the logic here should be consistent with that of get.
			// The first predicate means too many reprobes means nothing in the
			// old table.
			if (++reprobe_cnt >= reprobe_limit(len) ||
				K == TOMBSTONE) { // Found a Tombstone key, no more keys
				newkvs = chm->resize(topmap, kvs);
				// Help along an existing copy
				if (expVal != NULL) topmap->help_copy(newkvs);
				// Retry the whole operation in the new (possibly larger) table.
				return putIfMatch(topmap, newkvs, key_slot, val_slot, expVal);
			}

			idx = (idx + 1) & (len - 1); // Reprobe
		} // End of spinning till we get a Key slot

		if (val_slot == V) return V; // Fast cutout for no-change
	
		// Here it tries to resize cause it doesn't want other threads to stop
		// its progress (eagerly try to resize soon)
		newkvs = chm->_newkvs.load(memory_order_acquire);
		if (newkvs == NULL &&
			((V == NULL && chm->table_full(reprobe_cnt, len)) || is_prime(V)))
			newkvs = chm->resize(topmap, kvs); // Force the copy to start
		
		// Finish the copy and then put it in the new table
		if (newkvs != NULL)
			return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs, idx,
				expVal), key_slot, val_slot, expVal);
		
		// Decided to update the existing table: CAS the value slot until we
		// either win, discover a mismatch with expVal, or hit a primed value
		// (which means a copier beat us and we must move to the new table).
		while (true) {
			assert (!is_prime(V));

			// Conditional-update mismatch check. The clauses encode, in order:
			// unconditional puts always pass; exact-pointer match passes;
			// MATCH_ANY passes for any live value; TOMBSTONE expectation
			// treats an empty (NULL) slot as a match; otherwise fall back to
			// deep value equality.
			if (expVal != NO_MATCH_OLD &&
				V != expVal &&
				(expVal != MATCH_ANY || V == TOMBSTONE || V == NULL) &&
				!(V == NULL && expVal == TOMBSTONE) &&
				(expVal == NULL || !valeq(expVal, V))) {
				/**
					@Begin
					@Commit_point_define: expVal == TOMBSTONE
					@Potential_commit_point_label: Read_Val_Point
					@Label: PutIfAbsent_Fail_Point
						# This is a check for the PutIfAbsent() when the value
						# is not absent
					@End
				*/
				/**
					@Begin
					@Commit_point_define: expVal != NULL && val_slot == TOMBSTONE
					@Potential_commit_point_label: Read_Val_Point
					@Label: RemoveIfMatch_Fail_Point
					@End
				*/
				/**
					@Begin
					@Commit_point_define: !valeq(expVal, V)
					@Potential_commit_point_label: Read_Val_Point
					@Label: ReplaceIfMatch_Fail_Point
					@End
				*/
				return V; // Do not update!
			}

			if (CAS_val(kvs, idx, V, val_slot)) {
				/**
					@Begin
					# The only point where a successful put happens
					@Commit_point_define: true
					@Potential_commit_point_label: Write_Val_Point
					@Label: Write_Success_Point
					@End
				*/
				if (expVal != NULL) { // Not called by a table-copy
					// CAS succeeded, should adjust size
					// Both normal put's and table-copy calls putIfMatch, but
					// table-copy does not increase the number of live K/V pairs
					if ((V == NULL || V == TOMBSTONE) &&
						val_slot != TOMBSTONE)
						chm->_size.fetch_add(1, memory_order_relaxed);
					if (!(V == NULL || V == TOMBSTONE) &&
						val_slot == TOMBSTONE)
						chm->_size.fetch_add(-1, memory_order_relaxed);
				}
				// Report a freshly-claimed empty slot as TOMBSTONE to normal
				// callers, so "no prior mapping" is distinguishable from NULL
				// (which is reserved for the copy_slot() path).
				return (V == NULL && expVal != NULL) ? TOMBSTONE : V;
			}
			// Else CAS failed
			V = val(kvs, idx);
			// A prime value means a resize copy has claimed this slot; finish
			// copying it and retry the update in the new table.
			if (is_prime(V))
				return putIfMatch(topmap, chm->copy_slot_and_check(topmap, kvs,
					idx, expVal), key_slot, val_slot, expVal);
		}
	}
827
828         // Help along an existing table-resize. This is a fast cut-out wrapper.
829         kvs_data* help_copy(kvs_data *helper) {
830                 kvs_data *topkvs = _kvs.load(memory_order_acquire);
831                 CHM *topchm = get_chm(topkvs);
832                 // No cpy in progress
833                 if (topchm->_newkvs.load(memory_order_acquire) == NULL) return helper;
834                 topchm->help_copy_impl(this, topkvs, false);
835                 return helper;
836         }
837 };
838
839 #endif