10 #define MODEL_ASSERT assert
12 #include <model-assert.h>
18 #include "sc_annotation.h"
20 #define relaxed memory_order_relaxed
21 #define release memory_order_release
22 #define acquire memory_order_acquire
23 #define acq_rel memory_order_acq_rel
24 #define seq_cst memory_order_seq_cst
/**
 * For the sake of simplicity, we do not use templates but some toy structs
 * to represent the Key and Value.
 */
// Probably represents a coordinate (x, y, z)
class Key {
public:
	int x;
	int y;
	int z;

	// Java-style polynomial hash over the three coordinates.
	int hashCode() {
		return x + 31 * y + 31 * 31 * z;
	}

	// Field-wise equality; a NULL argument is never equal.
	bool equals(Key *other) {
		if (!other)
			return false;
		return x == other->x && y == other->y && z == other->z;
	}

	Key(int x, int y, int z) :
		x(x),
		y(y),
		z(z) {}
};
// Probably represents a speed vector (vX, vY, vZ)
class Value {
public:
	int vX;
	int vY;
	int vZ;

	Value(int vX, int vY, int vZ) :
		vX(vX),
		vY(vY),
		vZ(vZ) {}

	// Field-wise equality; a NULL argument is never equal.
	bool equals(Value *other) {
		if (!other)
			return false;
		return vX == other->vX && vY == other->vY && vZ == other->vZ;
	}
};
85 Entry(int h, Key *k, Value *v, Entry *n) {
88 this->value.store(v, relaxed);
89 this->next.store(n, relaxed);
113 atomic<Entry*> *table;
118 static const int CONCURRENCY_LEVEL = 16;
120 static const int SEGMENT_MASK = CONCURRENCY_LEVEL - 1;
122 Segment *segments[CONCURRENCY_LEVEL];
124 static const int DEFAULT_INITIAL_CAPACITY = 16;
126 // Not gonna consider resize now...
130 this->capacity = DEFAULT_INITIAL_CAPACITY;
131 this->table = new atomic<Entry*>[capacity];
132 for (int i = 0; i < capacity; i++) {
133 atomic_init(&table[i], NULL);
135 for (int i = 0; i < CONCURRENCY_LEVEL; i++) {
136 segments[i] = new Segment;
140 int hashKey(Key *x) {
141 int h = x->hashCode();
142 // Use logical right shift
143 unsigned int tmp = (unsigned int) h;
144 return ((h << 7) - h + (tmp >> 9) + (tmp >> 17));
147 bool eq(Key *x, Key *y) {
148 return x == y || x->equals(y);
151 Value* get(Key *key) {
153 int hash = hashKey(key);
155 // Try first without locking...
156 atomic<Entry*> *tab = table;
157 int index = hash & (capacity - 1);
158 atomic<Entry*> *first = &tab[index];
162 // Should be a load acquire
163 // This load action here makes it problematic for the SC analysis, what
164 // we need to do is as follows: if the get() method ever acquires the
165 // lock, we ignore this operation for the SC analysis, and otherwise we
166 // take it into consideration
169 Entry *firstPtr = first->load(acquire);
175 if (e->hash == hash && eq(key, e->key)) {
176 res = e->value.load(seq_cst);
182 // Loading the next entry
183 e = e->next.load(acquire);
186 // Recheck under synch if key apparently not there or interference
187 Segment *seg = segments[hash & SEGMENT_MASK];
188 seg->lock(); // Critical region begins
189 // Not considering resize now, so ignore the reload of table...
191 // Synchronized by locking, no need to be load acquire
192 Entry *newFirstPtr = first->load(relaxed);
193 if (e != NULL || firstPtr != newFirstPtr) {
196 if (e->hash == hash && eq(key, e->key)) {
197 res = e->value.load(seq_cst);
198 seg->unlock(); // Critical region ends
201 // Synchronized by locking
202 e = e->next.load(relaxed);
205 seg->unlock(); // Critical region ends
209 Value* put(Key *key, Value *value) {
210 // Don't allow NULL key or value
211 MODEL_ASSERT (key && value);
213 int hash = hashKey(key);
214 Segment *seg = segments[hash & SEGMENT_MASK];
217 seg->lock(); // Critical region begins
219 int index = hash & (capacity - 1);
221 atomic<Entry*> *first = &tab[index];
223 Value *oldValue = NULL;
225 // The written of the entry is synchronized by locking
226 Entry *firstPtr = first->load(relaxed);
229 if (e->hash == hash && eq(key, e->key)) {
230 // FIXME: This could be a relaxed (because locking synchronize
231 // with the previous put())??
232 oldValue = e->value.load(acquire);
233 e->value.store(value, seq_cst);
234 seg->unlock(); // Don't forget to unlock before return
237 // Synchronized by locking
238 e = e->next.load(relaxed);
241 // Add to front of list
242 Entry *newEntry = new Entry(hash, key, value, firstPtr);
243 // Publish the newEntry to others
244 first->store(newEntry, release);
245 seg->unlock(); // Critical region ends
249 Value* remove(Key *key, Value *value) {
251 int hash = hashKey(key);
252 Segment *seg = segments[hash & SEGMENT_MASK];
255 seg->lock(); // Critical region begins
257 int index = hash & (capacity - 1);
259 atomic<Entry*> *first = &tab[index];
261 Value *oldValue = NULL;
263 // The written of the entry is synchronized by locking
264 Entry *firstPtr = first->load(relaxed);
269 seg->unlock(); // Don't forget to unlock
272 if (e->hash == hash && eq(key, e->key))
274 // Synchronized by locking
275 e = e->next.load(relaxed);
278 // FIXME: This could be a relaxed (because locking synchronize
279 // with the previous put())??
280 oldValue = e->value.load(acquire);
281 // If the value parameter is NULL, we will remove the entry anyway
282 if (value != NULL && value->equals(oldValue)) {
287 // Force the get() to grab the lock and retry
288 e->value.store(NULL, relaxed);
290 // The strategy to remove the entry is to keep the entries after the
291 // removed one and copy the ones before it
292 Entry *head = e->next.load(relaxed);
294 p = first->load(relaxed);
296 head = new Entry(p->hash, p->key, p->value.load(relaxed), head);
297 p = p->next.load(relaxed);
300 // Publish the new head to readers
301 first->store(head, release);
302 seg->unlock(); // Critical region ends