10 #define MODEL_ASSERT assert
12 #include <model-assert.h>
17 #include "sc_annotation.h"
19 #define relaxed memory_order_relaxed
20 #define release memory_order_release
21 #define acquire memory_order_acquire
22 #define acq_rel memory_order_acq_rel
23 #define seq_cst memory_order_seq_cst
28 For the sake of simplicity, we do not use templates, but instead some toy
29 structs to represent the Key and Value.
32 // Key: probably represents a coordinate (x, y, z).
// NOTE(review): this class is incomplete in this extract — field
// declarations, the hashCode() signature, and closing braces are on
// lines missing from this view.
// Classic base-31 polynomial hash over the three coordinates.
38 return x + 31 * y + 31 * 31 * z;
// Field-wise equality. Assumes `other` is non-NULL — TODO confirm that
// all callers guarantee this (eq() below dereferences unconditionally).
41 bool equals(Key *other) {
44 return x == other->x && y == other->y && z == other->z;
// Constructor; the member-initializer list continues on lines missing
// from this extract.
47 Key(int x, int y, int z) :
57 // Value: probably represents a velocity vector (vX, vY, vZ).
// NOTE(review): incomplete in this extract — field declarations and the
// constructor's initializer list are on missing lines.
62 Value(int vX, int vY, int vZ) :
// Field-wise equality. Assumes `other` is non-NULL — TODO confirm
// (remove() below passes a possibly-unchecked pointer).
70 bool equals(Value *other) {
73 return vX == other->vX && vY == other->vY && vZ == other->vZ;
// Hash-chain node constructor. Lines 85-86 (presumably the plain
// assignments of `hash` and `key`) are missing from this extract.
// The relaxed stores are intentional: a new Entry is private to the
// constructing thread until put() publishes it with a release store to
// the bucket head, which orders these initializing stores for readers.
84 Entry(int h, Key *k, Value *v, Entry *n) {
87 this->value.store(v, relaxed);
88 this->next.store(n, relaxed);
// Bucket array: each slot is an atomic pointer to the head of a chain.
112 atomic<Entry*> *table;
// Lock striping: 16 segments guard the table; a key's segment is chosen
// by (hash & SEGMENT_MASK). Must be a power of two for the mask to work.
117 static const int CONCURRENCY_LEVEL = 16;
119 static const int SEGMENT_MASK = CONCURRENCY_LEVEL - 1;
121 Segment *segments[CONCURRENCY_LEVEL];
// Must also be a power of two: get()/put()/remove() index with
// (hash & (capacity - 1)).
123 static const int DEFAULT_INITIAL_CAPACITY = 16;
125 // Resizing is not supported; capacity is fixed at construction.
// Constructor body (its signature line is missing from this extract):
// allocate the bucket array, zero every slot, and create the segments.
129 this->capacity = DEFAULT_INITIAL_CAPACITY;
130 this->table = new atomic<Entry*>[capacity];
131 for (int i = 0; i < capacity; i++) {
// atomic_init: non-atomic initialization is sufficient here because the
// map is not yet shared with other threads.
132 atomic_init(&table[i], NULL);
134 for (int i = 0; i < CONCURRENCY_LEVEL; i++) {
135 segments[i] = new Segment;
// Supplemental hash: spreads the bits of Key::hashCode() so that masking
// with (capacity - 1) / SEGMENT_MASK does not collide on low bits
// (same idea as Doug Lea's java.util.concurrent.ConcurrentHashMap).
139 int hashKey(Key *x) {
140 int h = x->hashCode();
141 // Cast to unsigned so >> is a logical (zero-fill) right shift;
142 unsigned int tmp = (unsigned int) h;
// NOTE(review): (h << 7) on a negative h is technically undefined
// behavior in standard C++ — presumably benign on the target toolchain,
// but worth confirming.
143 return ((h << 7) - h + (tmp >> 9) + (tmp >> 17));
// Key comparison: pointer-identity fast path, then structural equality.
// Assumes x is non-NULL (x->equals is called unconditionally).
146 bool eq(Key *x, Key *y) {
147 return x == y || x->equals(y);
// Lookup. Two phases: (1) a lock-free traversal of the bucket chain;
// (2) if the key is not found or the chain head changed meanwhile,
// re-traverse under the segment lock.
// NOTE(review): several interior lines (loop headers, branch closes,
// returns, the declarations of `e` and `res`) are missing from this
// extract; comments below describe only what is visible.
150 Value* get(Key *key) {
152 int hash = hashKey(key);
154 // Phase 1: optimistic, lock-free probe of the bucket.
155 atomic<Entry*> *tab = table;
156 int index = hash & (capacity - 1);
157 atomic<Entry*> *first = &tab[index];
161 // Should be a load acquire
162 // This load action here makes it problematic for the SC analysis, what
163 // we need to do is as follows: if the get() method ever acquires the
164 // lock, we ignore this operation for the SC analysis, and otherwise we
165 // take it into consideration
// Acquire pairs with put()'s release store of the new head, making the
// Entry's constructor-time relaxed stores visible to this reader.
168 Entry *firstPtr = first->load(acquire);
174 if (e->hash == hash && eq(key, e->key)) {
// seq_cst read of the value; remove() may have stored NULL here to
// force readers onto the locked path.
175 res = e->value.load(seq_cst);
180 // Loading the next entry
181 e = e->next.load(acquire);
185 // Phase 2: recheck under lock if key apparently absent or interference.
186 Segment *seg = segments[hash & SEGMENT_MASK];
187 seg->lock(); // Critical region begins
188 // Not considering resize now, so ignore the reload of table...
190 // Chain writers hold the same segment lock, so relaxed suffices here.
191 Entry *newFirstPtr = first->load(relaxed);
// Retry only if the lock-free scan exhausted the chain (e == NULL would
// mean "truly absent") or the head moved under us.
192 if (e != NULL || firstPtr != newFirstPtr) {
195 if (e->hash == hash && eq(key, e->key)) {
196 res = e->value.load(seq_cst);
197 seg->unlock(); // Critical region ends
200 // Synchronized by locking
201 e = e->next.load(relaxed);
204 seg->unlock(); // Critical region ends
// Insert or update under the segment lock. Returns the previous value
// for the key, or (from what is visible) NULL on a fresh insert.
// NOTE(review): interior lines (the chain-walk loop header, the
// declaration of `tab`/`e`, returns, closing braces) are missing from
// this extract.
208 Value* put(Key *key, Value *value) {
209 // Don't allow NULL key or value
210 MODEL_ASSERT (key && value);
212 int hash = hashKey(key);
213 Segment *seg = segments[hash & SEGMENT_MASK];
216 seg->lock(); // Critical region begins
218 int index = hash & (capacity - 1);
220 atomic<Entry*> *first = &tab[index];
222 Value *oldValue = NULL;
224 // Writes to the chain are synchronized by the segment lock,
225 Entry *firstPtr = first->load(relaxed);
// Walk the chain looking for an existing entry with this key.
228 if (e->hash == hash && eq(key, e->key)) {
229 // FIXME: This could be a relaxed (because locking synchronize
230 // with the previous put())??
231 oldValue = e->value.load(acquire);
// seq_cst store so concurrent lock-free get() readers observe the
// update (pairs with get()'s seq_cst value load).
232 e->value.store(value, seq_cst);
233 seg->unlock(); // Don't forget to unlock before return
236 // Synchronized by locking
237 e = e->next.load(relaxed);
240 // Key absent: add a new entry at the front of the list.
241 Entry *newEntry = new Entry(hash, key, value, firstPtr);
// Release publication: makes the Entry's relaxed constructor stores
// visible to get()'s acquire load of the bucket head.
243 first->store(newEntry, release);
244 seg->unlock(); // Critical region ends
// Remove under the segment lock. Entries are immutable links, so removal
// copies the prefix of the chain in front of the removed entry instead
// of mutating `next` pointers (copy-on-remove, as in early Java CHM).
// NOTE(review): interior lines (loop headers, the not-found early exit,
// branch bodies around L112, returns, closing braces) are missing from
// this extract; the comments below cover only what is visible.
248 Value* remove(Key *key, Value *value) {
250 int hash = hashKey(key);
251 Segment *seg = segments[hash & SEGMENT_MASK];
254 seg->lock(); // Critical region begins
256 int index = hash & (capacity - 1);
258 atomic<Entry*> *first = &tab[index];
260 Value *oldValue = NULL;
262 // Writes to the chain are synchronized by the segment lock.
263 Entry *firstPtr = first->load(relaxed);
// Early-exit path (key not in chain, presumably): release the lock.
268 seg->unlock(); // Don't forget to unlock
// Chain walk: stop at the matching entry.
271 if (e->hash == hash && eq(key, e->key))
273 // Synchronized by locking
274 e = e->next.load(relaxed);
277 // FIXME: This could be a relaxed (because locking synchronize
278 // with the previous put())??
279 oldValue = e->value.load(acquire);
280 // If the value parameter is NULL, we will remove the entry anyway
// NOTE(review): this condition looks inverted relative to the comment
// above and to Java's remove(key, value) semantics
// (value == null || value.equals(old) => remove); the branch body is
// not visible here — verify which path actually performs the removal.
281 if (value != NULL && value->equals(oldValue)) {
286 // Force the get() to grab the lock and retry
287 e->value.store(NULL, relaxed);
289 // The strategy to remove the entry is to keep the entries after the
290 // removed one and copy the ones before it
291 Entry *head = e->next.load(relaxed);
293 p = first->load(relaxed);
// Clone each predecessor entry, prepending onto the retained suffix;
// note this reverses the order of the copied prefix.
295 head = new Entry(p->hash, p->key, p->value.load(relaxed), head);
296 p = p->next.load(relaxed);
299 // Release publication of the rebuilt chain to lock-free readers.
300 first->store(head, release);
301 seg->unlock(); // Critical region ends