// Use the model checker's assertion macro for invariant checks.
10 #define MODEL_ASSERT assert
12 #include <model-assert.h>
// Short aliases for the C++11 memory-order constants, to keep the
// atomic load/store call sites below readable.
17 #define relaxed memory_order_relaxed
18 #define release memory_order_release
19 #define acquire memory_order_acquire
20 #define acq_rel memory_order_acq_rel
21 #define seq_cst memory_order_seq_cst
26 For the sake of simplicity, we do not use templates; instead we use toy
27 structs to represent the Key and Value.
30 // Probably represents a coordinate (x, y, z)
// NOTE(review): only fragments of struct Key are visible in this chunk;
// surrounding declarations (struct header, field declarations) are elided.
// Polynomial hash over the three coordinates, base 31.
36 return x + 31 * y + 31 * 31 * z;
// Field-wise equality. Dereferences `other` unconditionally; presumably a
// non-NULL check exists on the elided lines -- TODO confirm.
39 bool equals(Key *other) {
42 return x == other->x && y == other->y && z == other->z;
// Constructor: initializer list (elided here) sets the (x, y, z) fields.
45 Key(int x, int y, int z) :
55 // Probably represents a velocity vector (vX, vY, vZ)
// NOTE(review): only fragments of struct Value are visible in this chunk.
// Constructor: initializer list (elided here) sets the (vX, vY, vZ) fields.
60 Value(int vX, int vY, int vZ) :
// Field-wise equality with another Value; dereferences `other`
// unconditionally.
68 bool equals(Value *other) {
71 return vX == other->vX && vY == other->vY && vZ == other->vZ;
// Hash-chain node constructor. The initializing stores are relaxed
// because a new Entry is only made visible to readers through the
// release store of the bucket head in put()/remove(); that release
// publication orders these stores before any acquire load of the head.
82 Entry(int h, Key *k, Value *v, Entry *n) {
85 this->value.store(v, relaxed);
86 this->next.store(n, relaxed);
// Bucket array; each slot is the atomic head pointer of a hash chain.
110 atomic<Entry*> *table;
// Number of lock stripes. Kept a power of two so SEGMENT_MASK below can
// pick a stripe with a single bitwise AND.
115 static const int CONCURRENCY_LEVEL = 16;
117 static const int SEGMENT_MASK = CONCURRENCY_LEVEL - 1;
// One lock per stripe; a key maps to segments[hash & SEGMENT_MASK].
119 Segment *segments[CONCURRENCY_LEVEL];
121 static const int DEFAULT_INITIAL_CAPACITY = 16;
123 // Resizing is not modeled; capacity is fixed at construction.
// Constructor body (header elided in this chunk): allocate the bucket
// array, zero every chain head, and create the lock stripes.
127 this->capacity = DEFAULT_INITIAL_CAPACITY;
128 this->table = new atomic<Entry*>[capacity];
129 for (int i = 0; i < capacity; i++) {
130 atomic_init(&table[i], NULL);
132 for (int i = 0; i < CONCURRENCY_LEVEL; i++) {
133 segments[i] = new Segment;
// Supplemental hash: mixes the bits of Key::hashCode() so both high and
// low bits influence the bucket index (capacity is a power of two, so
// only the low bits are used for indexing).
// NOTE(review): `h << 7` on a negative h is formally undefined behavior
// in C++; in practice compilers treat it as two's-complement -- consider
// shifting the unsigned copy instead. TODO confirm intent.
137 int hashKey(Key *x) {
138 int h = x->hashCode();
139 // Use logical right shift (done on the unsigned copy below)
140 unsigned int tmp = (unsigned int) h;
141 return ((h << 7) - h + (tmp >> 9) + (tmp >> 17));
// Key equality: pointer-identity fast path, then deep field comparison.
144 bool eq(Key *x, Key *y) {
145 return x == y || x->equals(y);
// Lookup. First scans the bucket chain without locking; if the key is
// not found that way, or there was interference (e.g. remove() nulled a
// value to force retries -- see remove()), rescans under the stripe lock.
// Returns the mapped Value, or NULL if the key is absent.
// NOTE(review): several interior lines (loop headers, returns) are
// elided in this chunk.
148 Value* get(Key *key) {
150 int hash = hashKey(key);
152 // Try first without locking...
153 atomic<Entry*> *tab = table;
154 int index = hash & (capacity - 1);
155 atomic<Entry*> *first = &tab[index];
// Acquire pairs with the release store of the bucket head in
// put()/remove(), making the published Entry's fields visible.
160 Entry *firstPtr = first->load(acquire);
163 if (e->hash == hash && eq(key, e->key)) {
// seq_cst read of the value; remove() may have overwritten it with
// NULL, which sends us to the locked slow path below.
164 res = e->value.load(seq_cst);
169 // Loading the next entry
170 e = e->next.load(acquire);
174 // Recheck under synch if key apparently not there or interference
175 Segment *seg = segments[hash & SEGMENT_MASK];
176 seg->lock(); // Critical region begins
177 // Not considering resize now, so ignore the reload of table...
179 // Synchronized by locking, no need to be load acquire
180 Entry *newFirstPtr = first->load(relaxed);
// Rescan only if the lock-free scan saw interference or the chain head
// changed while we were scanning.
181 if (e != NULL || firstPtr != newFirstPtr) {
184 if (e->hash == hash && eq(key, e->key)) {
185 res = e->value.load(seq_cst);
188 // Synchronized by locking
189 e = e->next.load(relaxed);
192 seg->unlock(); // Critical region ends
// Insert or update the mapping for `key`, entirely under the stripe
// lock. Returns the previous Value for the key, or NULL if there was
// none. NOTE(review): interior lines (e.g. the `tab` declaration and
// the chain-scan loop header) are elided in this chunk.
196 Value* put(Key *key, Value *value) {
197 // Don't allow NULL key or value
198 MODEL_ASSERT (key && value);
200 int hash = hashKey(key);
201 Segment *seg = segments[hash & SEGMENT_MASK];
204 seg->lock(); // Critical region begins
206 int index = hash & (capacity - 1);
208 atomic<Entry*> *first = &tab[index];
210 Value *oldValue = NULL;
// The write of the entry is synchronized by locking, so a relaxed
// load of the chain head suffices here.
213 Entry *firstPtr = first->load(relaxed);
216 if (e->hash == hash && eq(key, e->key)) {
217 // FIXME: This could be a relaxed (because locking synchronize
218 // with the previous put())??
219 oldValue = e->value.load(acquire);
// Overwrite in place; seq_cst so it orders against get()'s
// seq_cst value loads.
220 e->value.store(value, seq_cst);
221 seg->unlock(); // Don't forget to unlock before return
// Synchronized by locking, relaxed suffices for traversal.
225 e = e->next.load(relaxed);
228 // Add to front of list
229 Entry *newEntry = new Entry(hash, key, value, firstPtr);
// Release publication: pairs with the acquire load of the chain head
// in get(), making the new Entry's relaxed-initialized fields visible.
231 first->store(newEntry, release);
232 seg->unlock(); // Critical region ends
// Remove the entry for `key`, entirely under the stripe lock. Per the
// comment below, a NULL `value` argument removes unconditionally;
// otherwise removal presumably depends on comparing `value` against the
// stored Value -- the branch bodies are elided here, TODO confirm exact
// polarity. Returns the removed Value (or NULL; elided return paths).
236 Value* remove(Key *key, Value *value) {
238 int hash = hashKey(key);
239 Segment *seg = segments[hash & SEGMENT_MASK];
242 seg->lock(); // Critical region begins
244 int index = hash & (capacity - 1);
246 atomic<Entry*> *first = &tab[index];
248 Value *oldValue = NULL;
// The write of the entry is synchronized by locking, so a relaxed
// load of the chain head suffices here.
251 Entry *firstPtr = first->load(relaxed);
// (Elided branch) key not found on the chain: release and bail out.
256 seg->unlock(); // Don't forget to unlock
259 if (e->hash == hash && eq(key, e->key))
261 // Synchronized by locking
262 e = e->next.load(relaxed);
265 // FIXME: This could be a relaxed (because locking synchronize
266 // with the previous put())??
267 oldValue = e->value.load(acquire);
268 // If the value parameter is NULL, we will remove the entry anyway
269 if (value != NULL && value->equals(oldValue))
272 // Force the get() to grab the lock and retry
273 e->value.store(NULL, relaxed);
// Removal strategy: keep the suffix of the chain after the removed
// entry, and rebuild (copy) the prefix in front of it, so concurrent
// lock-free readers never see a half-unlinked chain.
277 Entry *head = e->next.load(relaxed);
279 p = first->load(relaxed);
281 head = new Entry(p->hash, p->key, p->value.load(relaxed), head);
282 p = p->next.load(relaxed);
// Release publication of the rebuilt chain; pairs with get()'s
// acquire load of the chain head.
286 first->store(head, release);
287 seg->unlock(); // Critical region ends