10 #define MODEL_ASSERT assert
12 #include <model-assert.h>
// Short aliases for the C++ std::memory_order constants, so the atomic
// operations below read like the paper/benchmark pseudo-code.
17 #define relaxed memory_order_relaxed
18 #define release memory_order_release
19 #define acquire memory_order_acquire
20 #define acq_rel memory_order_acq_rel
21 #define seq_cst memory_order_seq_cst
26 For the sake of simplicity, we do not use templates but rather some toy structs to
27 represent the Key and Value.
30 // Probably represents a coordinate (x, y, z)
// Polynomial hash over the three fields, Java-style with base 31.
36 return x + 31 * y + 31 * 31 * z;
// Field-wise deep equality.
// NOTE(review): dereferences `other` unchecked — assumes callers never pass NULL.
39 bool equals(Key *other) {
42 return x == other->x && y == other->y && z == other->z;
// Constructor; the member-initializer list continues outside this excerpt.
45 Key(int x, int y, int z) :
55 // Probably represents a velocity vector (vX, vY, vZ)
// Constructor; the member-initializer list continues outside this excerpt.
60 Value(int vX, int vY, int vZ) :
// Hash-chain node: caches the key's hash and links to the next entry in the bucket.
76 Entry(int h, Key *k, Value *v, Entry *n) {
// Relaxed stores suffice during construction: the entry only becomes visible
// to readers via the release store of the bucket head in put(), which orders
// these writes before the publication.
79 this->value.store(v, relaxed);
80 this->next.store(n, relaxed);
// Bucket array: each slot is the atomically-published head of a hash chain.
104 atomic<Entry*> *table;
// Number of lock stripes; kept a power of two so SEGMENT_MASK selects a stripe
// with a cheap bitwise AND.
109 static const int CONCURRENCY_LEVEL = 16;
111 static const int SEGMENT_MASK = CONCURRENCY_LEVEL - 1;
// One lock per stripe; writers on the same stripe serialize on it.
113 Segment *segments[CONCURRENCY_LEVEL];
// Also a power of two, so `hash & (capacity - 1)` is a valid bucket index.
115 static const int DEFAULT_INITIAL_CAPACITY = 16;
117 // Not gonna consider resize now...
121 this->capacity = DEFAULT_INITIAL_CAPACITY;
122 this->table = new atomic<Entry*>[capacity];
123 for (int i = 0; i < capacity; i++) {
// atomic_init performs a plain (non-atomic) initialization of each empty bucket.
124 atomic_init(&table[i], NULL);
126 for (int i = 0; i < CONCURRENCY_LEVEL; i++) {
127 segments[i] = new Segment;
// Spreads the key's raw hashCode() across the bucket range to reduce collisions.
131 int hashKey(Key *x) {
132 int h = x->hashCode();
133 // Use logical right shift
// Cast to unsigned so `>>` zero-fills instead of sign-extending a negative h.
134 unsigned int tmp = (unsigned int) h;
// (h << 7) - h == h * 127; the shifted terms fold high bits into low bits.
135 return ((h << 7) - h + (tmp >> 9) + (tmp >> 17));
// Key comparison: pointer-identity fast path, then deep equality via Key::equals.
138 bool eq(Key *x, Key *y) {
139 return x == y || x->equals(y);
// Lookup: optimistic lock-free scan of the bucket chain first; if that is
// inconclusive (or the chain changed underneath us), recheck under the
// segment lock. Returns the mapped Value, or presumably NULL if absent
// (the return path is outside this excerpt).
142 Value* get(Key *key) {
143 int hash = hashKey(key); // throws null pointer exception if key null
145 // Try first without locking...
146 atomic<Entry*> *tab = table;
// capacity is a power of two, so this masks the hash into a bucket index.
147 int index = hash & (capacity - 1);
148 atomic<Entry*> *first = &tab[index];
// The acquire load pairs with put()'s release store of the new bucket head,
// making the published entry's fields visible before we traverse them.
152 // Should be a load acquire
153 Entry *firstPtr = first->load(acquire);
// Lock-free chain walk (the loop header is outside this excerpt).
156 if (e->hash == hash && eq(key, e->key)) {
157 res = e->value.load(seq_cst);
162 // Loading the next entry
163 e = e->next.load(acquire);
167 // Recheck under synch if key apparently not there or interference
// Lock the stripe that covers this hash; excludes concurrent writers.
168 Segment *seg = segments[hash & SEGMENT_MASK];
169 seg->lock(); // Critical region begins
170 // Not considering resize now, so ignore the reload of table...
172 // Synchronized by locking, no need to be load acquire
173 Entry *newFirstPtr = first->load(relaxed);
// Re-scan only if the optimistic pass was inconclusive or the head moved.
174 if (e != NULL || firstPtr != newFirstPtr) {
177 if (e->hash == hash && eq(key, e->key)) {
178 res = e->value.load(seq_cst);
181 // Synchronized by locking
182 e = e->next.load(relaxed);
185 seg->unlock(); // Critical region ends
// Insert-or-update under the segment lock. If the key exists, its value is
// replaced and the previous value returned; otherwise a new entry is pushed
// onto the front of the bucket chain and published with a release store.
189 Value* put(Key *key, Value *value) {
190 // Don't allow NULL value
191 //ASSERT (value != NULL);
193 int hash = hashKey(key);
// All writers to this hash's stripe serialize on the same segment lock.
194 Segment *seg = segments[hash & SEGMENT_MASK];
197 seg->lock(); // Critical region begins
199 int index = hash & (capacity - 1);
201 atomic<Entry*> *first = &tab[index];
205 // The write of the entry is synchronized by locking
206 Entry *firstPtr = first->load(relaxed);
// Scan the chain for an existing key (loop header outside this excerpt).
209 if (e->hash == hash && eq(key, e->key)) {
210 // FIXME: This could be an acquire??
211 res = e->value.load(acquire);
// Key already present: overwrite in place and return the old value.
212 e->value.store(value, seq_cst);
213 seg->unlock(); // Don't forget to unlock before return
218 // Add to front of list
// The new entry links to the old head; its fields were stored relaxed in the
// Entry constructor and are ordered by the release publication below.
219 Entry *newEntry = new Entry(hash, key, value, firstPtr);
220 // Publish the newEntry to others
// Release store pairs with the acquire load in get()'s lock-free fast path.
221 first->store(newEntry, release);
222 seg->unlock(); // Critical region ends