1 /*------------------------------------------------------------------------
2 Junction: Concurrent data structures in C++
3 Copyright (c) 2016 Jeff Preshing
5 Distributed under the Simplified BSD License.
6 Original location: https://github.com/preshing/junction
8 This software is distributed WITHOUT ANY WARRANTY; without even the
9 implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
10 See the LICENSE file for more information.
11 ------------------------------------------------------------------------*/
13 #include <junction/Core.h>
14 #include <turf/CPUTimer.h>
15 #include <turf/Util.h>
16 #include <turf/extra/UniqueSequence.h>
17 #include <turf/extra/JobDispatcher.h>
18 #include <turf/extra/Options.h>
19 #include <junction/extra/MapAdapter.h>
23 using namespace turf::intTypes;
24 typedef junction::extra::MapAdapter MapAdapter;
26 static const ureg NumKeysPerThread = 2000;
27 static const ureg DefaultReadsPerWrite = 4;
28 static const ureg DefaultItersPerChunk = 10000;
29 static const ureg DefaultChunks = 200;
30 static const u32 Prime = 0x4190ab09;
35 ureg numKeysPerThread;
39 turf::extra::SpinKicker spinKicker;
40 turf::Atomic<u32> doneFlag;
42 SharedState(MapAdapter& adapter, ureg numKeysPerThread, ureg readsPerWrite, ureg itersPerChunk)
43 : adapter(adapter), map(NULL), numKeysPerThread(numKeysPerThread), readsPerWrite(readsPerWrite),
44 itersPerChunk(itersPerChunk) {
45 doneFlag.storeNonatomic(0);
52 SharedState& m_shared;
53 MapAdapter::ThreadContext m_threadCtx;
70 Stats& operator+=(const Stats& other) {
71 mapOpsDone += other.mapOpsDone;
72 duration += other.duration;
76 bool operator<(const Stats& other) const {
77 return duration < other.duration;
83 ThreadState(SharedState& shared, ureg threadIndex, u32 rangeLo, u32 rangeHi)
84 : m_shared(shared), m_threadCtx(shared.adapter, threadIndex) {
85 m_threadIndex = threadIndex;
89 m_removeIndex = rangeLo;
92 void registerThread() {
93 m_threadCtx.registerThread();
96 void unregisterThread() {
97 m_threadCtx.unregisterThread();
100 void initialPopulate() {
101 TURF_ASSERT(m_addIndex == m_removeIndex);
102 MapAdapter::Map* map = m_shared.map;
103 for (ureg i = 0; i < m_shared.numKeysPerThread; i++) {
104 u32 key = m_addIndex * Prime;
106 map->insert(key, (void*) uptr(key));
107 if (++m_addIndex == m_rangeHi)
108 m_addIndex = m_rangeLo;
113 MapAdapter::Map* map = m_shared.map;
114 turf::CPUTimer::Converter converter;
116 ureg lookupIndex = m_rangeLo;
117 ureg remaining = m_shared.itersPerChunk;
118 if (m_threadIndex == 0)
119 m_shared.spinKicker.kick(m_shared.numThreads - 1);
122 m_shared.spinKicker.waitForKick();
126 turf::CPUTimer::Point start = turf::CPUTimer::get();
127 for (; remaining > 0; remaining--) {
129 if (m_shared.doneFlag.load(turf::Relaxed))
131 u32 key = m_addIndex * Prime;
133 map->insert(key, (void*) uptr(key));
136 if (++m_addIndex == m_rangeHi)
137 m_addIndex = m_rangeLo;
140 if (s32(lookupIndex - m_removeIndex) < 0)
141 lookupIndex = m_removeIndex;
142 for (ureg l = 0; l < m_shared.readsPerWrite; l++) {
143 if (m_shared.doneFlag.load(turf::Relaxed))
145 key = lookupIndex * Prime;
147 volatile void* value = map->get(key);
151 if (++lookupIndex == m_rangeHi)
152 lookupIndex = m_rangeLo;
153 if (lookupIndex == m_addIndex)
154 lookupIndex = m_removeIndex;
158 if (m_shared.doneFlag.load(turf::Relaxed))
160 key = m_removeIndex * Prime;
165 if (++m_removeIndex == m_rangeHi)
166 m_removeIndex = m_rangeLo;
169 if (s32(lookupIndex - m_removeIndex) < 0)
170 lookupIndex = m_removeIndex;
171 for (ureg l = 0; l < m_shared.readsPerWrite; l++) {
172 if (m_shared.doneFlag.load(turf::Relaxed))
174 key = lookupIndex * Prime;
176 volatile void* value = map->get(key);
180 if (++lookupIndex == m_rangeHi)
181 lookupIndex = m_rangeLo;
182 if (lookupIndex == m_addIndex)
183 lookupIndex = m_removeIndex;
186 if (m_threadIndex == 0)
187 m_shared.doneFlag.store(1, turf::Relaxed);
188 m_threadCtx.update();
189 turf::CPUTimer::Point end = turf::CPUTimer::get();
192 stats.duration = converter.toSeconds(end - start);
197 static const turf::extra::Option Options[] = {
198 {"readsPerWrite", 'r', true, "number of reads per write"},
199 {"itersPerChunk", 'i', true, "number of iterations per chunk"},
200 {"chunks", 'c', true, "number of chunks to execute"},
201 {"keepChunkFraction", 'k', true, "threshold fraction of chunk timings to keep"},
204 int main(int argc, const char** argv) {
205 turf::extra::Options options(Options, TURF_STATIC_ARRAY_SIZE(Options));
206 options.parse(argc, argv);
207 ureg readsPerWrite = options.getInteger("readsPerWrite", DefaultReadsPerWrite);
208 ureg itersPerChunk = options.getInteger("itersPerChunk", DefaultItersPerChunk);
209 ureg chunks = options.getInteger("chunks", DefaultChunks);
210 double keepChunkFraction = options.getDouble("keepChunkFraction", 1.0);
212 turf::extra::JobDispatcher dispatcher;
213 ureg numCores = dispatcher.getNumPhysicalCores();
214 TURF_ASSERT(numCores > 0);
215 MapAdapter adapter(numCores);
217 // Create shared state and register first thread
218 SharedState shared(adapter, NumKeysPerThread, readsPerWrite, itersPerChunk);
219 std::vector<ThreadState> threads;
220 threads.reserve(numCores);
221 for (ureg t = 0; t < numCores; t++) {
222 u32 rangeLo = 0xffffffffu / numCores * t + 1;
223 u32 rangeHi = 0xffffffffu / numCores * (t + 1) + 1;
224 threads.emplace_back(shared, t, rangeLo, rangeHi);
226 dispatcher.kickOne(0, &ThreadState::registerThread, threads[0]);
229 // Create the map and populate it entirely from main thread
230 MapAdapter::Map map(MapAdapter::getInitialCapacity(numCores * NumKeysPerThread));
232 for (ureg t = 0; t < numCores; t++) {
233 threads[t].initialPopulate();
237 printf("'mapType': '%s',\n", MapAdapter::MapName);
238 printf("'population': %d,\n", (int) (numCores * NumKeysPerThread));
239 printf("'readsPerWrite': %d,\n", (int) readsPerWrite);
240 printf("'itersPerChunk': %d,\n", (int) itersPerChunk);
241 printf("'chunks': %d,\n", (int) chunks);
242 printf("'keepChunkFraction': %f,\n", keepChunkFraction);
243 printf("'labels': ('numThreads', 'mapOpsDone', 'totalTime'),\n"), printf("'points': [\n");
244 for (shared.numThreads = 1; shared.numThreads <= numCores; shared.numThreads++) {
245 if (shared.numThreads > 1) {
246 // Spawn and register a new thread
247 dispatcher.kickOne(shared.numThreads - 1, &ThreadState::registerThread, threads[shared.numThreads - 1]);
250 std::vector<ThreadState::Stats> kickTotals;
251 for (ureg c = 0; c < chunks; c++) {
252 shared.doneFlag.storeNonatomic(false);
253 dispatcher.kickMulti(&ThreadState::run, &threads[0], shared.numThreads);
255 ThreadState::Stats kickTotal;
256 for (ureg t = 0; t < shared.numThreads; t++)
257 kickTotal += threads[t].m_stats;
258 kickTotals.push_back(kickTotal);
261 std::sort(kickTotals.begin(), kickTotals.end());
262 ThreadState::Stats totals;
263 for (ureg t = 0; t < ureg(kickTotals.size() * keepChunkFraction); t++) {
264 totals += kickTotals[t];
267 printf(" (%d, %d, %f),\n", int(shared.numThreads), int(totals.mapOpsDone), totals.duration);
275 dispatcher.kickMulti(&ThreadState::unregisterThread, &threads[0], threads.size());