From: Yedidya Feldblum
Date: Mon, 21 Jul 2014 21:31:09 +0000 (-0700)
Subject: Fix a folly build failure under clang: MPMCQueueTest.cpp.
X-Git-Tag: v0.22.0~439
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=a9a0480402b4d95a1303817f457cb8c3ac0493dd;p=folly.git

Fix a folly build failure under clang: MPMCQueueTest.cpp.

Summary:
[Folly] Fix a folly build failure under clang: MPMCQueueTest.cpp.

clang-v3.4 has a bug triggered by the combination of a lambda expression
defined inside a function template that takes a template name (rather than a
type name) as a template argument. In the interest of building folly under
clang-v3.4, this diff extracts each such lambda expression into a separate
function template, so that the function templates taking template names as
template arguments no longer contain lambda expressions.

Test Plan: Build folly/test/MPMCQueueTest.cpp under clang.

Reviewed By: njormrod@fb.com

Subscribers: mathieubaudet, dougw

FB internal diff: D1446279

Tasks: 4723132
---

diff --git a/folly/test/MPMCQueueTest.cpp b/folly/test/MPMCQueueTest.cpp
index e688d770..ea0fbd12 100644
--- a/folly/test/MPMCQueueTest.cpp
+++ b/folly/test/MPMCQueueTest.cpp
@@ -39,6 +39,22 @@ using namespace test;
 
 typedef DeterministicSchedule DSched;
 
+template <template <typename> class Atom>
+void run_mt_sequencer_thread(
+    int numThreads,
+    int numOps,
+    uint32_t init,
+    TurnSequencer<Atom>& seq,
+    Atom<uint32_t>& spinThreshold,
+    int& prev,
+    int i) {
+  for (int op = i; op < numOps; op += numThreads) {
+    seq.waitForTurn(init + op, spinThreshold, (op % 32) == 0);
+    EXPECT_EQ(prev, op - 1);
+    prev = op;
+    seq.completeTurn(init + op);
+  }
+}
 template <template <typename> class Atom>
 void run_mt_sequencer_test(int numThreads, int numOps, uint32_t init) {
@@ -48,14 +64,9 @@ void run_mt_sequencer_test(int numThreads, int numOps, uint32_t init) {
   int prev = -1;
   std::vector<std::thread> threads(numThreads);
   for (int i = 0; i < numThreads; ++i) {
-    threads[i] = DSched::thread([&, i]{
-      for (int op = i; op < numOps; op += numThreads) {
-        seq.waitForTurn(init + op, spinThreshold, (op % 32) == 0);
-        EXPECT_EQ(prev, op - 1);
-        prev = op;
-        seq.completeTurn(init + op);
-      }
-    });
+    threads[i] = DSched::thread(std::bind(run_mt_sequencer_thread<Atom>,
+          numThreads, numOps, init, std::ref(seq), std::ref(spinThreshold),
+          std::ref(prev), i));
   }
 
   for (auto& thr : threads) {
@@ -175,6 +186,33 @@ TEST(MPMCQueue, enq_capacity_test) {
   }
 }
 
+template <template <typename> class Atom>
+void runTryEnqDeqThread(
+    int numThreads,
+    int n, /*numOps*/
+    MPMCQueue<int, Atom>& cq,
+    std::atomic<uint64_t>& sum,
+    int t) {
+  uint64_t threadSum = 0;
+  int src = t;
+  // received doesn't reflect any actual values, we just start with
+  // t and increment by numThreads to get the rounding of termination
+  // correct if numThreads doesn't evenly divide numOps
+  int received = t;
+  while (src < n || received < n) {
+    if (src < n && cq.write(src)) {
+      src += numThreads;
+    }
+
+    int dst;
+    if (received < n && cq.read(dst)) {
+      received += numThreads;
+      threadSum += dst;
+    }
+  }
+  sum += threadSum;
+}
+
 template <template <typename> class Atom>
 void runTryEnqDeqTest(int numThreads, int numOps) {
@@ -186,26 +224,8 @@ void runTryEnqDeqTest(int numThreads, int numOps) {
   std::vector<std::thread> threads(numThreads);
   std::atomic<uint64_t> sum(0);
   for (int t = 0; t < numThreads; ++t) {
-    threads[t] = DSched::thread([&,t]{
-      uint64_t threadSum = 0;
-      int src = t;
-      // received doesn't reflect any actual values, we just start with
-      // t and increment by numThreads to get the rounding of termination
-      // correct if numThreads doesn't evenly divide numOps
-      int received = t;
-      while (src < n || received < n) {
-        if (src < n && cq.write(src)) {
-          src += numThreads;
-        }
-
-        int dst;
-        if (received < n && cq.read(dst)) {
-          received += numThreads;
-          threadSum += dst;
-        }
-      }
-      sum += threadSum;
-    });
+    threads[t] = DSched::thread(std::bind(runTryEnqDeqThread<Atom>,
+          numThreads, n, std::ref(cq), std::ref(sum), t));
   }
   for (auto& t : threads) {
     DSched::join(t);
@@ -342,6 +362,26 @@ TEST(MPMCQueue, mt_prod_cons) {
   LOG(INFO) << PC_BENCH(MPMCQueue<int>(100000), 32, 100, n);
 }
 
+template <template <typename> class Atom>
+void runNeverFailThread(
+    int numThreads,
+    int n, /*numOps*/
+    MPMCQueue<int, Atom>& cq,
+    std::atomic<uint64_t>& sum,
+    int t) {
+  uint64_t threadSum = 0;
+  for (int i = t; i < n; i += numThreads) {
+    // enq + deq
+    EXPECT_TRUE(cq.writeIfNotFull(i));
+
+    int dest = -1;
+    EXPECT_TRUE(cq.readIfNotEmpty(dest));
+    EXPECT_TRUE(dest >= 0);
+    threadSum += dest;
+  }
+  sum += threadSum;
+}
+
 template <template <typename> class Atom>
 uint64_t runNeverFailTest(int numThreads, int numOps) {
@@ -353,19 +393,8 @@ uint64_t runNeverFailTest(int numThreads, int numOps) {
   std::vector<std::thread> threads(numThreads);
   std::atomic<uint64_t> sum(0);
   for (int t = 0; t < numThreads; ++t) {
-    threads[t] = DSched::thread([&,t]{
-      uint64_t threadSum = 0;
-      for (int i = t; i < n; i += numThreads) {
-        // enq + deq
-        EXPECT_TRUE(cq.writeIfNotFull(i));
-
-        int dest = -1;
-        EXPECT_TRUE(cq.readIfNotEmpty(dest));
-        EXPECT_TRUE(dest >= 0);
-        threadSum += dest;
-      }
-      sum += threadSum;
-    });
+    threads[t] = DSched::thread(std::bind(runNeverFailThread<Atom>,
+          numThreads, n, std::ref(cq), std::ref(sum), t));
   }
   for (auto& t : threads) {
     DSched::join(t);
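
---

For reference, a minimal standalone sketch of the workaround pattern used
above, outside the folly tree (incrementThread and runIncrementTest are
hypothetical names invented for this sketch, not part of the diff): the
lambda is extracted into a free function template and bound with std::bind,
mirroring the run_mt_sequencer_thread, runTryEnqDeqThread, and
runNeverFailThread changes.

#include <atomic>
#include <functional>
#include <thread>
#include <vector>

// Extracted thread body: a free function template. Written as a lambda
// inside runIncrementTest below, this is the shape the commit message
// describes as triggering the clang-v3.4 bug.
template <template <typename> class Atom>
void incrementThread(Atom<int>& counter, int numOps) {
  for (int op = 0; op < numOps; ++op) {
    ++counter;
  }
}

// Function template taking a template name (rather than a type name) as a
// template argument. Instead of defining a lambda here, bind the extracted
// function template; the explicit <Atom> is required because std::bind
// cannot name an uninstantiated function template.
template <template <typename> class Atom>
int runIncrementTest(int numThreads, int numOps) {
  Atom<int> counter(0);
  std::vector<std::thread> threads(numThreads);
  for (int t = 0; t < numThreads; ++t) {
    threads[t] = std::thread(
        std::bind(incrementThread<Atom>, std::ref(counter), numOps));
  }
  for (auto& thr : threads) {
    thr.join();
  }
  return counter; // std::atomic<int> converts to int via an atomic load
}

int main() {
  // 4 threads x 1000 increments each; expect 4000.
  return runIncrementTest<std::atomic>(4, 1000) == 4000 ? 0 : 1;
}

As in the diff, shared state that the original lambdas captured by reference
is passed through std::ref, so std::bind stores a reference rather than a
copy.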