From: James Sedgwick
Date: Wed, 24 Sep 2014 17:30:21 +0000 (-0700)
Subject: de-flake tests
X-Git-Tag: v0.22.0~330
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=471b4b72c9adf0570ba3999e680906b2d833d1ae;p=folly.git

de-flake tests

Summary: make these more serialized / event-based so they don't get flaky under high load

Test Plan: ran under load - caveat: I was not able to repro the flakiness @njormrod reported, but by inspection these should be fine

Reviewed By: njormrod@fb.com

Subscribers: fugalh, njormrod

FB internal diff: D1574640

Tasks: 5225808
---

diff --git a/folly/experimental/wangle/concurrent/test/ThreadPoolExecutorTest.cpp b/folly/experimental/wangle/concurrent/test/ThreadPoolExecutorTest.cpp
index 8b972773..d36cca78 100644
--- a/folly/experimental/wangle/concurrent/test/ThreadPoolExecutorTest.cpp
+++ b/folly/experimental/wangle/concurrent/test/ThreadPoolExecutorTest.cpp
@@ -132,33 +132,24 @@ TEST(ThreadPoolExecutorTest, IOResizeUnderLoad) {
 
 template <class TPE>
 static void poolStats() {
-  {
-    TPE tpe(10);
-    for (int i = 0; i < 20; i++) {
-      tpe.add(burnMs(20));
-    }
-    burnMs(10)();
-    auto stats = tpe.getPoolStats();
-    EXPECT_EQ(10, stats.threadCount);
-    EXPECT_EQ(0, stats.idleThreadCount);
-    EXPECT_EQ(10, stats.activeThreadCount);
-    EXPECT_EQ(10, stats.pendingTaskCount);
-    EXPECT_EQ(20, stats.totalTaskCount);
-  }
-
-  {
-    TPE tpe(10);
-    for (int i = 0; i < 5; i++) {
-      tpe.add(burnMs(20));
-    }
-    burnMs(10)();
-    auto stats = tpe.getPoolStats();
-    EXPECT_EQ(10, stats.threadCount);
-    EXPECT_EQ(5, stats.idleThreadCount);
-    EXPECT_EQ(5, stats.activeThreadCount);
-    EXPECT_EQ(0, stats.pendingTaskCount);
-    EXPECT_EQ(5, stats.totalTaskCount);
-  }
+  folly::Baton<> startBaton, endBaton;
+  TPE tpe(1);
+  auto stats = tpe.getPoolStats();
+  EXPECT_EQ(1, stats.threadCount);
+  EXPECT_EQ(1, stats.idleThreadCount);
+  EXPECT_EQ(0, stats.activeThreadCount);
+  EXPECT_EQ(0, stats.pendingTaskCount);
+  EXPECT_EQ(0, stats.totalTaskCount);
+  tpe.add([&](){ startBaton.post(); endBaton.wait(); });
+  tpe.add([&](){});
+  startBaton.wait();
+  stats = tpe.getPoolStats();
+  EXPECT_EQ(1, stats.threadCount);
+  EXPECT_EQ(0, stats.idleThreadCount);
+  EXPECT_EQ(1, stats.activeThreadCount);
+  EXPECT_EQ(1, stats.pendingTaskCount);
+  EXPECT_EQ(2, stats.totalTaskCount);
+  endBaton.post();
 }
 
 TEST(ThreadPoolExecutorTest, CPUPoolStats) {
@@ -171,27 +162,20 @@ TEST(ThreadPoolExecutorTest, IOPoolStats) {
 
 template <class TPE>
 static void taskStats() {
-  TPE tpe(10);
+  TPE tpe(1);
   std::atomic<int> c(0);
   tpe.subscribeToTaskStats(Observer<ThreadPoolExecutor::TaskStats>::create(
       [&] (ThreadPoolExecutor::TaskStats stats) {
         int i = c++;
-        if (i < 10) {
-          EXPECT_GE(milliseconds(10), stats.waitTime);
-          EXPECT_LE(milliseconds(20), stats.runTime);
-        } else {
-          EXPECT_LE(milliseconds(10), stats.waitTime);
-          EXPECT_LE(milliseconds(10), stats.runTime);
+        EXPECT_LT(milliseconds(0), stats.runTime);
+        if (i == 1) {
+          EXPECT_LT(milliseconds(0), stats.waitTime);
         }
       }));
-  for (int i = 0; i < 10; i++) {
-    tpe.add(burnMs(20));
-  }
-  for (int i = 0; i < 10; i++) {
-    tpe.add(burnMs(10));
-  }
+  tpe.add(burnMs(10));
+  tpe.add(burnMs(10));
   tpe.join();
-  EXPECT_EQ(20, c);
+  EXPECT_EQ(2, c);
 }
 
 TEST(ThreadPoolExecutorTest, CPUTaskStats) {
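
The de-flaked poolStats test above hinges on a two-Baton handshake: the queued task posts startBaton as soon as it begins running and then blocks on endBaton, so the test can wait on startBaton, assert on the pool stats while one thread is provably busy and one task is provably queued, and only then post endBaton to let the pool drain. The following is a minimal standalone sketch of that handshake, not part of the commit: it uses a plain std::thread in place of the pool thread and assumes folly is installed (the Baton header was folly/Baton.h around the time of this commit and is folly/synchronization/Baton.h in current folly).

// Sketch only: illustrates the start/end Baton handshake, not the actual test.
#include <folly/synchronization/Baton.h>  // folly/Baton.h in 2014-era folly
#include <cstdio>
#include <thread>

int main() {
  folly::Baton<> startBaton, endBaton;

  // Stand-in for the pool thread running the blocked task.
  std::thread worker([&] {
    startBaton.post();  // signal that the "task" is now definitely running
    endBaton.wait();    // stay busy until the checking thread releases it
  });

  startBaton.wait();    // past this point the task is guaranteed active
  std::printf("safe to assert on activeThreadCount/pendingTaskCount here\n");
  endBaton.post();      // release the task so everything can finish

  worker.join();
  return 0;
}

Because both sides block on explicit events instead of burnMs-style sleeps, the assertions no longer depend on scheduler timing, which is what removes the flakiness under load.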