/*
 * Copyright 2014 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <folly/test/DeterministicSchedule.h>

#include <assert.h>
#include <errno.h>
#include <semaphore.h>

#include <algorithm>
#include <functional>
#include <list>
#include <memory>
#include <mutex>
#include <random>
#include <thread>
#include <unordered_map>
#include <utility>
#include <vector>
26 namespace folly { namespace test {
28 FOLLY_TLS sem_t* DeterministicSchedule::tls_sem;
29 FOLLY_TLS DeterministicSchedule* DeterministicSchedule::tls_sched;
31 // access is protected by futexLock
32 static std::unordered_map<detail::Futex<DeterministicAtomic>*,
33 std::list<std::pair<uint32_t,bool*>>> futexQueues;
35 static std::mutex futexLock;
37 DeterministicSchedule::DeterministicSchedule(
38 const std::function<int(int)>& scheduler)
39 : scheduler_(scheduler)
41 assert(tls_sem == nullptr);
42 assert(tls_sched == nullptr);
45 sem_init(tls_sem, 0, 1);
46 sems_.push_back(tls_sem);
51 DeterministicSchedule::~DeterministicSchedule() {
52 assert(tls_sched == this);
53 assert(sems_.size() == 1);
54 assert(sems_[0] == tls_sem);
58 std::function<int(int)>
59 DeterministicSchedule::uniform(long seed) {
60 auto rand = std::make_shared<std::ranlux48>(seed);
61 return [rand](size_t numActive) {
62 auto dist = std::uniform_int_distribution<int>(0, numActive - 1);
67 struct UniformSubset {
68 UniformSubset(long seed, int subsetSize, int stepsBetweenSelect)
69 : uniform_(DeterministicSchedule::uniform(seed))
70 , subsetSize_(subsetSize)
71 , stepsBetweenSelect_(stepsBetweenSelect)
76 size_t operator()(size_t numActive) {
77 adjustPermSize(numActive);
78 if (stepsLeft_-- == 0) {
79 stepsLeft_ = stepsBetweenSelect_ - 1;
82 return perm_[uniform_(std::min(numActive, subsetSize_))];
86 std::function<int(int)> uniform_;
87 const size_t subsetSize_;
88 const int stepsBetweenSelect_;
91 // only the first subsetSize_ is properly randomized
92 std::vector<int> perm_;
94 void adjustPermSize(size_t numActive) {
95 if (perm_.size() > numActive) {
96 perm_.erase(std::remove_if(perm_.begin(), perm_.end(),
97 [=](size_t x){ return x >= numActive; }), perm_.end());
99 while (perm_.size() < numActive) {
100 perm_.push_back(perm_.size());
103 assert(perm_.size() == numActive);
106 void shufflePrefix() {
107 for (size_t i = 0; i < std::min(perm_.size() - 1, subsetSize_); ++i) {
108 int j = uniform_(perm_.size() - i) + i;
109 std::swap(perm_[i], perm_[j]);
114 std::function<int(int)>
115 DeterministicSchedule::uniformSubset(long seed, int n, int m) {
116 auto gen = std::make_shared<UniformSubset>(seed, n, m);
117 return [=](size_t numActive) { return (*gen)(numActive); };
121 DeterministicSchedule::beforeSharedAccess() {
128 DeterministicSchedule::afterSharedAccess() {
129 auto sched = tls_sched;
134 sem_post(sched->sems_[sched->scheduler_(sched->sems_.size())]);
138 DeterministicSchedule::getRandNumber(int n) {
140 return tls_sched->scheduler_(n);
142 return std::rand() % n;
146 DeterministicSchedule::beforeThreadCreate() {
147 sem_t* s = new sem_t;
149 beforeSharedAccess();
156 DeterministicSchedule::afterThreadCreate(sem_t* sem) {
157 assert(tls_sem == nullptr);
158 assert(tls_sched == nullptr);
161 bool started = false;
163 beforeSharedAccess();
164 if (active_.count(std::this_thread::get_id()) == 1) {
172 DeterministicSchedule::beforeThreadExit() {
173 assert(tls_sched == this);
174 beforeSharedAccess();
175 sems_.erase(std::find(sems_.begin(), sems_.end(), tls_sem));
176 active_.erase(std::this_thread::get_id());
177 if (sems_.size() > 0) {
180 sem_destroy(tls_sem);
187 DeterministicSchedule::join(std::thread& child) {
188 auto sched = tls_sched;
192 beforeSharedAccess();
193 done = !sched->active_.count(child.get_id());
201 DeterministicSchedule::post(sem_t* sem) {
202 beforeSharedAccess();
208 DeterministicSchedule::tryWait(sem_t* sem) {
209 beforeSharedAccess();
210 int rv = sem_trywait(sem);
215 assert(errno == EAGAIN);
221 DeterministicSchedule::wait(sem_t* sem) {
222 while (!tryWait(sem)) {
223 // we're not busy waiting because this is a deterministic schedule
229 namespace folly { namespace detail {
231 using namespace test;
232 using namespace std::chrono;
236 Futex<DeterministicAtomic>::futexWaitImpl(
238 time_point<system_clock>* absSystemTimeout,
239 time_point<steady_clock>* absSteadyTimeout,
241 bool hasTimeout = absSystemTimeout != nullptr || absSteadyTimeout != nullptr;
243 FutexResult result = FutexResult::AWOKEN;
246 DeterministicSchedule::beforeSharedAccess();
248 if (data == expected) {
249 auto& queue = futexQueues[this];
250 queue.push_back(std::make_pair(waitMask, &awoken));
251 auto ours = queue.end();
255 DeterministicSchedule::afterSharedAccess();
256 DeterministicSchedule::beforeSharedAccess();
259 // Simulate spurious wake-ups, timeouts each time with
260 // a 10% probability if we haven't been woken up already
261 if (!awoken && hasTimeout &&
262 DeterministicSchedule::getRandNumber(100) < 10) {
263 assert(futexQueues.count(this) != 0 &&
264 &futexQueues[this] == &queue);
267 futexQueues.erase(this);
269 // Simulate ETIMEDOUT 90% of the time and other failures
272 DeterministicSchedule::getRandNumber(100) >= 10
273 ? FutexResult::TIMEDOUT : FutexResult::INTERRUPTED;
278 result = FutexResult::VALUE_CHANGED;
281 DeterministicSchedule::afterSharedAccess();
287 Futex<DeterministicAtomic>::futexWake(int count, uint32_t wakeMask) {
289 DeterministicSchedule::beforeSharedAccess();
291 if (futexQueues.count(this) > 0) {
292 auto& queue = futexQueues[this];
293 auto iter = queue.begin();
294 while (iter != queue.end() && rv < count) {
296 if ((cur->first & wakeMask) != 0) {
297 *(cur->second) = true;
303 futexQueues.erase(this);
307 DeterministicSchedule::afterSharedAccess();
313 CacheLocality const& CacheLocality::system<test::DeterministicAtomic>() {
314 static CacheLocality cache(CacheLocality::uniform(16));
319 test::DeterministicAtomic<size_t>
320 SequentialThreadId<test::DeterministicAtomic>::prevId(0);
324 SequentialThreadId<test::DeterministicAtomic>::currentId(0);
327 const AccessSpreader<test::DeterministicAtomic>
328 AccessSpreader<test::DeterministicAtomic>::stripeByCore(
329 CacheLocality::system<>().numCachesByLevel.front());
332 const AccessSpreader<test::DeterministicAtomic>
333 AccessSpreader<test::DeterministicAtomic>::stripeByChip(
334 CacheLocality::system<>().numCachesByLevel.back());
337 AccessSpreaderArray<test::DeterministicAtomic,128>
338 AccessSpreaderArray<test::DeterministicAtomic,128>::sharedInstance = {};
343 AccessSpreader<test::DeterministicAtomic>::pickGetcpuFunc(size_t numStripes) {
344 return &SequentialThreadId<test::DeterministicAtomic>::getcpu;