#undef RW_SPINLOCK_USE_SSE_INSTRUCTIONS_
#endif
+#include <algorithm>
#include <atomic>
#include <string>
-#include <algorithm>
+#include <thread>
-#include <sched.h>
#include <glog/logging.h>
#include <folly/Likely.h>
void lock() {
int count = 0;
while (!LIKELY(try_lock())) {
- if (++count > 1000) sched_yield();
+ if (++count > 1000) std::this_thread::yield();
}
}
void lock_shared() {
int count = 0;
while (!LIKELY(try_lock_shared())) {
- if (++count > 1000) sched_yield();
+ if (++count > 1000) std::this_thread::yield();
}
}
void lock_upgrade() {
int count = 0;
while (!try_lock_upgrade()) {
- if (++count > 1000) sched_yield();
+ if (++count > 1000) std::this_thread::yield();
}
}
void unlock_upgrade_and_lock() {
int64_t count = 0;
while (!try_unlock_upgrade_and_lock()) {
- if (++count > 1000) sched_yield();
+ if (++count > 1000) std::this_thread::yield();
}
}
* turns.
*/
void writeLockAggressive() {
- // sched_yield() is needed here to avoid a pathology if the number
- // of threads attempting concurrent writes is >= the number of real
+ // std::this_thread::yield() is needed here to avoid a pathology if the
+ // number of threads attempting concurrent writes is >= the number of real
// cores allocated to this process. This is less likely than the
// corresponding situation in lock_shared(), but we still want to
QuarterInt val = __sync_fetch_and_add(&ticket.users, 1);
while (val != load_acquire(&ticket.write)) {
asm_volatile_pause();
- if (UNLIKELY(++count > 1000)) sched_yield();
+ if (UNLIKELY(++count > 1000)) std::this_thread::yield();
}
}
// there are a lot of competing readers. The aggressive spinning
// can help to avoid starving writers.
//
- // We don't worry about sched_yield() here because the caller
+ // We don't worry about std::this_thread::yield() here because the caller
// has already explicitly abandoned fairness.
while (!try_lock()) {}
}
}
void lock_shared() {
- // sched_yield() is important here because we can't grab the
+ // std::this_thread::yield() is important here because we can't grab the
// shared lock if there is a pending writeLockAggressive, so we
// need to let threads that already have a shared lock complete
int count = 0;
while (!LIKELY(try_lock_shared())) {
asm_volatile_pause();
- if (UNLIKELY((++count & 1023) == 0)) sched_yield();
+ if (UNLIKELY((++count & 1023) == 0)) std::this_thread::yield();
}
}
#include <folly/detail/CacheLocality.h>
-#include <sched.h>
#include <memory>
#include <thread>
#include <unordered_map>
+
#include <glog/logging.h>
+
#include <folly/Benchmark.h>
using namespace folly::detail;
ready++;
while (!go.load()) {
- sched_yield();
+ std::this_thread::yield();
}
std::atomic<int> localWork(0);
for (size_t i = iters; i > 0; --i) {
}
while (ready < numThreads) {
- sched_yield();
+ std::this_thread::yield();
}
braces.dismiss();
go = true;
while (threads.size() < numThreads) {
threads.push_back(std::thread([&]() {
while (!go.load()) {
- sched_yield();
+ std::this_thread::yield();
}
std::atomic<size_t> localCounter(0);
std::atomic<int> localWork(0);