/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <folly/Portability.h>
#include <folly/Preprocessor.h> // for FB_ANONYMOUS_VARIABLE
#include <folly/ScopeGuard.h>
#include <folly/portability/GFlags.h>
#include <folly/portability/Time.h>

#include <boost/function_types/function_arity.hpp>

#include <glog/logging.h>

#include <cassert>
#include <functional>
#include <limits>
#include <type_traits>
#include <utility>
33 DECLARE_bool(benchmark);
38 * Runs all benchmarks defined. Usually put in main().
43 * Runs all benchmarks defined if and only if the --benchmark flag has
44 * been passed to the program. Usually put in main().
46 inline bool runBenchmarksOnFlag() {
47 if (FLAGS_benchmark) {
50 return FLAGS_benchmark;
/**
 * This is the clock ID used for measuring time. On older kernels, the
 * resolution of this clock will be very coarse, which will cause the
 * benchmarks to fail.
 */
enum Clock { DEFAULT_CLOCK_ID = CLOCK_REALTIME };
// (elapsed nanoseconds, number of iterations actually run) — the unit of
// measurement returned by one benchmark execution.
typedef std::pair<uint64_t, unsigned int> TimeIterPair;

/**
 * Adds a benchmark wrapped in a std::function. Only used
 * internally. Pass by value is intentional.
 *
 * @param file  source file the benchmark was declared in (__FILE__)
 * @param name  display name of the benchmark
 */
void addBenchmarkImpl(const char* file,
                      const char* name,
                      std::function<TimeIterPair(unsigned int)>);
/**
 * Takes the difference between two timespec values. end is assumed to
 * occur after start.
 *
 * @return the difference in nanoseconds.
 */
inline uint64_t timespecDiff(timespec end, timespec start) {
  if (end.tv_sec == start.tv_sec) {
    assert(end.tv_nsec >= start.tv_nsec);
    return uint64_t(end.tv_nsec - start.tv_nsec);
  }
  assert(end.tv_sec > start.tv_sec);
  auto diff = uint64_t(end.tv_sec - start.tv_sec);
  // Guard against overflow when converting whole seconds to nanoseconds.
  assert(diff <
         std::numeric_limits<uint64_t>::max() / 1000000000UL);
  return diff * 1000000000UL
    + end.tv_nsec - start.tv_nsec;
}
90 * Takes the difference between two sets of timespec values. The first
91 * two come from a high-resolution clock whereas the other two come
92 * from a low-resolution clock. The crux of the matter is that
93 * high-res values may be bogus as documented in
94 * http://linux.die.net/man/3/clock_gettime. The trouble is when the
95 * running process migrates from one CPU to another, which is more
96 * likely for long-running processes. Therefore we watch for high
97 * differences between the two timings.
99 * This function is subject to further improvements.
101 inline uint64_t timespecDiff(timespec end, timespec start,
102 timespec endCoarse, timespec startCoarse) {
103 auto fine = timespecDiff(end, start);
104 auto coarse = timespecDiff(endCoarse, startCoarse);
105 if (coarse - fine >= 1000000) {
106 // The fine time is in all likelihood bogus
112 } // namespace detail
115 * Supporting type for BENCHMARK_SUSPEND defined below.
117 struct BenchmarkSuspender {
118 BenchmarkSuspender() {
119 CHECK_EQ(0, clock_gettime(detail::DEFAULT_CLOCK_ID, &start));
122 BenchmarkSuspender(const BenchmarkSuspender &) = delete;
123 BenchmarkSuspender(BenchmarkSuspender && rhs) noexcept {
125 rhs.start.tv_nsec = rhs.start.tv_sec = 0;
128 BenchmarkSuspender& operator=(const BenchmarkSuspender &) = delete;
129 BenchmarkSuspender& operator=(BenchmarkSuspender && rhs) {
130 if (start.tv_nsec > 0 || start.tv_sec > 0) {
134 rhs.start.tv_nsec = rhs.start.tv_sec = 0;
138 ~BenchmarkSuspender() {
139 if (start.tv_nsec > 0 || start.tv_sec > 0) {
145 assert(start.tv_nsec > 0 || start.tv_sec > 0);
147 start.tv_nsec = start.tv_sec = 0;
151 assert(start.tv_nsec == 0 || start.tv_sec == 0);
152 CHECK_EQ(0, clock_gettime(detail::DEFAULT_CLOCK_ID, &start));
156 auto dismissing(F f) -> typename std::result_of<F()>::type {
157 SCOPE_EXIT { rehire(); };
163 * This is for use inside of if-conditions, used in BENCHMARK macros.
164 * If-conditions bypass the explicit on operator bool.
166 explicit operator bool() const {
171 * Accumulates nanoseconds spent outside benchmark.
173 typedef uint64_t NanosecondsSpent;
174 static NanosecondsSpent nsSpent;
179 CHECK_EQ(0, clock_gettime(detail::DEFAULT_CLOCK_ID, &end));
180 nsSpent += detail::timespecDiff(end, start);
188 * Adds a benchmark. Usually not called directly but instead through
189 * the macro BENCHMARK defined below. The lambda function involved
190 * must take exactly one parameter of type unsigned, and the benchmark
191 * uses it with counter semantics (iteration occurs inside the
194 template <typename Lambda>
195 typename std::enable_if<
196 boost::function_types::function_arity<decltype(&Lambda::operator())>::value
199 addBenchmark(const char* file, const char* name, Lambda&& lambda) {
200 auto execute = [=](unsigned int times) {
201 BenchmarkSuspender::nsSpent = 0;
205 // CORE MEASUREMENT STARTS
206 auto const r1 = clock_gettime(detail::DEFAULT_CLOCK_ID, &start);
207 niter = lambda(times);
208 auto const r2 = clock_gettime(detail::DEFAULT_CLOCK_ID, &end);
209 // CORE MEASUREMENT ENDS
214 return detail::TimeIterPair(
215 detail::timespecDiff(end, start) - BenchmarkSuspender::nsSpent,
219 detail::addBenchmarkImpl(file, name,
220 std::function<detail::TimeIterPair(unsigned int)>(execute));
224 * Adds a benchmark. Usually not called directly but instead through
225 * the macro BENCHMARK defined below. The lambda function involved
226 * must take zero parameters, and the benchmark calls it repeatedly
227 * (iteration occurs outside the function).
229 template <typename Lambda>
230 typename std::enable_if<
231 boost::function_types::function_arity<decltype(&Lambda::operator())>::value
234 addBenchmark(const char* file, const char* name, Lambda&& lambda) {
235 addBenchmark(file, name, [=](unsigned int times) {
236 unsigned int niter = 0;
237 while (times-- > 0) {
/**
 * Call doNotOptimizeAway(var) against variables that you use for
 * benchmarking but otherwise are useless. The compiler tends to do a
 * good job at eliminating unused variables, and this function fools
 * it into thinking var is in fact needed.
 */
#ifdef _MSC_VER

#pragma optimize("", off)

template <class T>
void doNotOptimizeAway(T&& datum) {
  // With optimizations off, this self-assignment keeps `datum` alive.
  datum = datum;
}

#pragma optimize("", on)

#elif defined(__clang__)

template <class T>
__attribute__((__optnone__)) void doNotOptimizeAway(T&& /* datum */) {}

#else

template <class T>
void doNotOptimizeAway(T&& datum) {
  // Empty asm that claims to read and write `datum`, forcing the
  // compiler to materialize it without emitting any instructions.
  asm volatile("" : "+r" (datum));
}

#endif
/**
 * Introduces a benchmark function. Used internally, see BENCHMARK and
 * friends below.
 */
#define BENCHMARK_IMPL(funName, stringName, rv, paramType, paramName) \
  static void funName(paramType); \
  static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = ( \
    ::folly::addBenchmark(__FILE__, stringName, \
      [](paramType paramName) -> unsigned { funName(paramName); \
                                            return rv; }), \
    true); \
  static void funName(paramType paramName)
/**
 * Introduces a benchmark function with support for returning the actual
 * number of iterations. Used internally, see BENCHMARK_MULTI and friends
 * below.
 */
#define BENCHMARK_MULTI_IMPL(funName, stringName, paramType, paramName) \
  static unsigned funName(paramType); \
  static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = ( \
    ::folly::addBenchmark(__FILE__, stringName, \
      [](paramType paramName) { return funName(paramName); }), \
    true); \
  static unsigned funName(paramType paramName)
/**
 * Introduces a benchmark function. Use with either one or two arguments.
 * The first is the name of the benchmark. Use something descriptive, such
 * as insertVectorBegin. The second argument may be missing, or could be a
 * symbolic counter. The counter dictates how many internal iteration the
 * benchmark does. Example:
 *
 * BENCHMARK(vectorPushBack) {
 *   vector<int> v;
 *   v.push_back(42);
 * }
 *
 * BENCHMARK(insertVectorBegin, n) {
 *   vector<int> v;
 *   FOR_EACH_RANGE (i, 0, n) {
 *     v.insert(v.begin(), 42);
 *   }
 * }
 */
#define BENCHMARK(name, ...) \
  BENCHMARK_IMPL( \
    name, \
    FB_STRINGIZE(name), \
    FB_ARG_2_OR_1(1, ## __VA_ARGS__), \
    FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__), \
    __VA_ARGS__)
/**
 * Like BENCHMARK above, but allows the user to return the actual
 * number of iterations executed in the function body. This can be
 * useful if the benchmark function doesn't know upfront how many
 * iterations it's going to run or if it runs through a certain
 * number of test cases, e.g.:
 *
 * BENCHMARK_MULTI(benchmarkSomething) {
 *   std::vector<int> testCases { 0, 1, 1, 2, 3, 5 };
 *   for (int c : testCases) {
 *     doSomething(c);
 *   }
 *   return testCases.size();
 * }
 */
#define BENCHMARK_MULTI(name, ...) \
  BENCHMARK_MULTI_IMPL( \
    name, \
    FB_STRINGIZE(name), \
    FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__), \
    __VA_ARGS__)
/**
 * Defines a benchmark that passes a parameter to another one. This is
 * common for benchmarks that need a "problem size" in addition to
 * "number of iterations". Consider:
 *
 * void pushBack(uint n, size_t initialSize) {
 *   vector<int> v;
 *   BENCHMARK_SUSPEND {
 *     v.resize(initialSize);
 *   }
 *   FOR_EACH_RANGE (i, 0, n) {
 *     v.push_back(42);
 *   }
 * }
 * BENCHMARK_PARAM(pushBack, 0)
 * BENCHMARK_PARAM(pushBack, 1000)
 * BENCHMARK_PARAM(pushBack, 1000000)
 *
 * The benchmark above estimates the speed of push_back at different
 * initial sizes of the vector. The framework will pass 0, 1000, and
 * 1000000 for initialSize, and the iteration count for n.
 */
#define BENCHMARK_PARAM(name, param) \
  BENCHMARK_NAMED_PARAM(name, param, param)
/**
 * Same as BENCHMARK_PARAM, but allows one to return the actual number of
 * iterations that have been run.
 */
#define BENCHMARK_PARAM_MULTI(name, param) \
  BENCHMARK_NAMED_PARAM_MULTI(name, param, param)
/**
 * Like BENCHMARK_PARAM(), but allows a custom name to be specified for each
 * parameter, rather than using the parameter value.
 *
 * Useful when the parameter value is not a valid token for string pasting,
 * or when you want to specify multiple parameter arguments.
 *
 * For example:
 *
 * void addValue(uint n, int64_t bucketSize, int64_t min, int64_t max) {
 *   Histogram<int64_t> hist(bucketSize, min, max);
 *   int64_t num = min;
 *   FOR_EACH_RANGE (i, 0, n) {
 *     hist.addValue(num);
 *     ++num;
 *     if (num > max) { num = min; }
 *   }
 * }
 *
 * BENCHMARK_NAMED_PARAM(addValue, 0_to_100, 1, 0, 100)
 * BENCHMARK_NAMED_PARAM(addValue, 0_to_1000, 10, 0, 1000)
 * BENCHMARK_NAMED_PARAM(addValue, 5k_to_20k, 250, 5000, 20000)
 */
#define BENCHMARK_NAMED_PARAM(name, param_name, ...) \
  BENCHMARK_IMPL( \
      FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \
      FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")", \
      iters, \
      unsigned, \
      iters) { \
    name(iters, ## __VA_ARGS__); \
  }
/**
 * Same as BENCHMARK_NAMED_PARAM, but allows one to return the actual number
 * of iterations that have been run.
 */
#define BENCHMARK_NAMED_PARAM_MULTI(name, param_name, ...) \
  BENCHMARK_MULTI_IMPL( \
      FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \
      FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")", \
      unsigned, \
      iters) { \
    return name(iters, ## __VA_ARGS__); \
  }
/**
 * Just like BENCHMARK, but prints the time relative to a
 * baseline. The baseline is the most recent BENCHMARK() seen in
 * lexical order. Example:
 *
 * // This is the baseline
 * BENCHMARK(insertVectorBegin, n) {
 *   vector<int> v;
 *   FOR_EACH_RANGE (i, 0, n) {
 *     v.insert(v.begin(), 42);
 *   }
 * }
 *
 * BENCHMARK_RELATIVE(insertListBegin, n) {
 *   list<int> s;
 *   FOR_EACH_RANGE (i, 0, n) {
 *     s.insert(s.begin(), 42);
 *   }
 * }
 *
 * Any number of relative benchmark can be associated with a
 * baseline. Another BENCHMARK() occurrence effectively establishes a
 * new baseline.
 */
#define BENCHMARK_RELATIVE(name, ...) \
  BENCHMARK_IMPL( \
    name, \
    "%" FB_STRINGIZE(name), \
    FB_ARG_2_OR_1(1, ## __VA_ARGS__), \
    FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__), \
    __VA_ARGS__)
/**
 * Same as BENCHMARK_RELATIVE, but allows one to return the actual number
 * of iterations that have been run.
 */
#define BENCHMARK_RELATIVE_MULTI(name, ...) \
  BENCHMARK_MULTI_IMPL( \
    name, \
    "%" FB_STRINGIZE(name), \
    FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__), \
    __VA_ARGS__)
/**
 * A combination of BENCHMARK_RELATIVE and BENCHMARK_PARAM.
 */
#define BENCHMARK_RELATIVE_PARAM(name, param) \
  BENCHMARK_RELATIVE_NAMED_PARAM(name, param, param)
/**
 * Same as BENCHMARK_RELATIVE_PARAM, but allows one to return the actual
 * number of iterations that have been run.
 */
#define BENCHMARK_RELATIVE_PARAM_MULTI(name, param) \
  BENCHMARK_RELATIVE_NAMED_PARAM_MULTI(name, param, param)
/**
 * A combination of BENCHMARK_RELATIVE and BENCHMARK_NAMED_PARAM.
 */
#define BENCHMARK_RELATIVE_NAMED_PARAM(name, param_name, ...) \
  BENCHMARK_IMPL( \
      FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \
      "%" FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")", \
      iters, \
      unsigned, \
      iters) { \
    name(iters, ## __VA_ARGS__); \
  }
/**
 * Same as BENCHMARK_RELATIVE_NAMED_PARAM, but allows one to return the
 * actual number of iterations that have been run.
 */
#define BENCHMARK_RELATIVE_NAMED_PARAM_MULTI(name, param_name, ...) \
  BENCHMARK_MULTI_IMPL( \
      FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \
      "%" FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")", \
      unsigned, \
      iters) { \
    return name(iters, ## __VA_ARGS__); \
  }
/**
 * Draws a line of dashes.
 */
#define BENCHMARK_DRAW_LINE() \
  static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = ( \
    ::folly::addBenchmark(__FILE__, "-", []() -> unsigned { return 0; }), \
    true);
/**
 * Allows execution of code that doesn't count toward the benchmark's
 * time budget. Example:
 *
 * BENCHMARK_START_GROUP(insertVectorBegin, n) {
 *   vector<int> v;
 *   BENCHMARK_SUSPEND {
 *     v.reserve(n);
 *   }
 *   FOR_EACH_RANGE (i, 0, n) {
 *     v.insert(v.begin(), 42);
 *   }
 * }
 */
// The suspender converts to false, so the empty `{}` branch never runs and
// the user's braced block attaches to the trailing `else`; the suspender
// lives for exactly the duration of that block.
#define BENCHMARK_SUSPEND \
  if (auto FB_ANONYMOUS_VARIABLE(BENCHMARK_SUSPEND) = \
      ::folly::BenchmarkSuspender()) {} \
  else