/*
 * Copyright 2014 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #ifndef FOLLY_BENCHMARK_H_
18 #define FOLLY_BENCHMARK_H_
#include <folly/Portability.h>
#include <folly/Preprocessor.h> // for FB_ANONYMOUS_VARIABLE

#include <cassert>
#include <cstdint>
#include <ctime>
#include <functional>
#include <limits>
#include <type_traits>
#include <utility>

#include <boost/function_types/function_arity.hpp>

#include <glog/logging.h>
#include <gflags/gflags.h>
30 DECLARE_bool(benchmark);
35 * Runs all benchmarks defined. Usually put in main().
40 * Runs all benchmarks defined if and only if the --benchmark flag has
41 * been passed to the program. Usually put in main().
43 inline bool runBenchmarksOnFlag() {
44 if (FLAGS_benchmark) {
47 return FLAGS_benchmark;
/**
 * This is the clock ID used for measuring time. On older kernels, the
 * resolution of this clock will be very coarse, which will cause the
 * benchmarks to be highly inaccurate.
 */
enum Clock { DEFAULT_CLOCK_ID = CLOCK_REALTIME };

// (elapsed nanoseconds, number of iterations actually run) — the unit of
// measurement every benchmark implementation reports back.
typedef std::pair<uint64_t, unsigned int> TimeIterPair;
62 * Adds a benchmark wrapped in a std::function. Only used
63 * internally. Pass by value is intentional.
65 void addBenchmarkImpl(const char* file,
67 std::function<TimeIterPair(unsigned int)>);
/**
 * Takes the difference between two timespec values. end is assumed to
 * occur after start.
 *
 * @return elapsed time in nanoseconds.
 */
inline uint64_t timespecDiff(timespec end, timespec start) {
  if (end.tv_sec == start.tv_sec) {
    // Same second: only the nanosecond fields differ.
    assert(end.tv_nsec >= start.tv_nsec);
    return end.tv_nsec - start.tv_nsec;
  }
  // Guard against overflow when scaling seconds to nanoseconds.
  assert(end.tv_sec > start.tv_sec &&
         (uint64_t)(end.tv_sec - start.tv_sec) <
         std::numeric_limits<uint64_t>::max() / 1000000000UL);
  return (end.tv_sec - start.tv_sec) * 1000000000UL
    + end.tv_nsec - start.tv_nsec;
}
86 * Takes the difference between two sets of timespec values. The first
87 * two come from a high-resolution clock whereas the other two come
88 * from a low-resolution clock. The crux of the matter is that
89 * high-res values may be bogus as documented in
90 * http://linux.die.net/man/3/clock_gettime. The trouble is when the
91 * running process migrates from one CPU to another, which is more
92 * likely for long-running processes. Therefore we watch for high
93 * differences between the two timings.
95 * This function is subject to further improvements.
97 inline uint64_t timespecDiff(timespec end, timespec start,
98 timespec endCoarse, timespec startCoarse) {
99 auto fine = timespecDiff(end, start);
100 auto coarse = timespecDiff(endCoarse, startCoarse);
101 if (coarse - fine >= 1000000) {
102 // The fine time is in all likelihood bogus
108 } // namespace detail
111 * Supporting type for BENCHMARK_SUSPEND defined below.
113 struct BenchmarkSuspender {
114 BenchmarkSuspender() {
115 CHECK_EQ(0, clock_gettime(detail::DEFAULT_CLOCK_ID, &start));
118 BenchmarkSuspender(const BenchmarkSuspender &) = delete;
119 BenchmarkSuspender(BenchmarkSuspender && rhs) noexcept {
121 rhs.start.tv_nsec = rhs.start.tv_sec = 0;
124 BenchmarkSuspender& operator=(const BenchmarkSuspender &) = delete;
125 BenchmarkSuspender& operator=(BenchmarkSuspender && rhs) {
126 if (start.tv_nsec > 0 || start.tv_sec > 0) {
130 rhs.start.tv_nsec = rhs.start.tv_sec = 0;
134 ~BenchmarkSuspender() {
135 if (start.tv_nsec > 0 || start.tv_sec > 0) {
141 assert(start.tv_nsec > 0 || start.tv_sec > 0);
143 start.tv_nsec = start.tv_sec = 0;
147 assert(start.tv_nsec == 0 || start.tv_sec == 0);
148 CHECK_EQ(0, clock_gettime(detail::DEFAULT_CLOCK_ID, &start));
152 * This is for use inside of if-conditions, used in BENCHMARK macros.
153 * If-conditions bypass the explicit on operator bool.
155 explicit operator bool() const {
160 * Accumulates nanoseconds spent outside benchmark.
162 typedef uint64_t NanosecondsSpent;
163 static NanosecondsSpent nsSpent;
168 CHECK_EQ(0, clock_gettime(detail::DEFAULT_CLOCK_ID, &end));
169 nsSpent += detail::timespecDiff(end, start);
177 * Adds a benchmark. Usually not called directly but instead through
178 * the macro BENCHMARK defined below. The lambda function involved
179 * must take exactly one parameter of type unsigned, and the benchmark
180 * uses it with counter semantics (iteration occurs inside the
183 template <typename Lambda>
184 typename std::enable_if<
185 boost::function_types::function_arity<decltype(&Lambda::operator())>::value
188 addBenchmark(const char* file, const char* name, Lambda&& lambda) {
189 auto execute = [=](unsigned int times) {
190 BenchmarkSuspender::nsSpent = 0;
194 // CORE MEASUREMENT STARTS
195 auto const r1 = clock_gettime(detail::DEFAULT_CLOCK_ID, &start);
196 niter = lambda(times);
197 auto const r2 = clock_gettime(detail::DEFAULT_CLOCK_ID, &end);
198 // CORE MEASUREMENT ENDS
203 return detail::TimeIterPair(
204 detail::timespecDiff(end, start) - BenchmarkSuspender::nsSpent,
208 detail::addBenchmarkImpl(file, name,
209 std::function<detail::TimeIterPair(unsigned int)>(execute));
213 * Adds a benchmark. Usually not called directly but instead through
214 * the macro BENCHMARK defined below. The lambda function involved
215 * must take zero parameters, and the benchmark calls it repeatedly
216 * (iteration occurs outside the function).
218 template <typename Lambda>
219 typename std::enable_if<
220 boost::function_types::function_arity<decltype(&Lambda::operator())>::value
223 addBenchmark(const char* file, const char* name, Lambda&& lambda) {
224 addBenchmark(file, name, [=](unsigned int times) {
225 unsigned int niter = 0;
226 while (times-- > 0) {
/**
 * Call doNotOptimizeAway(var) against variables that you use for
 * benchmarking but otherwise are useless. The compiler tends to do a
 * good job at eliminating unused variables, and this function fools
 * it into thinking var is in fact needed.
 */
#ifdef _MSC_VER

#pragma optimize("", off)

template <class T>
void doNotOptimizeAway(T&& datum) {
  // Self-assignment keeps the value observable while optimizations are off.
  datum = datum;
}

#pragma optimize("", on)

#else
template <class T>
void doNotOptimizeAway(T&& datum) {
  // Empty asm with a read/write register constraint forces the compiler
  // to materialize `datum` without emitting any actual instructions.
  asm volatile("" : "+r" (datum));
}
#endif
/**
 * Introduces a benchmark function. Used internally, see BENCHMARK and
 * friends below.
 */
#define BENCHMARK_IMPL(funName, stringName, rv, paramType, paramName)   \
  static void funName(paramType);                                       \
  static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = (           \
    ::folly::addBenchmark(__FILE__, stringName,                         \
      [](paramType paramName) -> unsigned { funName(paramName);         \
                                            return rv; }),              \
    true);                                                              \
  static void funName(paramType paramName)
/**
 * Introduces a benchmark function with support for returning the actual
 * number of iterations. Used internally, see BENCHMARK_MULTI and friends
 * below.
 */
#define BENCHMARK_MULTI_IMPL(funName, stringName, paramType, paramName) \
  static unsigned funName(paramType);                                   \
  static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = (           \
    ::folly::addBenchmark(__FILE__, stringName,                         \
      [](paramType paramName) { return funName(paramName); }),          \
    true);                                                              \
  static unsigned funName(paramType paramName)
/**
 * Introduces a benchmark function. Use with either one or two
 * arguments. The first is the name of the benchmark. Use something
 * descriptive, such as insertVectorBegin. The second argument may be
 * missing, or could be a symbolic counter. The counter dictates how
 * many internal iteration the benchmark does. Example:
 *
 * BENCHMARK(vectorPushBack) {
 *   vector<int> v;
 *   v.push_back(42);
 * }
 *
 * BENCHMARK(insertVectorBegin, n) {
 *   vector<int> v;
 *   FOR_EACH_RANGE (i, 0, n) {
 *     v.insert(v.begin(), 42);
 *   }
 * }
 */
#define BENCHMARK(name, ...)                            \
  BENCHMARK_IMPL(                                       \
    name,                                               \
    FB_STRINGIZE(name),                                 \
    FB_ARG_2_OR_1(1, ## __VA_ARGS__),                   \
    FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__),           \
    __VA_ARGS__)
/**
 * Like BENCHMARK above, but allows the user to return the actual
 * number of iterations executed in the function body. This can be
 * useful if the benchmark function doesn't know upfront how many
 * iterations it's going to run or if it runs through a certain
 * number of test cases, e.g.:
 *
 * BENCHMARK_MULTI(benchmarkSomething) {
 *   std::vector<int> testCases { 0, 1, 1, 2, 3, 5 };
 *   for (int c : testCases) {
 *     doSomething(c);
 *   }
 *   return testCases.size();
 * }
 */
#define BENCHMARK_MULTI(name, ...)                      \
  BENCHMARK_MULTI_IMPL(                                 \
    name,                                               \
    FB_STRINGIZE(name),                                 \
    FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__),           \
    __VA_ARGS__)
/**
 * Defines a benchmark that passes a parameter to another one. This is
 * common for benchmarks that need a "problem size" in addition to
 * "number of iterations". Consider:
 *
 * void pushBack(uint n, size_t initialSize) {
 *   vector<int> v;
 *   BENCHMARK_SUSPEND {
 *     v.resize(initialSize);
 *   }
 *   FOR_EACH_RANGE (i, 0, n) {
 *     v.push_back(42);
 *   }
 * }
 * BENCHMARK_PARAM(pushBack, 0)
 * BENCHMARK_PARAM(pushBack, 1000)
 * BENCHMARK_PARAM(pushBack, 1000000)
 *
 * The benchmark above estimates the speed of push_back at different
 * initial sizes of the vector. The framework will pass 0, 1000, and
 * 1000000 for initialSize, and the iteration count for n.
 */
#define BENCHMARK_PARAM(name, param)                    \
  BENCHMARK_NAMED_PARAM(name, param, param)
/**
 * Same as BENCHMARK_PARAM, but allows to return the actual number of
 * iterations that have been run.
 */
#define BENCHMARK_PARAM_MULTI(name, param)              \
  BENCHMARK_NAMED_PARAM_MULTI(name, param, param)
/**
 * Like BENCHMARK_PARAM(), but allows a custom name to be specified for each
 * parameter, rather than using the parameter value.
 *
 * Useful when the parameter value is not a valid token for string pasting,
 * or when you want to specify multiple parameter arguments.
 *
 * For example:
 *
 * void addValue(uint n, int64_t bucketSize, int64_t min, int64_t max) {
 *   Histogram<int64_t> hist(bucketSize, min, max);
 *   int64_t num = min;
 *   FOR_EACH_RANGE (i, 0, n) {
 *     hist.addValue(num);
 *     ++num;
 *     if (num > max) { num = min; }
 *   }
 * }
 *
 * BENCHMARK_NAMED_PARAM(addValue, 0_to_100, 1, 0, 100)
 * BENCHMARK_NAMED_PARAM(addValue, 0_to_1000, 10, 0, 1000)
 * BENCHMARK_NAMED_PARAM(addValue, 5k_to_20k, 250, 5000, 20000)
 */
#define BENCHMARK_NAMED_PARAM(name, param_name, ...)            \
  BENCHMARK_IMPL(                                               \
      FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)),      \
      FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")",      \
      iters,                                                    \
      unsigned,                                                 \
      iters) {                                                  \
    name(iters, ## __VA_ARGS__);                                \
  }
/**
 * Same as BENCHMARK_NAMED_PARAM, but allows to return the actual number
 * of iterations that have been run.
 */
#define BENCHMARK_NAMED_PARAM_MULTI(name, param_name, ...)      \
  BENCHMARK_MULTI_IMPL(                                         \
      FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)),      \
      FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")",      \
      unsigned,                                                 \
      iters) {                                                  \
    return name(iters, ## __VA_ARGS__);                         \
  }
/**
 * Just like BENCHMARK, but prints the time relative to a
 * baseline. The baseline is the most recent BENCHMARK() seen in
 * lexical order. Example:
 *
 * // This is the baseline
 * BENCHMARK(insertVectorBegin, n) {
 *   vector<int> v;
 *   FOR_EACH_RANGE (i, 0, n) {
 *     v.insert(v.begin(), 42);
 *   }
 * }
 *
 * BENCHMARK_RELATIVE(insertListBegin, n) {
 *   list<int> s;
 *   FOR_EACH_RANGE (i, 0, n) {
 *     s.insert(s.begin(), 42);
 *   }
 * }
 *
 * Any number of relative benchmark can be associated with a
 * baseline. Another BENCHMARK() occurrence effectively establishes a
 * new baseline.
 */
#define BENCHMARK_RELATIVE(name, ...)                   \
  BENCHMARK_IMPL(                                       \
    name,                                               \
    "%" FB_STRINGIZE(name),                             \
    FB_ARG_2_OR_1(1, ## __VA_ARGS__),                   \
    FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__),           \
    __VA_ARGS__)
/**
 * Same as BENCHMARK_RELATIVE, but allows to return the actual number
 * of iterations that have been run.
 */
#define BENCHMARK_RELATIVE_MULTI(name, ...)             \
  BENCHMARK_MULTI_IMPL(                                 \
    name,                                               \
    "%" FB_STRINGIZE(name),                             \
    FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__),           \
    __VA_ARGS__)
/**
 * A combination of BENCHMARK_RELATIVE and BENCHMARK_PARAM.
 */
#define BENCHMARK_RELATIVE_PARAM(name, param)           \
  BENCHMARK_RELATIVE_NAMED_PARAM(name, param, param)
/**
 * Same as BENCHMARK_RELATIVE_PARAM, but allows to return the actual
 * number of iterations that have been run.
 */
#define BENCHMARK_RELATIVE_PARAM_MULTI(name, param)     \
  BENCHMARK_RELATIVE_NAMED_PARAM_MULTI(name, param, param)
/**
 * A combination of BENCHMARK_RELATIVE and BENCHMARK_NAMED_PARAM.
 */
#define BENCHMARK_RELATIVE_NAMED_PARAM(name, param_name, ...)   \
  BENCHMARK_IMPL(                                               \
      FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)),      \
      "%" FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")",  \
      iters,                                                    \
      unsigned,                                                 \
      iters) {                                                  \
    name(iters, ## __VA_ARGS__);                                \
  }
/**
 * Same as BENCHMARK_RELATIVE_NAMED_PARAM, but allows to return the
 * actual number of iterations that have been run.
 */
#define BENCHMARK_RELATIVE_NAMED_PARAM_MULTI(name, param_name, ...)   \
  BENCHMARK_MULTI_IMPL(                                               \
      FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)),            \
      "%" FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")",        \
      unsigned,                                                       \
      iters) {                                                        \
    return name(iters, ## __VA_ARGS__);                               \
  }
/**
 * Draws a line of dashes.
 */
#define BENCHMARK_DRAW_LINE()                                             \
  static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = (             \
    ::folly::addBenchmark(__FILE__, "-", []() -> unsigned { return 0; }), \
    true);
/**
 * Allows execution of code that doesn't count toward the benchmark's
 * time budget. Example:
 *
 * BENCHMARK_START_GROUP(insertVectorBegin, n) {
 *   vector<int> v;
 *   BENCHMARK_SUSPEND {
 *     v.reserve(n);
 *   }
 *   FOR_EACH_RANGE (i, 0, n) {
 *     v.insert(v.begin(), 42);
 *   }
 * }
 *
 * The suspender's operator bool always returns false, so the empty
 * then-branch never runs and the user's block becomes the else-branch,
 * executing while the suspender instance is alive.
 */
#define BENCHMARK_SUSPEND                               \
  if (auto FB_ANONYMOUS_VARIABLE(BENCHMARK_SUSPEND) =   \
      ::folly::BenchmarkSuspender()) {}                 \
  else
521 #endif // FOLLY_BENCHMARK_H_