2 * Copyright 2014 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 #ifndef FOLLY_BENCHMARK_H_
18 #define FOLLY_BENCHMARK_H_
20 #include <folly/Portability.h>
21 #include <folly/Preprocessor.h> // for FB_ANONYMOUS_VARIABLE
24 #include <boost/function_types/function_arity.hpp>
26 #include <glog/logging.h>
27 #include <gflags/gflags.h>
30 DECLARE_bool(benchmark);
35 * Runs all benchmarks defined. Usually put in main().
40 * Runs all benchmarks defined if and only if the --benchmark flag has
41 * been passed to the program. Usually put in main().
43 inline bool runBenchmarksOnFlag() {
44 if (FLAGS_benchmark) {
47 return FLAGS_benchmark;
53 * This is the clock ID used for measuring time. On older kernels, the
54 * resolution of this clock will be very coarse, which will cause the
// NOTE(review): the remainder of this comment is elided in this dump --
// presumably it warns that coarse resolution skews short measurements;
// confirm against the full source.
57 enum Clock { DEFAULT_CLOCK_ID = CLOCK_REALTIME };
// Pair of (elapsed nanoseconds, iterations actually run) -- see its
// construction in addBenchmark from timespecDiff(...) and niter.
59 typedef std::pair<uint64_t, unsigned int> TimeIterPair;
62 * Adds a benchmark wrapped in a std::function. Only used
63 * internally. Pass by value is intentional.
65 void addBenchmarkImpl(const char* file,
// NOTE(review): a middle parameter line (the benchmark's display name,
// judging from the call sites below) is elided in this dump.
67 std::function<TimeIterPair(unsigned int)>);
/**
 * Takes the difference between two timespec values. end is assumed to
 * occur at or after start.
 *
 * @return the elapsed interval in nanoseconds.
 */
inline uint64_t timespecDiff(timespec end, timespec start) {
  if (end.tv_sec == start.tv_sec) {
    assert(end.tv_nsec >= start.tv_nsec);
    return end.tv_nsec - start.tv_nsec;
  }
  assert(end.tv_sec > start.tv_sec);
  auto diff = uint64_t(end.tv_sec - start.tv_sec);
  // Guard against overflow when scaling seconds up to nanoseconds.
  assert(diff <
         std::numeric_limits<uint64_t>::max() / 1000000000UL);
  return diff * 1000000000UL
    + end.tv_nsec - start.tv_nsec;
}

/**
 * Takes the difference between two sets of timespec values. The first
 * two come from a high-resolution clock whereas the other two come
 * from a low-resolution clock. The crux of the matter is that
 * high-res values may be bogus as documented in
 * http://linux.die.net/man/3/clock_gettime. The trouble is when the
 * running process migrates from one CPU to another, which is more
 * likely for long-running processes. Therefore we watch for high
 * differences between the two timings.
 *
 * This function is subject to further improvements.
 */
inline uint64_t timespecDiff(timespec end, timespec start,
                             timespec endCoarse, timespec startCoarse) {
  auto fine = timespecDiff(end, start);
  auto coarse = timespecDiff(endCoarse, startCoarse);
  // A >= 1ms disagreement means the fine clock likely glitched (e.g.
  // CPU migration); fall back to the coarse reading.
  if (coarse - fine >= 1000000) {
    // The fine time is in all likelihood bogus
    return coarse;
  }
  return fine;
}
109 } // namespace detail
112 * Supporting type for BENCHMARK_SUSPEND defined below.
// RAII timer: while an instance is "live", the elapsed interval is
// eventually tallied into nsSpent and subtracted from the benchmark's
// measured time. Many member bodies are elided in this dump.
114 struct BenchmarkSuspender {
115 BenchmarkSuspender() {
// Record the suspension start time; CHECK_EQ aborts on clock failure.
116 CHECK_EQ(0, clock_gettime(detail::DEFAULT_CLOCK_ID, &start));
// Not copyable: exactly one owner may tally a suspended interval.
119 BenchmarkSuspender(const BenchmarkSuspender &) = delete;
120 BenchmarkSuspender(BenchmarkSuspender && rhs) noexcept {
// Zero the source so its destructor won't tally the interval twice;
// an all-zero `start` is the "inactive/moved-from" sentinel.
122 rhs.start.tv_nsec = rhs.start.tv_sec = 0;
125 BenchmarkSuspender& operator=(const BenchmarkSuspender &) = delete;
126 BenchmarkSuspender& operator=(BenchmarkSuspender && rhs) {
// If this instance is still live, its interval must be tallied (body
// elided) before taking over rhs's interval.
127 if (start.tv_nsec > 0 || start.tv_sec > 0) {
131 rhs.start.tv_nsec = rhs.start.tv_sec = 0;
135 ~BenchmarkSuspender() {
136 if (start.tv_nsec > 0 || start.tv_sec > 0) {
// NOTE(review): the two statements below appear to belong to a member
// that deactivates the suspender (dismiss?) -- elided lines; confirm
// against the full source.
142 assert(start.tv_nsec > 0 || start.tv_sec > 0);
144 start.tv_nsec = start.tv_sec = 0;
// NOTE(review): and these two to a member that restarts the timer
// (rehire?) -- same caveat.
148 assert(start.tv_nsec == 0 || start.tv_sec == 0);
149 CHECK_EQ(0, clock_gettime(detail::DEFAULT_CLOCK_ID, &start));
153 * This is for use inside of if-conditions, used in BENCHMARK macros.
154 * If-conditions bypass the explicit keyword on operator bool.
156 explicit operator bool() const {
161 * Accumulates nanoseconds spent outside benchmark.
163 typedef uint64_t NanosecondsSpent;
164 static NanosecondsSpent nsSpent;
// Tally: add the interval since `start` to the global accumulator.
169 CHECK_EQ(0, clock_gettime(detail::DEFAULT_CLOCK_ID, &end));
170 nsSpent += detail::timespecDiff(end, start);
178 * Adds a benchmark. Usually not called directly but instead through
179 * the macro BENCHMARK defined below. The lambda function involved
180 * must take exactly one parameter of type unsigned, and the benchmark
181 * uses it with counter semantics (iteration occurs inside the
// (function -- the framework passes the requested iteration count in.)
184 template <typename Lambda>
185 typename std::enable_if<
186 boost::function_types::function_arity<decltype(&Lambda::operator())>::value
// NOTE(review): the arity comparison's right-hand side and the
// enable_if result type are on lines elided from this dump.
189 addBenchmark(const char* file, const char* name, Lambda&& lambda) {
// Wrap the lambda so each run resets the suspended-time accumulator
// and reports (elapsed ns minus suspended ns, iterations executed).
190 auto execute = [=](unsigned int times) {
191 BenchmarkSuspender::nsSpent = 0;
195 // CORE MEASUREMENT STARTS
196 auto const r1 = clock_gettime(detail::DEFAULT_CLOCK_ID, &start);
197 niter = lambda(times);
198 auto const r2 = clock_gettime(detail::DEFAULT_CLOCK_ID, &end);
199 // CORE MEASUREMENT ENDS
// Subtract time spent inside BENCHMARK_SUSPEND regions.
204 return detail::TimeIterPair(
205 detail::timespecDiff(end, start) - BenchmarkSuspender::nsSpent,
209 detail::addBenchmarkImpl(file, name,
210 std::function<detail::TimeIterPair(unsigned int)>(execute));
214 * Adds a benchmark. Usually not called directly but instead through
215 * the macro BENCHMARK defined below. The lambda function involved
216 * must take zero parameters, and the benchmark calls it repeatedly
217 * (iteration occurs outside the function).
219 template <typename Lambda>
220 typename std::enable_if<
221 boost::function_types::function_arity<decltype(&Lambda::operator())>::value
// NOTE(review): the arity comparison's right-hand side and the
// enable_if result type are on lines elided from this dump.
224 addBenchmark(const char* file, const char* name, Lambda&& lambda) {
// Adapt to the counter-semantics overload above: run the zero-arg
// lambda `times` times, accumulating the iteration count.
225 addBenchmark(file, name, [=](unsigned int times) {
226 unsigned int niter = 0;
227 while (times-- > 0) {
235 * Call doNotOptimizeAway(var) against variables that you use for
236 * benchmarking but otherwise are useless. The compiler tends to do a
237 * good job at eliminating unused variables, and this function fools
238 * it into thinking var is in fact needed.
// MSVC path: bracket the (elided) body with optimization disabled so
// the value cannot be discarded as dead code.
242 #pragma optimize("", off)
245 void doNotOptimizeAway(T&& datum) {
249 #pragma optimize("", on)
// GCC/Clang path: an empty asm with a "+r" constraint forces `datum`
// into a register and marks it read-and-written, defeating dead-code
// elimination at zero runtime cost.
253 void doNotOptimizeAway(T&& datum) {
254 asm volatile("" : "+r" (datum));
261 * Introduces a benchmark function. Used internally, see BENCHMARK and
// friends below. Declares the function, registers it with
// ::folly::addBenchmark via a static-initialization-time dummy bool,
// then opens the function's definition. Some expansion lines are
// elided in this dump.
264 #define BENCHMARK_IMPL(funName, stringName, rv, paramType, paramName) \
265 static void funName(paramType); \
266 static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = ( \
267 ::folly::addBenchmark(__FILE__, stringName, \
268 [](paramType paramName) -> unsigned { funName(paramName); \
271 static void funName(paramType paramName)
274 * Introduces a benchmark function with support for returning the actual
275 * number of iterations. Used internally, see BENCHMARK_MULTI and friends
// below. Same registration trick, but the benchmark function returns
// its own iteration count.
278 #define BENCHMARK_MULTI_IMPL(funName, stringName, paramType, paramName) \
279 static unsigned funName(paramType); \
280 static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = ( \
281 ::folly::addBenchmark(__FILE__, stringName, \
282 [](paramType paramName) { return funName(paramName); }), \
284 static unsigned funName(paramType paramName)
287 * Introduces a benchmark function. Use with either one or two
288 * arguments. The first is the name of the benchmark. Use something
289 * descriptive, such as insertVectorBegin. The second argument may be
290 * missing, or could be a symbolic counter. The counter dictates how
291 * many internal iterations the benchmark does. Example:
293 * BENCHMARK(vectorPushBack) {
298 * BENCHMARK(insertVectorBegin, n) {
300 * FOR_EACH_RANGE (i, 0, n) {
301 * v.insert(v.begin(), 42);
// Expands to BENCHMARK_IMPL; FB_ARG_2_OR_1/FB_ONE_OR_NONE make the
// counter argument optional. Some expansion lines are elided in this
// dump.
305 #define BENCHMARK(name, ...) \
308 FB_STRINGIZE(name), \
309 FB_ARG_2_OR_1(1, ## __VA_ARGS__), \
310 FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__), \
314 * Like BENCHMARK above, but allows the user to return the actual
315 * number of iterations executed in the function body. This can be
316 * useful if the benchmark function doesn't know upfront how many
317 * iterations it's going to run or if it runs through a certain
318 * number of test cases, e.g.:
320 * BENCHMARK_MULTI(benchmarkSomething) {
321 * std::vector<int> testCases { 0, 1, 1, 2, 3, 5 };
322 * for (int c : testCases) {
325 * return testCases.size();
// Expands to BENCHMARK_MULTI_IMPL with the same optional-counter
// handling as BENCHMARK.
328 #define BENCHMARK_MULTI(name, ...) \
329 BENCHMARK_MULTI_IMPL( \
331 FB_STRINGIZE(name), \
332 FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__), \
336 * Defines a benchmark that passes a parameter to another one. This is
337 * common for benchmarks that need a "problem size" in addition to
338 * "number of iterations". Consider:
340 * void pushBack(uint n, size_t initialSize) {
342 * BENCHMARK_SUSPEND {
343 * v.resize(initialSize);
345 * FOR_EACH_RANGE (i, 0, n) {
349 * BENCHMARK_PARAM(pushBack, 0)
350 * BENCHMARK_PARAM(pushBack, 1000)
351 * BENCHMARK_PARAM(pushBack, 1000000)
353 * The benchmark above estimates the speed of push_back at different
354 * initial sizes of the vector. The framework will pass 0, 1000, and
355 * 1000000 for initialSize, and the iteration count for n.
// Delegates to BENCHMARK_NAMED_PARAM, using the parameter value itself
// as the display name.
357 #define BENCHMARK_PARAM(name, param) \
358 BENCHMARK_NAMED_PARAM(name, param, param)
361 * Same as BENCHMARK_PARAM, but allows to return the actual number of
362 * iterations that have been run.
364 #define BENCHMARK_PARAM_MULTI(name, param) \
365 BENCHMARK_NAMED_PARAM_MULTI(name, param, param)
368 * Like BENCHMARK_PARAM(), but allows a custom name to be specified for each
369 * parameter, rather than using the parameter value.
371 * Useful when the parameter value is not a valid token for string pasting,
372 * or when you want to specify multiple parameter arguments.
376 * void addValue(uint n, int64_t bucketSize, int64_t min, int64_t max) {
377 * Histogram<int64_t> hist(bucketSize, min, max);
379 * FOR_EACH_RANGE (i, 0, n) {
380 * hist.addValue(num);
382 * if (num > max) { num = min; }
386 * BENCHMARK_NAMED_PARAM(addValue, 0_to_100, 1, 0, 100)
387 * BENCHMARK_NAMED_PARAM(addValue, 0_to_1000, 10, 0, 1000)
388 * BENCHMARK_NAMED_PARAM(addValue, 5k_to_20k, 250, 5000, 20000)
// Registers a benchmark named "name(param_name)" whose body forwards
// the iteration count plus the extra arguments to `name`. Some
// expansion lines are elided in this dump.
390 #define BENCHMARK_NAMED_PARAM(name, param_name, ...) \
392 FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \
393 FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")", \
397 name(iters, ## __VA_ARGS__); \
401 * Same as BENCHMARK_NAMED_PARAM, but allows to return the actual number
402 * of iterations that have been run.
404 #define BENCHMARK_NAMED_PARAM_MULTI(name, param_name, ...) \
405 BENCHMARK_MULTI_IMPL( \
406 FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \
407 FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")", \
410 return name(iters, ## __VA_ARGS__); \
414 * Just like BENCHMARK, but prints the time relative to a
415 * baseline. The baseline is the most recent BENCHMARK() seen in
416 * lexical order. Example:
418 * // This is the baseline
419 * BENCHMARK(insertVectorBegin, n) {
421 * FOR_EACH_RANGE (i, 0, n) {
422 * v.insert(v.begin(), 42);
426 * BENCHMARK_RELATIVE(insertListBegin, n) {
428 * FOR_EACH_RANGE (i, 0, n) {
429 * s.insert(s.begin(), 42);
433 * Any number of relative benchmarks can be associated with a
434 * baseline. Another BENCHMARK() occurrence effectively establishes a
// new baseline. The "%" prefix on the registered name is what marks a
// benchmark as relative. Some expansion lines are elided in this dump.
437 #define BENCHMARK_RELATIVE(name, ...) \
440 "%" FB_STRINGIZE(name), \
441 FB_ARG_2_OR_1(1, ## __VA_ARGS__), \
442 FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__), \
446 * Same as BENCHMARK_RELATIVE, but allows to return the actual number
447 * of iterations that have been run.
449 #define BENCHMARK_RELATIVE_MULTI(name, ...) \
450 BENCHMARK_MULTI_IMPL( \
452 "%" FB_STRINGIZE(name), \
453 FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__), \
457 * A combination of BENCHMARK_RELATIVE and BENCHMARK_PARAM.
459 #define BENCHMARK_RELATIVE_PARAM(name, param) \
460 BENCHMARK_RELATIVE_NAMED_PARAM(name, param, param)
463 * Same as BENCHMARK_RELATIVE_PARAM, but allows to return the actual
464 * number of iterations that have been run.
466 #define BENCHMARK_RELATIVE_PARAM_MULTI(name, param) \
467 BENCHMARK_RELATIVE_NAMED_PARAM_MULTI(name, param, param)
470 * A combination of BENCHMARK_RELATIVE and BENCHMARK_NAMED_PARAM.
472 #define BENCHMARK_RELATIVE_NAMED_PARAM(name, param_name, ...) \
474 FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \
475 "%" FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")", \
479 name(iters, ## __VA_ARGS__); \
483 * Same as BENCHMARK_RELATIVE_NAMED_PARAM, but allows to return the
484 * actual number of iterations that have been run.
486 #define BENCHMARK_RELATIVE_NAMED_PARAM_MULTI(name, param_name, ...) \
487 BENCHMARK_MULTI_IMPL( \
488 FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \
489 "%" FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")", \
492 return name(iters, ## __VA_ARGS__); \
496 * Draws a line of dashes.
// Registers a dummy zero-iteration benchmark named "-", which the
// reporter renders as a separator line.
498 #define BENCHMARK_DRAW_LINE() \
499 static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = ( \
500 ::folly::addBenchmark(__FILE__, "-", []() -> unsigned { return 0; }), \
504 * Allows execution of code that doesn't count toward the benchmark's
505 * time budget. Example:
// NOTE(review): the example below mentions BENCHMARK_START_GROUP,
// which is not defined in this header -- likely a stale macro name;
// confirm against the full source.
507 * BENCHMARK_START_GROUP(insertVectorBegin, n) {
509 * BENCHMARK_SUSPEND {
512 * FOR_EACH_RANGE (i, 0, n) {
513 * v.insert(v.begin(), 42);
// Creates a scoped BenchmarkSuspender inside an if-condition; the
// braced statement that follows the macro runs with timing suspended.
517 #define BENCHMARK_SUSPEND \
518 if (auto FB_ANONYMOUS_VARIABLE(BENCHMARK_SUSPEND) = \
519 ::folly::BenchmarkSuspender()) {} \
522 #endif // FOLLY_BENCHMARK_H_