2 * Copyright 2016 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 #include <folly/stats/BucketedTimeSeries.h>
18 #include <folly/stats/BucketedTimeSeries-defs.h>
19 #include <folly/stats/MultiLevelTimeSeries.h>
20 #include <folly/stats/MultiLevelTimeSeries-defs.h>
24 #include <glog/logging.h>
26 #include <folly/Foreach.h>
27 #include <folly/portability/GTest.h>
29 using std::chrono::seconds;
32 using folly::BucketedTimeSeries;
// StatsClock is a legacy stats clock ticking in whole seconds; TimePoint is
// its std::chrono time_point type and is the time type used by the helpers
// and tests below.
// NOTE(review): `vector`/`string` are used unqualified later in this file;
// their using-declarations appear to be in lines elided from this chunk.
34 using StatsClock = folly::LegacyStatsClock<std::chrono::seconds>;
35 using TimePoint = StatsClock::time_point;
38 * Helper functions to allow us to directly log time points and duration
// Stream-insertion overload so gtest failure messages can print a
// std::chrono::seconds duration.
// NOTE(review): the body of this overload is not visible in this chunk
// (lines elided); verify against the full file.
41 std::ostream& operator<<(std::ostream& os, std::chrono::seconds s) {
// Stream-insertion overload printing a TimePoint as its raw seconds since
// the clock epoch.
45 std::ostream& operator<<(std::ostream& os, TimePoint tp) {
46 os << tp.time_since_epoch().count();
// NOTE(review): the `return os;` statements and closing braces for these
// overloads are not visible in this chunk.
52 TimePoint mkTimePoint(int value) {
53 return TimePoint(StatsClock::duration(value));
// Fixture data for one timeseries configuration: total duration `d` seconds,
// `b` buckets, and the expected start offset (in seconds) of each bucket.
57 TestData(int d, int b, std::initializer_list<int> starts)
58 : duration(d), numBuckets(b) {
59 bucketStarts.reserve(starts.size());
60 for (int s : starts) {
61 bucketStarts.push_back(mkTimePoint(s));
// NOTE(review): the closing braces of the loop/constructor and the
// `duration`/`numBuckets` member declarations are not visible in this chunk.
// Expected start time of each bucket within one duration-wide window.
66 vector<TimePoint> bucketStarts;
// Shared table of timeseries configurations exercised by getBucketInfo and
// forEachBucket below; covers both evenly- and unevenly-divisible durations.
68 vector<TestData> testData = {
69 // 71 seconds x 4 buckets
70 { 71, 4, {0, 18, 36, 54}},
71 // 100 seconds x 10 buckets
72 { 100, 10, {0, 10, 20, 30, 40, 50, 60, 70, 80, 90}},
73 // 10 seconds x 10 buckets
74 { 10, 10, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}},
75 // 10 seconds x 1 buckets
// NOTE(review): the initializer entries for the "10 seconds x 1" and
// "1 second x 1" cases (and the closing brace) are elided from this chunk.
77 // 1 second x 1 buckets
// Verifies that getBucketIdx() and getBucketInfo() map the start, midpoint,
// and last second of every bucket back to that bucket's index, for each
// configuration in testData and at several whole-duration time offsets.
82 TEST(BucketedTimeSeries, getBucketInfo) {
83 for (const auto& data : testData) {
84 BucketedTimeSeries<int64_t> ts(data.numBuckets, data.duration);
// Repeat the checks at increasing multiples of the full duration to
// exercise the modulo arithmetic at different absolute times.
86 for (uint32_t n = 0; n < 10000; n += 1234) {
87 seconds offset(n * data.duration);
89 for (uint32_t idx = 0; idx < data.numBuckets; ++idx) {
90 auto bucketStart = data.bucketStarts[idx];
91 TimePoint nextBucketStart;
92 if (idx + 1 < data.numBuckets) {
93 nextBucketStart = data.bucketStarts[idx + 1];
// The last bucket's next-start wraps to the end of the duration window.
95 nextBucketStart = TimePoint(data.duration);
98 TimePoint expectedStart = offset + bucketStart;
99 TimePoint expectedNextStart = offset + nextBucketStart;
// NOTE(review): the declaration `TimePoint midpoint =` on the line
// preceding this expression is elided from this chunk.
101 expectedStart + (expectedNextStart - expectedStart) / 2;
// Probe three representative instants inside the bucket.
103 vector<std::pair<string, TimePoint>> timePoints = {
104 {"expectedStart", expectedStart},
105 {"midpoint", midpoint},
106 {"expectedEnd", expectedNextStart - seconds(1)},
109 for (const auto& point : timePoints) {
110 // Check that getBucketIdx() returns the expected index
111 EXPECT_EQ(idx, ts.getBucketIdx(point.second))
112 << data.duration << "x" << data.numBuckets << ": " << point.first
113 << "=" << point.second;
115 // Check the data returned by getBucketInfo()
// NOTE(review): the declaration of `returnedIdx` (likely size_t) is
// elided from this chunk.
117 TimePoint returnedStart;
118 TimePoint returnedNextStart;
119 ts.getBucketInfo(expectedStart, &returnedIdx,
120 &returnedStart, &returnedNextStart);
121 EXPECT_EQ(idx, returnedIdx) << data.duration << "x" << data.numBuckets
122 << ": " << point.first << "="
124 EXPECT_EQ(expectedStart, returnedStart)
125 << data.duration << "x" << data.numBuckets << ": " << point.first
126 << "=" << point.second;
127 EXPECT_EQ(expectedNextStart, returnedNextStart)
128 << data.duration << "x" << data.numBuckets << ": " << point.first
129 << "=" << point.second;
// Helper for TEST(BucketedTimeSeries, update100x10): fills a 100s/10-bucket
// series with one point per bucket, then advances time with update() and
// checks how many points survive.  `offset` shifts all timestamps so the
// same logic is exercised starting from different buckets.
136 void testUpdate100x10(size_t offset) {
137 // This test code only works when offset is a multiple of the bucket width
138 CHECK_EQ(0, offset % 10);
140 // Create a 100 second timeseries, with 10 buckets
141 BucketedTimeSeries<int64_t> ts(10, seconds(100));
145 // Add 1 value to each bucket
146 for (int n = 5; n <= 95; n += 10) {
147 ts.addValue(seconds(n + offset), 6);
150 EXPECT_EQ(10, ts.count());
151 EXPECT_EQ(60, ts.sum());
152 EXPECT_EQ(6, ts.avg());
155 // Update 2 buckets forwards. This should throw away 2 data points.
157 ts.update(seconds(110 + offset));
158 EXPECT_EQ(8, ts.count());
159 EXPECT_EQ(48, ts.sum());
160 EXPECT_EQ(6, ts.avg());
162 // The last time we added was 95.
163 // Try updating to 189. This should clear everything but the last bucket.
// NOTE(review): the comment above mentions 189 / "everything but the last
// bucket", but the visible call updates to 151 and the count drops to 4 --
// intermediate steps appear to be elided from this chunk; verify against
// the full file.
165 ts.update(seconds(151 + offset));
166 EXPECT_EQ(4, ts.count());
// NOTE(review): the disabled check below asserts sum == 6, which disagrees
// with count == 4 and avg == 6 (those imply sum == 24) -- likely why it
// was commented out.
167 //EXPECT_EQ(6, ts.sum());
168 EXPECT_EQ(6, ts.avg());
170 // The last time we added was 95.
171 // Try updating to 193: This is nearly one full loop around,
172 // back to the same bucket. update() needs to clear everything
174 ts.update(seconds(193 + offset));
175 EXPECT_EQ(0, ts.count());
176 EXPECT_EQ(0, ts.sum());
177 EXPECT_EQ(0, ts.avg());
179 // The last time we added was 95.
180 // Try updating to 197: This is slightly over one full loop around,
181 // back to the same bucket. update() needs to clear everything
183 ts.update(seconds(197 + offset));
184 EXPECT_EQ(0, ts.count());
185 EXPECT_EQ(0, ts.sum());
186 EXPECT_EQ(0, ts.avg());
188 // The last time we added was 95.
189 // Try updating to 230: This is well over one full loop around,
190 // and everything should be cleared.
192 ts.update(seconds(230 + offset));
193 EXPECT_EQ(0, ts.count());
194 EXPECT_EQ(0, ts.sum());
195 EXPECT_EQ(0, ts.avg());
198 TEST(BucketedTimeSeries, update100x10) {
199 // Run testUpdate100x10() multiple times, with various offsets.
200 // This makes sure the update code works regardless of which bucket it starts
201 // at in the modulo arithmetic.
// All offsets are multiples of 10, as required by the CHECK_EQ in the
// helper.  (One more invocation may exist on the line elided just above.)
203 testUpdate100x10(50);
204 testUpdate100x10(370);
205 testUpdate100x10(1937090);
// Exercises update() when the bucket count (5) does not divide the duration
// (71s) evenly, so buckets have fractional 14.2s widths.
208 TEST(BucketedTimeSeries, update71x5) {
209 // Create a 71 second timeseries, with 5 buckets
210 // This tests when the number of buckets does not divide evenly into the
212 BucketedTimeSeries<int64_t> ts(5, seconds(71));
216 // Add 1 value to each bucket
217 ts.addValue(seconds(11), 6);
218 ts.addValue(seconds(24), 6);
219 ts.addValue(seconds(42), 6);
220 ts.addValue(seconds(43), 6);
221 ts.addValue(seconds(66), 6);
223 EXPECT_EQ(5, ts.count());
224 EXPECT_EQ(30, ts.sum());
225 EXPECT_EQ(6, ts.avg());
228 // Update 2 buckets forwards. This should throw away 2 data points.
230 ts.update(seconds(99));
231 EXPECT_EQ(3, ts.count());
232 EXPECT_EQ(18, ts.sum());
233 EXPECT_EQ(6, ts.avg());
235 // Update 3 buckets forwards. This should throw away 3 data points.
// NOTE(review): the comment above says "3 data points", but the visible
// count only drops from 3 to 2 (one point) -- confirm against the full
// file; lines may be elided here.
237 ts.update(seconds(100));
238 EXPECT_EQ(2, ts.count());
239 EXPECT_EQ(12, ts.sum());
240 EXPECT_EQ(6, ts.avg());
242 // Update 4 buckets forwards, just under the wrap limit.
243 // This should throw everything but the last bucket away.
245 ts.update(seconds(127));
246 EXPECT_EQ(1, ts.count());
247 EXPECT_EQ(6, ts.sum());
248 EXPECT_EQ(6, ts.avg());
250 // Update 5 buckets forwards, exactly at the wrap limit.
251 // This should throw everything away.
253 ts.update(seconds(128));
254 EXPECT_EQ(0, ts.count());
255 EXPECT_EQ(0, ts.sum());
256 EXPECT_EQ(0, ts.avg());
258 // Update very far forwards, wrapping multiple times.
259 // This should throw everything away.
261 ts.update(seconds(1234));
262 EXPECT_EQ(0, ts.count());
263 EXPECT_EQ(0, ts.sum());
264 EXPECT_EQ(0, ts.avg());
// Verifies elapsed(): inclusive [first, latest] tracking, 9-second jumps
// when a full 10s bucket rolls out of a 600s/60-bucket window, and that
// time never moves backwards.
267 TEST(BucketedTimeSeries, elapsed) {
268 BucketedTimeSeries<int64_t> ts(60, seconds(600));
270 // elapsed() is 0 when no data points have been added
271 EXPECT_EQ(0, ts.elapsed().count());
273 // With exactly 1 data point, elapsed() should report 1 second of data
274 seconds start(239218);
275 ts.addValue(start + seconds(0), 200);
276 EXPECT_EQ(1, ts.elapsed().count());
277 // Adding a data point 10 seconds later should result in an elapsed time of
278 // 11 seconds (the time range is [0, 10], inclusive).
279 ts.addValue(start + seconds(10), 200);
280 EXPECT_EQ(11, ts.elapsed().count());
282 // elapsed() returns to 0 after clear()
// NOTE(review): the ts.clear() call the comment above refers to is on a
// line elided from this chunk.
284 EXPECT_EQ(0, ts.elapsed().count());
286 // Restart, with the starting point on an easier number to work with
287 ts.addValue(seconds(10), 200);
288 EXPECT_EQ(1, ts.elapsed().count());
289 ts.addValue(seconds(580), 200);
290 EXPECT_EQ(571, ts.elapsed().count());
291 ts.addValue(seconds(590), 200);
292 EXPECT_EQ(581, ts.elapsed().count());
293 ts.addValue(seconds(598), 200);
294 EXPECT_EQ(589, ts.elapsed().count());
295 ts.addValue(seconds(599), 200);
296 EXPECT_EQ(590, ts.elapsed().count());
297 ts.addValue(seconds(600), 200);
298 EXPECT_EQ(591, ts.elapsed().count());
299 ts.addValue(seconds(608), 200);
300 EXPECT_EQ(599, ts.elapsed().count());
301 ts.addValue(seconds(609), 200);
302 EXPECT_EQ(600, ts.elapsed().count());
303 // Once we reach 600 seconds worth of data, when we move on to the next
304 // second a full bucket will get thrown out. Now we drop back down to 591
305 // seconds worth of data
306 ts.addValue(seconds(610), 200);
307 EXPECT_EQ(591, ts.elapsed().count());
308 ts.addValue(seconds(618), 200);
309 EXPECT_EQ(599, ts.elapsed().count());
310 ts.addValue(seconds(619), 200);
311 EXPECT_EQ(600, ts.elapsed().count());
312 ts.addValue(seconds(620), 200);
313 EXPECT_EQ(591, ts.elapsed().count());
314 ts.addValue(seconds(123419), 200);
315 EXPECT_EQ(600, ts.elapsed().count());
316 ts.addValue(seconds(123420), 200);
317 EXPECT_EQ(591, ts.elapsed().count());
318 ts.addValue(seconds(123425), 200);
319 EXPECT_EQ(596, ts.elapsed().count());
321 // Time never moves backwards.
322 // Calling update with an old timestamp will just be ignored.
323 ts.update(seconds(29));
324 EXPECT_EQ(596, ts.elapsed().count());
// Verifies rate() and countRate(): a half-full final second makes the rate
// slightly high (300.5), which settles to 300 once the window is full and
// dips to 299.5 after the first bucket is dropped.
327 TEST(BucketedTimeSeries, rate) {
328 BucketedTimeSeries<int64_t> ts(60, seconds(600));
330 // Add 3 values (each of value 200) every 2 seconds, until we fill up the
330 // buckets
331 for (size_t n = 0; n < 600; n += 2) {
332 ts.addValue(seconds(n), 200, 3);
335 EXPECT_EQ(900, ts.count());
336 EXPECT_EQ(180000, ts.sum());
337 EXPECT_EQ(200, ts.avg());
339 // Really we only entered 599 seconds worth of data: [0, 598] (inclusive)
340 EXPECT_EQ(599, ts.elapsed().count());
341 EXPECT_NEAR(300.5, ts.rate(), 0.005);
342 EXPECT_NEAR(1.5, ts.countRate(), 0.005);
344 // If we add 1 more second, now we will have 600 seconds worth of data
345 ts.update(seconds(599));
346 EXPECT_EQ(600, ts.elapsed().count());
347 EXPECT_NEAR(300, ts.rate(), 0.005);
348 EXPECT_EQ(300, ts.rate<int>());
349 EXPECT_NEAR(1.5, ts.countRate(), 0.005);
351 // However, 1 more second after that and we will have filled up all the
352 // buckets, and have to drop one.
353 ts.update(seconds(600));
354 EXPECT_EQ(591, ts.elapsed().count());
355 EXPECT_NEAR(299.5, ts.rate(), 0.01);
356 EXPECT_EQ(299, ts.rate<int>());
357 EXPECT_NEAR(1.5, ts.countRate(), 0.005);
// Verifies avg<T>() accuracy across input element types (int64_t, uint64_t,
// double, int16_t, long double) and across return-type conversions,
// including values near integer overflow limits.
// NOTE(review): each section below re-declares `ts`, so each was originally
// wrapped in its own { } scope; the scope braces are elided from this chunk.
360 TEST(BucketedTimeSeries, avgTypeConversion) {
361 // Make sure the computed average values are accurate regardless
362 // of the input type and return type.
365 // Simple sanity tests for small positive integer values
366 BucketedTimeSeries<int64_t> ts(60, seconds(600));
367 ts.addValue(seconds(0), 4, 100);
368 ts.addValue(seconds(0), 10, 200);
369 ts.addValue(seconds(0), 16, 100);
371 EXPECT_DOUBLE_EQ(10.0, ts.avg());
372 EXPECT_DOUBLE_EQ(10.0, ts.avg<float>());
373 EXPECT_EQ(10, ts.avg<uint64_t>());
374 EXPECT_EQ(10, ts.avg<int64_t>());
375 EXPECT_EQ(10, ts.avg<int32_t>());
376 EXPECT_EQ(10, ts.avg<int16_t>());
377 EXPECT_EQ(10, ts.avg<int8_t>());
378 EXPECT_EQ(10, ts.avg<uint8_t>());
382 // Test signed integer types with negative values
383 BucketedTimeSeries<int64_t> ts(60, seconds(600));
384 ts.addValue(seconds(0), -100);
385 ts.addValue(seconds(0), -200);
386 ts.addValue(seconds(0), -300);
387 ts.addValue(seconds(0), -200, 65535);
389 EXPECT_DOUBLE_EQ(-200.0, ts.avg());
390 EXPECT_DOUBLE_EQ(-200.0, ts.avg<float>());
391 EXPECT_EQ(-200, ts.avg<int64_t>());
392 EXPECT_EQ(-200, ts.avg<int32_t>());
393 EXPECT_EQ(-200, ts.avg<int16_t>());
397 // Test uint64_t values that would overflow int64_t
398 BucketedTimeSeries<uint64_t> ts(60, seconds(600));
399 ts.addValueAggregated(seconds(0),
400 std::numeric_limits<uint64_t>::max(),
401 std::numeric_limits<uint64_t>::max());
403 EXPECT_DOUBLE_EQ(1.0, ts.avg());
404 EXPECT_DOUBLE_EQ(1.0, ts.avg<float>());
405 EXPECT_EQ(1, ts.avg<uint64_t>());
406 EXPECT_EQ(1, ts.avg<int64_t>());
407 EXPECT_EQ(1, ts.avg<int8_t>());
411 // Test doubles with small-ish values that will fit in integer types
412 BucketedTimeSeries<double> ts(60, seconds(600));
413 ts.addValue(seconds(0), 4.0, 100);
414 ts.addValue(seconds(0), 10.0, 200);
415 ts.addValue(seconds(0), 16.0, 100);
417 EXPECT_DOUBLE_EQ(10.0, ts.avg());
418 EXPECT_DOUBLE_EQ(10.0, ts.avg<float>());
419 EXPECT_EQ(10, ts.avg<uint64_t>());
420 EXPECT_EQ(10, ts.avg<int64_t>());
421 EXPECT_EQ(10, ts.avg<int32_t>());
422 EXPECT_EQ(10, ts.avg<int16_t>());
423 EXPECT_EQ(10, ts.avg<int8_t>());
424 EXPECT_EQ(10, ts.avg<uint8_t>());
428 // Test doubles with huge values
429 BucketedTimeSeries<double> ts(60, seconds(600));
430 ts.addValue(seconds(0), 1e19, 100);
431 ts.addValue(seconds(0), 2e19, 200);
432 ts.addValue(seconds(0), 3e19, 100);
434 EXPECT_DOUBLE_EQ(ts.avg(), 2e19);
435 EXPECT_NEAR(ts.avg<float>(), 2e19, 1e11);
439 // Test doubles where the sum adds up larger than a uint64_t,
440 // but the average fits in an int64_t
441 BucketedTimeSeries<double> ts(60, seconds(600));
442 uint64_t value = 0x3fffffffffffffff;
443 FOR_EACH_RANGE(i, 0, 16) {
444 ts.addValue(seconds(0), value);
447 EXPECT_DOUBLE_EQ(value, ts.avg());
448 EXPECT_DOUBLE_EQ(value, ts.avg<float>());
449 // Some precision is lost here due to the huge sum, so the
450 // integer average returned is off by one.
451 EXPECT_NEAR(value, ts.avg<uint64_t>(), 1);
452 EXPECT_NEAR(value, ts.avg<int64_t>(), 1);
456 // Test BucketedTimeSeries with a smaller integer type
457 BucketedTimeSeries<int16_t> ts(60, seconds(600));
458 FOR_EACH_RANGE(i, 0, 101) {
459 ts.addValue(seconds(0), i);
462 EXPECT_DOUBLE_EQ(50.0, ts.avg());
463 EXPECT_DOUBLE_EQ(50.0, ts.avg<float>());
464 EXPECT_EQ(50, ts.avg<uint64_t>());
465 EXPECT_EQ(50, ts.avg<int64_t>());
466 EXPECT_EQ(50, ts.avg<int16_t>());
467 EXPECT_EQ(50, ts.avg<int8_t>());
471 // Test BucketedTimeSeries with long double input
472 BucketedTimeSeries<long double> ts(60, seconds(600));
473 ts.addValueAggregated(seconds(0), 1000.0L, 7);
475 long double expected = 1000.0L / 7.0L;
476 EXPECT_DOUBLE_EQ(static_cast<double>(expected), ts.avg());
477 EXPECT_DOUBLE_EQ(static_cast<float>(expected), ts.avg<float>());
478 EXPECT_DOUBLE_EQ(expected, ts.avg<long double>());
479 EXPECT_EQ(static_cast<uint64_t>(expected), ts.avg<uint64_t>());
480 EXPECT_EQ(static_cast<int64_t>(expected), ts.avg<int64_t>());
484 // Test BucketedTimeSeries with int64_t values,
485 // but using an average that requires a fair amount of precision.
486 BucketedTimeSeries<int64_t> ts(60, seconds(600));
487 ts.addValueAggregated(seconds(0), 1000, 7);
489 long double expected = 1000.0L / 7.0L;
490 EXPECT_DOUBLE_EQ(static_cast<double>(expected), ts.avg());
491 EXPECT_DOUBLE_EQ(static_cast<float>(expected), ts.avg<float>());
492 EXPECT_DOUBLE_EQ(expected, ts.avg<long double>());
493 EXPECT_EQ(static_cast<uint64_t>(expected), ts.avg<uint64_t>());
494 EXPECT_EQ(static_cast<int64_t>(expected), ts.avg<int64_t>());
// Verifies forEachBucket(): the callback is invoked once per bucket, in
// oldest-to-newest order, with the correct bucket pointer and [start,
// nextStart) boundaries, for every configuration in testData.
498 TEST(BucketedTimeSeries, forEachBucket) {
499 typedef BucketedTimeSeries<int64_t>::Bucket Bucket;
// Record of one callback invocation (bucket pointer plus its time range).
501 BucketInfo(const Bucket* b, TimePoint s, TimePoint ns)
502 : bucket(b), start(s), nextStart(ns) {}
504 const Bucket* bucket;
// NOTE(review): the struct header for BucketInfo and its start/nextStart
// member declarations are on lines elided from this chunk.
509 for (const auto& data : testData) {
510 BucketedTimeSeries<int64_t> ts(data.numBuckets, seconds(data.duration));
512 vector<BucketInfo> info;
// Callback (named `fn`; its capture line is elided) that records every
// bucket it is shown.
514 const Bucket& bucket,
515 TimePoint bucketStart,
516 TimePoint bucketEnd) -> bool {
517 info.emplace_back(&bucket, bucketStart, bucketEnd);
521 // If we haven't yet added any data, the current bucket will start at 0,
522 // and all previous buckets will have negative times.
523 ts.forEachBucket(fn);
525 CHECK_EQ(data.numBuckets, info.size());
527 // Check the data passed in to the function
529 size_t bucketIdx = 1;
530 seconds offset = -data.duration;
531 for (size_t n = 0; n < data.numBuckets; ++n) {
// Wrap the expected bucket index and shift the offset forward by one
// full duration once we pass the newest bucket.
532 if (bucketIdx >= data.numBuckets) {
534 offset += data.duration;
537 EXPECT_EQ(data.bucketStarts[bucketIdx] + offset, info[infoIdx].start)
538 << data.duration << "x" << data.numBuckets
539 << ": bucketIdx=" << bucketIdx << ", infoIdx=" << infoIdx;
541 size_t nextBucketIdx = bucketIdx + 1;
542 seconds nextOffset = offset;
543 if (nextBucketIdx >= data.numBuckets) {
545 nextOffset += data.duration;
548 data.bucketStarts[nextBucketIdx] + nextOffset,
549 info[infoIdx].nextStart)
550 << data.duration << "x" << data.numBuckets
551 << ": bucketIdx=" << bucketIdx << ", infoIdx=" << infoIdx;
553 EXPECT_EQ(&ts.getBucketByIndex(bucketIdx), info[infoIdx].bucket);
// Verifies interval-based sum() scales a partially-covered bucket's sum by
// the fraction of the bucket the query interval covers.
561 TEST(BucketedTimeSeries, queryByIntervalSimple) {
562 BucketedTimeSeries<int> a(3, seconds(12));
563 for (int i = 0; i < 8; i++) {
564 a.addValue(seconds(i), 1);
566 // We added 1 at each second from 0..7
567 // Query from the time period 0..2.
568 // This is entirely in the first bucket, which has a sum of 4.
569 // The code knows only part of the bucket is covered, and correctly
570 // estimates the desired sum as 2: the bucket is 12s/3 = 4s wide, the
570 // query covers 2 of those 4 seconds, so the bucket sum 4 is scaled to
570 // 4 * 2/4 = 2.  (This comment previously claimed 3, contradicting the
570 // EXPECT_EQ(2, ...) assertion below.)
571 EXPECT_EQ(2, a.sum(mkTimePoint(0), mkTimePoint(2)));
// Exhaustively checks interval queries (sum, count, avg, elapsed, rate)
// over every [start, end) sub-interval of a small 3-bucket/6-second series,
// both before and after the buckets wrap around.
574 TEST(BucketedTimeSeries, queryByInterval) {
575 // Set up a BucketedTimeSeries tracking 6 seconds in 3 buckets
576 const int kNumBuckets = 3;
577 const int kDuration = 6;
578 BucketedTimeSeries<double> b(kNumBuckets, seconds(kDuration));
580 for (unsigned int i = 0; i < kDuration; ++i) {
581 // add value 'i' at time 'i'
582 b.addValue(mkTimePoint(i), i);
585 // Current bucket state:
586 // 0: time=[0, 2): values=(0, 1), sum=1, count=2
587 // 1: time=[2, 4): values=(2, 3), sum=5, count=2
588 // 2: time=[4, 6): values=(4, 5), sum=9, count=2
// expectedSums1[i][j] is the sum over the interval ending i seconds before
// the current time and spanning j seconds; -1 entries are never queried.
589 double expectedSums1[kDuration + 1][kDuration + 1] = {
590 {0, 4.5, 9, 11.5, 14, 14.5, 15},
591 {0, 4.5, 7, 9.5, 10, 10.5, -1},
592 {0, 2.5, 5, 5.5, 6, -1, -1},
593 {0, 2.5, 3, 3.5, -1, -1, -1},
594 {0, 0.5, 1, -1, -1, -1, -1},
595 {0, 0.5, -1, -1, -1, -1, -1},
596 {0, -1, -1, -1, -1, -1, -1}
598 int expectedCounts1[kDuration + 1][kDuration + 1] = {
599 {0, 1, 2, 3, 4, 5, 6},
600 {0, 1, 2, 3, 4, 5, -1},
601 {0, 1, 2, 3, 4, -1, -1},
602 {0, 1, 2, 3, -1, -1, -1},
603 {0, 1, 2, -1, -1, -1, -1},
604 {0, 1, -1, -1, -1, -1, -1},
605 {0, -1, -1, -1, -1, -1, -1}
608 TimePoint currentTime = b.getLatestTime() + seconds(1);
609 for (int i = 0; i <= kDuration + 1; i++) {
610 for (int j = 0; j <= kDuration - i; j++) {
611 TimePoint start = currentTime - seconds(i + j);
612 TimePoint end = currentTime - seconds(i);
613 double expectedSum = expectedSums1[i][j];
614 EXPECT_EQ(expectedSum, b.sum(start, end))
615 << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
618 uint64_t expectedCount = expectedCounts1[i][j];
619 EXPECT_EQ(expectedCount, b.count(start, end))
620 << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
623 double expectedAvg = expectedCount ? expectedSum / expectedCount : 0;
624 EXPECT_EQ(expectedAvg, b.avg(start, end))
625 << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
628 double expectedRate = j ? expectedSum / j : 0;
629 EXPECT_EQ(expectedRate, b.rate(start, end))
630 << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
635 // Add 3 more values.
636 // This will overwrite 1 full bucket, and put us halfway through the next.
637 for (unsigned int i = kDuration; i < kDuration + 3; ++i) {
638 b.addValue(mkTimePoint(i), i);
640 EXPECT_EQ(mkTimePoint(4), b.getEarliestTime());
642 // Current bucket state:
643 // 0: time=[6, 8): values=(6, 7), sum=13, count=2
644 // 1: time=[8, 10): values=(8), sum=8, count=1
645 // 2: time=[4, 6): values=(4, 5), sum=9, count=2
646 double expectedSums2[kDuration + 1][kDuration + 1] = {
647 {0, 8, 14.5, 21, 25.5, 30, 30},
648 {0, 6.5, 13, 17.5, 22, 22, -1},
649 {0, 6.5, 11, 15.5, 15.5, -1, -1},
650 {0, 4.5, 9, 9, -1, -1, -1},
651 {0, 4.5, 4.5, -1, -1, -1, -1},
652 {0, 0, -1, -1, -1, -1, -1},
653 {0, -1, -1, -1, -1, -1, -1}
655 int expectedCounts2[kDuration + 1][kDuration + 1] = {
656 {0, 1, 2, 3, 4, 5, 5},
657 {0, 1, 2, 3, 4, 4, -1},
658 {0, 1, 2, 3, 3, -1, -1},
659 {0, 1, 2, 2, -1, -1, -1},
660 {0, 1, 1, -1, -1, -1, -1},
661 {0, 0, -1, -1, -1, -1, -1},
662 {0, -1, -1, -1, -1, -1, -1}
665 currentTime = b.getLatestTime() + seconds(1);
666 for (int i = 0; i <= kDuration + 1; i++) {
667 for (int j = 0; j <= kDuration - i; j++) {
668 TimePoint start = currentTime - seconds(i + j);
669 TimePoint end = currentTime - seconds(i);
670 double expectedSum = expectedSums2[i][j];
671 EXPECT_EQ(expectedSum, b.sum(start, end))
672 << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
675 uint64_t expectedCount = expectedCounts2[i][j];
676 EXPECT_EQ(expectedCount, b.count(start, end))
677 << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
680 double expectedAvg = expectedCount ? expectedSum / expectedCount : 0;
681 EXPECT_EQ(expectedAvg, b.avg(start, end))
682 << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
// Clamp the query interval to the data actually present before
// computing the expected elapsed time and rate.
685 TimePoint dataStart = std::max(start, b.getEarliestTime());
686 TimePoint dataEnd = std::max(end, dataStart);
687 seconds expectedInterval = dataEnd - dataStart;
688 EXPECT_EQ(expectedInterval, b.elapsed(start, end))
689 << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
692 double expectedRate = expectedInterval.count() ?
693 expectedSum / expectedInterval.count() : 0;
694 EXPECT_EQ(expectedRate, b.rate(start, end))
695 << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
// Verifies that rate() and countRate() report the true fixed input rate for
// interval queries, including intervals extending before/after the data.
701 TEST(BucketedTimeSeries, rateByInterval) {
702 const int kNumBuckets = 5;
703 const seconds kDuration(10);
704 BucketedTimeSeries<double> b(kNumBuckets, kDuration);
706 // Add data points at a constant rate of 10 per second.
707 // Start adding data points at kDuration, and fill half of the buckets for
709 TimePoint start(kDuration);
710 TimePoint end(kDuration + (kDuration / 2));
711 const double kFixedRate = 10.0;
712 for (TimePoint i = start; i < end; i += seconds(1)) {
713 b.addValue(i, kFixedRate);
716 // Querying the rate should yield kFixedRate.
717 EXPECT_EQ(kFixedRate, b.rate());
718 EXPECT_EQ(kFixedRate, b.rate(start, end));
719 EXPECT_EQ(kFixedRate, b.rate(start, start + kDuration));
720 EXPECT_EQ(kFixedRate, b.rate(end - kDuration, end));
721 EXPECT_EQ(kFixedRate, b.rate(end - seconds(1), end));
722 // We have been adding 1 data point per second, so countRate()
724 EXPECT_EQ(1.0, b.countRate());
725 EXPECT_EQ(1.0, b.countRate(start, end));
726 EXPECT_EQ(1.0, b.countRate(start, start + kDuration));
727 EXPECT_EQ(1.0, b.countRate(end - kDuration, end));
728 EXPECT_EQ(1.0, b.countRate(end - seconds(1), end));
730 // We haven't added anything before time kDuration.
731 // Querying data earlier than this should result in a rate of 0.
732 EXPECT_EQ(0.0, b.rate(mkTimePoint(0), mkTimePoint(1)));
733 EXPECT_EQ(0.0, b.countRate(mkTimePoint(0), mkTimePoint(1)));
735 // Fill the remainder of the timeseries from kDuration to kDuration*2
// NOTE(review): a statement resetting `start` (likely `start = end;`) on
// the elided line above would make this loop continue where the first one
// stopped -- confirm against the full file.
737 end = TimePoint(kDuration * 2);
738 for (TimePoint i = start; i < end; i += seconds(1)) {
739 b.addValue(i, kFixedRate);
742 EXPECT_EQ(kFixedRate, b.rate());
743 EXPECT_EQ(kFixedRate, b.rate(TimePoint(kDuration), TimePoint(kDuration * 2)));
744 EXPECT_EQ(kFixedRate, b.rate(TimePoint(), TimePoint(kDuration * 2)));
745 EXPECT_EQ(kFixedRate, b.rate(TimePoint(), TimePoint(kDuration * 10)));
746 EXPECT_EQ(1.0, b.countRate());
747 EXPECT_EQ(1.0, b.countRate(TimePoint(kDuration), TimePoint(kDuration * 2)));
748 EXPECT_EQ(1.0, b.countRate(TimePoint(), TimePoint(kDuration * 2)));
749 EXPECT_EQ(1.0, b.countRate(TimePoint(), TimePoint(kDuration * 10)));
// Verifies addValue() with historical (in-window but non-latest) timestamps:
// such points land in their proper bucket, roll away when that bucket is
// overwritten, and are rejected (addValue returns false) once they fall
// outside the tracked window.
752 TEST(BucketedTimeSeries, addHistorical) {
753 const int kNumBuckets = 5;
754 const seconds kDuration(10);
755 BucketedTimeSeries<double> b(kNumBuckets, kDuration);
757 // Initially fill with a constant rate of data
// NOTE(review): the body of this loop (presumably b.addValue(i, 10.0)) is
// on lines elided from this chunk.
758 for (TimePoint i = mkTimePoint(0); i < mkTimePoint(10); i += seconds(1)) {
762 EXPECT_EQ(10.0, b.rate());
763 EXPECT_EQ(10.0, b.avg());
764 EXPECT_EQ(10, b.count());
766 // Add some more data points to the middle bucket
767 b.addValue(mkTimePoint(4), 40.0);
768 b.addValue(mkTimePoint(5), 40.0);
769 EXPECT_EQ(15.0, b.avg());
770 EXPECT_EQ(18.0, b.rate());
771 EXPECT_EQ(12, b.count());
773 // Now start adding more current data points, until we are about to roll over
774 // the bucket where we added the extra historical data.
775 for (TimePoint i = mkTimePoint(10); i < mkTimePoint(14); i += seconds(1)) {
778 EXPECT_EQ(15.0, b.avg());
779 EXPECT_EQ(18.0, b.rate());
780 EXPECT_EQ(12, b.count());
782 // Now roll over the middle bucket
783 b.addValue(mkTimePoint(14), 10.0);
784 b.addValue(mkTimePoint(15), 10.0);
785 EXPECT_EQ(10.0, b.avg());
786 EXPECT_EQ(10.0, b.rate());
787 EXPECT_EQ(10, b.count());
789 // Add more historical values past the bucket window.
790 // These should be ignored.
791 EXPECT_FALSE(b.addValue(mkTimePoint(4), 40.0));
792 EXPECT_FALSE(b.addValue(mkTimePoint(5), 40.0));
793 EXPECT_EQ(10.0, b.avg());
794 EXPECT_EQ(10.0, b.rate());
795 EXPECT_EQ(10, b.count());
// Per-level durations for the minute/hour test fixture: one minute, one
// hour, and 0 meaning an unbounded all-time level.
// NOTE(review): the enclosing IntMHTS struct declaration (with its
// MINUTE/HOUR/ALLTIME level indices and NUM_LEVELS) is elided from this
// chunk, as is the closing brace of this array.
806 const seconds kMinuteHourDurations[] = {
807 seconds(60), seconds(3600), seconds(0)
// Verifies a 3-level MultiLevelTimeSeries (minute / hour / all-time):
// empty-state queries, per-level elapsed tracking, and that sum/avg/rate
// at each level reflect only that level's window as data streams in.
811 TEST(MinuteHourTimeSeries, Basic) {
812 folly::MultiLevelTimeSeries<int> mhts(60, IntMHTS::NUM_LEVELS,
813 IntMHTS::kMinuteHourDurations);
814 EXPECT_EQ(mhts.numLevels(), IntMHTS::NUM_LEVELS);
815 EXPECT_EQ(mhts.numLevels(), 3);
817 EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 0);
818 EXPECT_EQ(mhts.sum(IntMHTS::HOUR), 0);
819 EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME), 0);
821 EXPECT_EQ(mhts.avg(IntMHTS::MINUTE), 0);
822 EXPECT_EQ(mhts.avg(IntMHTS::HOUR), 0);
823 EXPECT_EQ(mhts.avg(IntMHTS::ALLTIME), 0);
825 EXPECT_EQ(mhts.rate(IntMHTS::MINUTE), 0);
826 EXPECT_EQ(mhts.rate(IntMHTS::HOUR), 0);
827 EXPECT_EQ(mhts.rate(IntMHTS::ALLTIME), 0);
829 EXPECT_EQ(mhts.getLevel(IntMHTS::MINUTE).elapsed().count(), 0);
830 EXPECT_EQ(mhts.getLevel(IntMHTS::HOUR).elapsed().count(), 0);
831 EXPECT_EQ(mhts.getLevel(IntMHTS::ALLTIME).elapsed().count(), 0);
// NOTE(review): the declaration of `cur_time` is elided from this chunk.
835 mhts.addValue(cur_time++, 10);
838 EXPECT_EQ(mhts.getLevel(IntMHTS::MINUTE).elapsed().count(), 1);
839 EXPECT_EQ(mhts.getLevel(IntMHTS::HOUR).elapsed().count(), 1);
840 EXPECT_EQ(mhts.getLevel(IntMHTS::ALLTIME).elapsed().count(), 1);
// Stream 10/sec for the first 300 seconds total.
842 for (int i = 0; i < 299; ++i) {
843 mhts.addValue(cur_time++, 10);
847 EXPECT_EQ(mhts.getLevel(IntMHTS::MINUTE).elapsed().count(), 60);
848 EXPECT_EQ(mhts.getLevel(IntMHTS::HOUR).elapsed().count(), 300);
849 EXPECT_EQ(mhts.getLevel(IntMHTS::ALLTIME).elapsed().count(), 300);
851 EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 600);
852 EXPECT_EQ(mhts.sum(IntMHTS::HOUR), 300*10);
853 EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME), 300*10);
855 EXPECT_EQ(mhts.avg(IntMHTS::MINUTE), 10);
856 EXPECT_EQ(mhts.avg(IntMHTS::HOUR), 10);
857 EXPECT_EQ(mhts.avg(IntMHTS::ALLTIME), 10);
859 EXPECT_EQ(mhts.rate(IntMHTS::MINUTE), 10);
860 EXPECT_EQ(mhts.rate(IntMHTS::HOUR), 10);
861 EXPECT_EQ(mhts.rate(IntMHTS::ALLTIME), 10);
// Continue to a total of 3 hours of value-10 data.
863 for (int i = 0; i < 3600*3 - 300; ++i) {
864 mhts.addValue(cur_time++, 10);
868 EXPECT_EQ(mhts.getLevel(IntMHTS::MINUTE).elapsed().count(), 60);
869 EXPECT_EQ(mhts.getLevel(IntMHTS::HOUR).elapsed().count(), 3600);
870 EXPECT_EQ(mhts.getLevel(IntMHTS::ALLTIME).elapsed().count(), 3600*3);
872 EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 600);
873 EXPECT_EQ(mhts.sum(IntMHTS::HOUR), 3600*10);
874 EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME), 3600*3*10);
876 EXPECT_EQ(mhts.avg(IntMHTS::MINUTE), 10);
877 EXPECT_EQ(mhts.avg(IntMHTS::HOUR), 10);
878 EXPECT_EQ(mhts.avg(IntMHTS::ALLTIME), 10);
880 EXPECT_EQ(mhts.rate(IntMHTS::MINUTE), 10);
881 EXPECT_EQ(mhts.rate(IntMHTS::HOUR), 10);
882 EXPECT_EQ(mhts.rate(IntMHTS::ALLTIME), 10);
// One hour of value-100 data: minute and hour levels see only 100s, while
// the all-time level blends both phases (avg 32.5).
884 for (int i = 0; i < 3600; ++i) {
885 mhts.addValue(cur_time++, 100);
889 EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 60*100);
890 EXPECT_EQ(mhts.sum(IntMHTS::HOUR), 3600*100);
891 EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME),
892 3600*3*10 + 3600*100);
894 EXPECT_EQ(mhts.avg(IntMHTS::MINUTE), 100);
895 EXPECT_EQ(mhts.avg(IntMHTS::HOUR), 100);
896 EXPECT_EQ(mhts.avg(IntMHTS::ALLTIME), 32.5);
897 EXPECT_EQ(mhts.avg<int>(IntMHTS::ALLTIME), 32);
899 EXPECT_EQ(mhts.rate(IntMHTS::MINUTE), 100);
900 EXPECT_EQ(mhts.rate(IntMHTS::HOUR), 100);
901 EXPECT_EQ(mhts.rate(IntMHTS::ALLTIME), 32.5);
902 EXPECT_EQ(mhts.rate<int>(IntMHTS::ALLTIME), 32);
// Half an hour of value-120 data: the hour window now straddles the
// 100- and 120-valued phases.
904 for (int i = 0; i < 1800; ++i) {
905 mhts.addValue(cur_time++, 120);
909 EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 60*120);
910 EXPECT_EQ(mhts.sum(IntMHTS::HOUR),
911 1800*100 + 1800*120);
912 EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME),
913 3600*3*10 + 3600*100 + 1800*120);
// One more minute of value-1000 data pushes 60s of the 100-valued phase
// out of the hour window (3600 - 1800 - 60 = 1740 seconds remain).
915 for (int i = 0; i < 60; ++i) {
916 mhts.addValue(cur_time++, 1000);
920 EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 60*1000);
921 EXPECT_EQ(mhts.sum(IntMHTS::HOUR),
922 1740*100 + 1800*120 + 60*1000);
923 EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME),
924 3600*3*10 + 3600*100 + 1800*120 + 60*1000);
// NOTE(review): a reset call (likely mhts.clear()) appears to be elided
// just above -- the all-time sum drops to 0 here right after the large
// sum asserted above.
927 EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME), 0);
// Verifies interval queries against a multi-level series filled with three
// phases of data (value 1 for 2h, value 10 for 59min, value 100 for 1min),
// checking sum/count/avg/rate over 12 representative intervals.
930 TEST(MinuteHourTimeSeries, QueryByInterval) {
931 folly::MultiLevelTimeSeries<int> mhts(60, IntMHTS::NUM_LEVELS,
932 IntMHTS::kMinuteHourDurations);
// NOTE(review): the declaration of `curTime` (a TimePoint) is elided from
// this chunk.
935 for (curTime = mkTimePoint(0); curTime < mkTimePoint(7200);
936 curTime += seconds(1)) {
937 mhts.addValue(curTime, 1);
939 for (curTime = mkTimePoint(7200); curTime < mkTimePoint(7200 + 3540);
940 curTime += seconds(1)) {
941 mhts.addValue(curTime, 10);
943 for (curTime = mkTimePoint(7200 + 3540); curTime < mkTimePoint(7200 + 3600);
944 curTime += seconds(1)) {
945 mhts.addValue(curTime, 100);
// Query intervals are expressed relative to the final curTime.
// NOTE(review): the TimeInterval member declarations (start/end) are
// elided from this chunk.
949 struct TimeInterval {
953 TimeInterval intervals[12] = {
954 { curTime - seconds(60), curTime },
955 { curTime - seconds(3600), curTime },
956 { curTime - seconds(7200), curTime },
957 { curTime - seconds(3600), curTime - seconds(60) },
958 { curTime - seconds(7200), curTime - seconds(60) },
959 { curTime - seconds(7200), curTime - seconds(3600) },
960 { curTime - seconds(50), curTime - seconds(20) },
961 { curTime - seconds(3020), curTime - seconds(20) },
962 { curTime - seconds(7200), curTime - seconds(20) },
963 { curTime - seconds(3000), curTime - seconds(1000) },
964 { curTime - seconds(7200), curTime - seconds(1000) },
965 { curTime - seconds(7200), curTime - seconds(3600) },
// NOTE(review): the 12th element of expectedSums (and the closing brace)
// is elided from this chunk.
968 int expectedSums[12] = {
969 6000, 41400, 32400, 35400, 32130, 16200, 3000, 33600, 32310, 20000, 27900,
973 int expectedCounts[12] = {
974 60, 3600, 7200, 3540, 7140, 3600, 30, 3000, 7180, 2000, 6200, 3600
977 for (int i = 0; i < 12; ++i) {
978 TimeInterval interval = intervals[i];
980 int s = mhts.sum(interval.start, interval.end);
981 EXPECT_EQ(expectedSums[i], s);
983 int c = mhts.count(interval.start, interval.end);
984 EXPECT_EQ(expectedCounts[i], c);
986 int a = mhts.avg<int>(interval.start, interval.end);
987 EXPECT_EQ(expectedCounts[i] ?
988 (expectedSums[i] / expectedCounts[i]) : 0,
// NOTE(review): the declaration of `expectedRate` is elided just above.
991 int r = mhts.rate<int>(interval.start, interval.end);
993 expectedSums[i] / (interval.end - interval.start).count();
994 EXPECT_EQ(expectedRate, r);
998 TEST(MultiLevelTimeSeries, Basic) {
999 // using constructor with initializer_list parameter
1000 folly::MultiLevelTimeSeries<int> mhts(
1001 60, {seconds(60), seconds(3600), seconds(0)});
1002 EXPECT_EQ(mhts.numLevels(), 3);
1004 EXPECT_EQ(mhts.sum(seconds(60)), 0);
1005 EXPECT_EQ(mhts.sum(seconds(3600)), 0);
1006 EXPECT_EQ(mhts.sum(seconds(0)), 0);
1008 EXPECT_EQ(mhts.avg(seconds(60)), 0);
1009 EXPECT_EQ(mhts.avg(seconds(3600)), 0);
1010 EXPECT_EQ(mhts.avg(seconds(0)), 0);
1012 EXPECT_EQ(mhts.rate(seconds(60)), 0);
1013 EXPECT_EQ(mhts.rate(seconds(3600)), 0);
1014 EXPECT_EQ(mhts.rate(seconds(0)), 0);
1016 EXPECT_EQ(mhts.getLevelByDuration(seconds(60)).elapsed().count(), 0);
1017 EXPECT_EQ(mhts.getLevelByDuration(seconds(3600)).elapsed().count(), 0);
1018 EXPECT_EQ(mhts.getLevelByDuration(seconds(0)).elapsed().count(), 0);
1020 seconds cur_time(0);
1022 mhts.addValue(cur_time++, 10);
1025 EXPECT_EQ(mhts.getLevelByDuration(seconds(60)).elapsed().count(), 1);
1026 EXPECT_EQ(mhts.getLevelByDuration(seconds(3600)).elapsed().count(), 1);
1027 EXPECT_EQ(mhts.getLevelByDuration(seconds(0)).elapsed().count(), 1);
1029 for (int i = 0; i < 299; ++i) {
1030 mhts.addValue(cur_time++, 10);
1034 EXPECT_EQ(mhts.getLevelByDuration(seconds(60)).elapsed().count(), 60);
1035 EXPECT_EQ(mhts.getLevelByDuration(seconds(3600)).elapsed().count(), 300);
1036 EXPECT_EQ(mhts.getLevelByDuration(seconds(0)).elapsed().count(), 300);
1038 EXPECT_EQ(mhts.sum(seconds(60)), 600);
1039 EXPECT_EQ(mhts.sum(seconds(3600)), 300 * 10);
1040 EXPECT_EQ(mhts.sum(seconds(0)), 300 * 10);
1042 EXPECT_EQ(mhts.avg(seconds(60)), 10);
1043 EXPECT_EQ(mhts.avg(seconds(3600)), 10);
1044 EXPECT_EQ(mhts.avg(seconds(0)), 10);
1046 EXPECT_EQ(mhts.rate(seconds(60)), 10);
1047 EXPECT_EQ(mhts.rate(seconds(3600)), 10);
1048 EXPECT_EQ(mhts.rate(seconds(0)), 10);
1050 for (int i = 0; i < 3600 * 3 - 300; ++i) {
1051 mhts.addValue(cur_time++, 10);
1055 EXPECT_EQ(mhts.getLevelByDuration(seconds(60)).elapsed().count(), 60);
1056 EXPECT_EQ(mhts.getLevelByDuration(seconds(3600)).elapsed().count(), 3600);
1057 EXPECT_EQ(mhts.getLevelByDuration(seconds(0)).elapsed().count(), 3600 * 3);
1059 EXPECT_EQ(mhts.sum(seconds(60)), 600);
1060 EXPECT_EQ(mhts.sum(seconds(3600)), 3600 * 10);
1061 EXPECT_EQ(mhts.sum(seconds(0)), 3600 * 3 * 10);
1063 EXPECT_EQ(mhts.avg(seconds(60)), 10);
1064 EXPECT_EQ(mhts.avg(seconds(3600)), 10);
1065 EXPECT_EQ(mhts.avg(seconds(0)), 10);
1067 EXPECT_EQ(mhts.rate(seconds(60)), 10);
1068 EXPECT_EQ(mhts.rate(seconds(3600)), 10);
1069 EXPECT_EQ(mhts.rate(seconds(0)), 10);
1071 for (int i = 0; i < 3600; ++i) {
1072 mhts.addValue(cur_time++, 100);
1076 EXPECT_EQ(mhts.sum(seconds(60)), 60 * 100);
1077 EXPECT_EQ(mhts.sum(seconds(3600)), 3600 * 100);
1078 EXPECT_EQ(mhts.sum(seconds(0)), 3600 * 3 * 10 + 3600 * 100);
1080 EXPECT_EQ(mhts.avg(seconds(60)), 100);
1081 EXPECT_EQ(mhts.avg(seconds(3600)), 100);
1082 EXPECT_EQ(mhts.avg(seconds(0)), 32.5);
1083 EXPECT_EQ(mhts.avg<int>(seconds(0)), 32);
1085 EXPECT_EQ(mhts.rate(seconds(60)), 100);
1086 EXPECT_EQ(mhts.rate(seconds(3600)), 100);
1087 EXPECT_EQ(mhts.rate(seconds(0)), 32.5);
1088 EXPECT_EQ(mhts.rate<int>(seconds(0)), 32);
1090 for (int i = 0; i < 1800; ++i) {
1091 mhts.addValue(cur_time++, 120);
1095 EXPECT_EQ(mhts.sum(seconds(60)), 60 * 120);
1096 EXPECT_EQ(mhts.sum(seconds(3600)), 1800 * 100 + 1800 * 120);
1097 EXPECT_EQ(mhts.sum(seconds(0)), 3600 * 3 * 10 + 3600 * 100 + 1800 * 120);
1099 for (int i = 0; i < 60; ++i) {
1100 mhts.addValue(cur_time++, 1000);
1104 EXPECT_EQ(mhts.sum(seconds(60)), 60 * 1000);
1105 EXPECT_EQ(mhts.sum(seconds(3600)), 1740 * 100 + 1800 * 120 + 60 * 1000);
1107 mhts.sum(seconds(0)),
1108 3600 * 3 * 10 + 3600 * 100 + 1800 * 120 + 60 * 1000);
1111 EXPECT_EQ(mhts.sum(seconds(0)), 0);
1114 TEST(MultiLevelTimeSeries, QueryByInterval) {
1115 folly::MultiLevelTimeSeries<int> mhts(
1116 60, {seconds(60), seconds(3600), seconds(0)});
1119 for (curTime = mkTimePoint(0); curTime < mkTimePoint(7200);
1120 curTime += seconds(1)) {
1121 mhts.addValue(curTime, 1);
1123 for (curTime = mkTimePoint(7200); curTime < mkTimePoint(7200 + 3540);
1124 curTime += seconds(1)) {
1125 mhts.addValue(curTime, 10);
1127 for (curTime = mkTimePoint(7200 + 3540); curTime < mkTimePoint(7200 + 3600);
1128 curTime += seconds(1)) {
1129 mhts.addValue(curTime, 100);
1133 struct TimeInterval {
1138 std::array<TimeInterval, 12> intervals = {{
1139 {curTime - seconds(60), curTime},
1140 {curTime - seconds(3600), curTime},
1141 {curTime - seconds(7200), curTime},
1142 {curTime - seconds(3600), curTime - seconds(60)},
1143 {curTime - seconds(7200), curTime - seconds(60)},
1144 {curTime - seconds(7200), curTime - seconds(3600)},
1145 {curTime - seconds(50), curTime - seconds(20)},
1146 {curTime - seconds(3020), curTime - seconds(20)},
1147 {curTime - seconds(7200), curTime - seconds(20)},
1148 {curTime - seconds(3000), curTime - seconds(1000)},
1149 {curTime - seconds(7200), curTime - seconds(1000)},
1150 {curTime - seconds(7200), curTime - seconds(3600)},
1153 std::array<int, 12> expectedSums = {{6000,
1166 std::array<int, 12> expectedCounts = {
1167 {60, 3600, 7200, 3540, 7140, 3600, 30, 3000, 7180, 2000, 6200, 3600}};
1169 for (size_t i = 0; i < intervals.size(); ++i) {
1170 TimeInterval interval = intervals[i];
1172 int s = mhts.sum(interval.start, interval.end);
1173 EXPECT_EQ(expectedSums[i], s);
1175 int c = mhts.count(interval.start, interval.end);
1176 EXPECT_EQ(expectedCounts[i], c);
1178 int a = mhts.avg<int>(interval.start, interval.end);
1179 EXPECT_EQ(expectedCounts[i] ? (expectedSums[i] / expectedCounts[i]) : 0, a);
1181 int r = mhts.rate<int>(interval.start, interval.end);
1183 expectedSums[i] / (interval.end - interval.start).count();
1184 EXPECT_EQ(expectedRate, r);