/*
 * Copyright 2013 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #include "folly/stats/BucketedTimeSeries.h"
18 #include "folly/stats/BucketedTimeSeries-defs.h"
20 #include <glog/logging.h>
21 #include <gtest/gtest.h>
23 #include "folly/Foreach.h"
25 using std::chrono::seconds;
28 using folly::BucketedTimeSeries;
33 vector<ssize_t> bucketStarts;
35 vector<TestData> testData = {
36 // 71 seconds x 4 buckets
37 { 71, 4, {0, 18, 36, 54}},
38 // 100 seconds x 10 buckets
39 { 100, 10, {0, 10, 20, 30, 40, 50, 60, 70, 80, 90}},
40 // 10 seconds x 10 buckets
41 { 10, 10, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}},
42 // 10 seconds x 1 buckets
44 // 1 second x 1 buckets
48 TEST(BucketedTimeSeries, getBucketInfo) {
49 for (const auto& data : testData) {
50 BucketedTimeSeries<int64_t> ts(data.numBuckets, seconds(data.duration));
52 for (uint32_t n = 0; n < 10000; n += 1234) {
53 seconds offset(n * data.duration);
55 for (uint32_t idx = 0; idx < data.numBuckets; ++idx) {
56 seconds bucketStart(data.bucketStarts[idx]);
57 seconds nextBucketStart;
58 if (idx + 1 < data.numBuckets) {
59 nextBucketStart = seconds(data.bucketStarts[idx + 1]);
61 nextBucketStart = seconds(data.duration);
64 seconds expectedStart = offset + bucketStart;
65 seconds expectedNextStart = offset + nextBucketStart;
66 seconds midpoint = (expectedStart + expectedNextStart) / 2;
68 vector<std::pair<string, seconds>> timePoints = {
69 {"expectedStart", expectedStart},
70 {"midpoint", midpoint},
71 {"expectedEnd", expectedNextStart - seconds(1)},
74 for (const auto& point : timePoints) {
75 // Check that getBucketIdx() returns the expected index
76 EXPECT_EQ(idx, ts.getBucketIdx(point.second)) <<
77 data.duration << "x" << data.numBuckets << ": " <<
78 point.first << "=" << point.second.count();
80 // Check the data returned by getBucketInfo()
82 seconds returnedStart;
83 seconds returnedNextStart;
84 ts.getBucketInfo(expectedStart, &returnedIdx,
85 &returnedStart, &returnedNextStart);
86 EXPECT_EQ(idx, returnedIdx) <<
87 data.duration << "x" << data.numBuckets << ": " <<
88 point.first << "=" << point.second.count();
89 EXPECT_EQ(expectedStart.count(), returnedStart.count()) <<
90 data.duration << "x" << data.numBuckets << ": " <<
91 point.first << "=" << point.second.count();
92 EXPECT_EQ(expectedNextStart.count(), returnedNextStart.count()) <<
93 data.duration << "x" << data.numBuckets << ": " <<
94 point.first << "=" << point.second.count();
101 void testUpdate100x10(size_t offset) {
102 // This test code only works when offset is a multiple of the bucket width
103 CHECK_EQ(0, offset % 10);
105 // Create a 100 second timeseries, with 10 buckets
106 BucketedTimeSeries<int64_t> ts(10, seconds(100));
110 // Add 1 value to each bucket
111 for (int n = 5; n <= 95; n += 10) {
112 ts.addValue(seconds(n + offset), 6);
115 EXPECT_EQ(10, ts.count());
116 EXPECT_EQ(60, ts.sum());
117 EXPECT_EQ(6, ts.avg());
120 // Update 2 buckets forwards. This should throw away 2 data points.
122 ts.update(seconds(110 + offset));
123 EXPECT_EQ(8, ts.count());
124 EXPECT_EQ(48, ts.sum());
125 EXPECT_EQ(6, ts.avg());
127 // The last time we added was 95.
128 // Try updating to 189. This should clear everything but the last bucket.
130 ts.update(seconds(151 + offset));
131 EXPECT_EQ(4, ts.count());
132 //EXPECT_EQ(6, ts.sum());
133 EXPECT_EQ(6, ts.avg());
135 // The last time we added was 95.
136 // Try updating to 193: This is nearly one full loop around,
137 // back to the same bucket. update() needs to clear everything
139 ts.update(seconds(193 + offset));
140 EXPECT_EQ(0, ts.count());
141 EXPECT_EQ(0, ts.sum());
142 EXPECT_EQ(0, ts.avg());
144 // The last time we added was 95.
145 // Try updating to 197: This is slightly over one full loop around,
146 // back to the same bucket. update() needs to clear everything
148 ts.update(seconds(197 + offset));
149 EXPECT_EQ(0, ts.count());
150 EXPECT_EQ(0, ts.sum());
151 EXPECT_EQ(0, ts.avg());
153 // The last time we added was 95.
154 // Try updating to 230: This is well over one full loop around,
155 // and everything should be cleared.
157 ts.update(seconds(230 + offset));
158 EXPECT_EQ(0, ts.count());
159 EXPECT_EQ(0, ts.sum());
160 EXPECT_EQ(0, ts.avg());
163 TEST(BucketedTimeSeries, update100x10) {
164 // Run testUpdate100x10() multiple times, with various offsets.
165 // This makes sure the update code works regardless of which bucket it starts
166 // at in the modulo arithmetic.
168 testUpdate100x10(50);
169 testUpdate100x10(370);
170 testUpdate100x10(1937090);
173 TEST(BucketedTimeSeries, update71x5) {
174 // Create a 71 second timeseries, with 5 buckets
175 // This tests when the number of buckets does not divide evenly into the
177 BucketedTimeSeries<int64_t> ts(5, seconds(71));
181 // Add 1 value to each bucket
182 ts.addValue(seconds(11), 6);
183 ts.addValue(seconds(24), 6);
184 ts.addValue(seconds(42), 6);
185 ts.addValue(seconds(43), 6);
186 ts.addValue(seconds(66), 6);
188 EXPECT_EQ(5, ts.count());
189 EXPECT_EQ(30, ts.sum());
190 EXPECT_EQ(6, ts.avg());
193 // Update 2 buckets forwards. This should throw away 2 data points.
195 ts.update(seconds(99));
196 EXPECT_EQ(3, ts.count());
197 EXPECT_EQ(18, ts.sum());
198 EXPECT_EQ(6, ts.avg());
200 // Update 3 buckets forwards. This should throw away 3 data points.
202 ts.update(seconds(100));
203 EXPECT_EQ(2, ts.count());
204 EXPECT_EQ(12, ts.sum());
205 EXPECT_EQ(6, ts.avg());
207 // Update 4 buckets forwards, just under the wrap limit.
208 // This should throw everything but the last bucket away.
210 ts.update(seconds(127));
211 EXPECT_EQ(1, ts.count());
212 EXPECT_EQ(6, ts.sum());
213 EXPECT_EQ(6, ts.avg());
215 // Update 5 buckets forwards, exactly at the wrap limit.
216 // This should throw everything away.
218 ts.update(seconds(128));
219 EXPECT_EQ(0, ts.count());
220 EXPECT_EQ(0, ts.sum());
221 EXPECT_EQ(0, ts.avg());
223 // Update very far forwards, wrapping multiple times.
224 // This should throw everything away.
226 ts.update(seconds(1234));
227 EXPECT_EQ(0, ts.count());
228 EXPECT_EQ(0, ts.sum());
229 EXPECT_EQ(0, ts.avg());
232 TEST(BucketedTimeSeries, elapsed) {
233 BucketedTimeSeries<int64_t> ts(60, seconds(600));
235 // elapsed() is 0 when no data points have been added
236 EXPECT_EQ(0, ts.elapsed().count());
238 // With exactly 1 data point, elapsed() should report 1 second of data
239 seconds start(239218);
240 ts.addValue(start + seconds(0), 200);
241 EXPECT_EQ(1, ts.elapsed().count());
242 // Adding a data point 10 seconds later should result in an elapsed time of
243 // 11 seconds (the time range is [0, 10], inclusive).
244 ts.addValue(start + seconds(10), 200);
245 EXPECT_EQ(11, ts.elapsed().count());
247 // elapsed() returns to 0 after clear()
249 EXPECT_EQ(0, ts.elapsed().count());
251 // Restart, with the starting point on an easier number to work with
252 ts.addValue(seconds(10), 200);
253 EXPECT_EQ(1, ts.elapsed().count());
254 ts.addValue(seconds(580), 200);
255 EXPECT_EQ(571, ts.elapsed().count());
256 ts.addValue(seconds(590), 200);
257 EXPECT_EQ(581, ts.elapsed().count());
258 ts.addValue(seconds(598), 200);
259 EXPECT_EQ(589, ts.elapsed().count());
260 ts.addValue(seconds(599), 200);
261 EXPECT_EQ(590, ts.elapsed().count());
262 ts.addValue(seconds(600), 200);
263 EXPECT_EQ(591, ts.elapsed().count());
264 ts.addValue(seconds(608), 200);
265 EXPECT_EQ(599, ts.elapsed().count());
266 ts.addValue(seconds(609), 200);
267 EXPECT_EQ(600, ts.elapsed().count());
268 // Once we reach 600 seconds worth of data, when we move on to the next
269 // second a full bucket will get thrown out. Now we drop back down to 591
270 // seconds worth of data
271 ts.addValue(seconds(610), 200);
272 EXPECT_EQ(591, ts.elapsed().count());
273 ts.addValue(seconds(618), 200);
274 EXPECT_EQ(599, ts.elapsed().count());
275 ts.addValue(seconds(619), 200);
276 EXPECT_EQ(600, ts.elapsed().count());
277 ts.addValue(seconds(620), 200);
278 EXPECT_EQ(591, ts.elapsed().count());
279 ts.addValue(seconds(123419), 200);
280 EXPECT_EQ(600, ts.elapsed().count());
281 ts.addValue(seconds(123420), 200);
282 EXPECT_EQ(591, ts.elapsed().count());
283 ts.addValue(seconds(123425), 200);
284 EXPECT_EQ(596, ts.elapsed().count());
286 // Time never moves backwards.
287 // Calling update with an old timestamp will just be ignored.
288 ts.update(seconds(29));
289 EXPECT_EQ(596, ts.elapsed().count());
292 TEST(BucketedTimeSeries, rate) {
293 BucketedTimeSeries<int64_t> ts(60, seconds(600));
295 // Add 3 values every 2 seconds, until fill up the buckets
296 for (size_t n = 0; n < 600; n += 2) {
297 ts.addValue(seconds(n), 200, 3);
300 EXPECT_EQ(900, ts.count());
301 EXPECT_EQ(180000, ts.sum());
302 EXPECT_EQ(200, ts.avg());
304 // Really we only entered 599 seconds worth of data: [0, 598] (inclusive)
305 EXPECT_EQ(599, ts.elapsed().count());
306 EXPECT_NEAR(300.5, ts.rate(), 0.005);
307 EXPECT_NEAR(1.5, ts.countRate(), 0.005);
309 // If we add 1 more second, now we will have 600 seconds worth of data
310 ts.update(seconds(599));
311 EXPECT_EQ(600, ts.elapsed().count());
312 EXPECT_NEAR(300, ts.rate(), 0.005);
313 EXPECT_EQ(300, ts.rate<int>());
314 EXPECT_NEAR(1.5, ts.countRate(), 0.005);
316 // However, 1 more second after that and we will have filled up all the
317 // buckets, and have to drop one.
318 ts.update(seconds(600));
319 EXPECT_EQ(591, ts.elapsed().count());
320 EXPECT_NEAR(299.5, ts.rate(), 0.01);
321 EXPECT_EQ(299, ts.rate<int>());
322 EXPECT_NEAR(1.5, ts.countRate(), 0.005);
325 TEST(BucketedTimeSeries, avgTypeConversion) {
326 // Make sure the computed average values are accurate regardless
327 // of the input type and return type.
330 // Simple sanity tests for small positive integer values
331 BucketedTimeSeries<int64_t> ts(60, seconds(600));
332 ts.addValue(seconds(0), 4, 100);
333 ts.addValue(seconds(0), 10, 200);
334 ts.addValue(seconds(0), 16, 100);
336 EXPECT_DOUBLE_EQ(10.0, ts.avg());
337 EXPECT_DOUBLE_EQ(10.0, ts.avg<float>());
338 EXPECT_EQ(10, ts.avg<uint64_t>());
339 EXPECT_EQ(10, ts.avg<int64_t>());
340 EXPECT_EQ(10, ts.avg<int32_t>());
341 EXPECT_EQ(10, ts.avg<int16_t>());
342 EXPECT_EQ(10, ts.avg<int8_t>());
343 EXPECT_EQ(10, ts.avg<uint8_t>());
347 // Test signed integer types with negative values
348 BucketedTimeSeries<int64_t> ts(60, seconds(600));
349 ts.addValue(seconds(0), -100);
350 ts.addValue(seconds(0), -200);
351 ts.addValue(seconds(0), -300);
352 ts.addValue(seconds(0), -200, 65535);
354 EXPECT_DOUBLE_EQ(-200.0, ts.avg());
355 EXPECT_DOUBLE_EQ(-200.0, ts.avg<float>());
356 EXPECT_EQ(-200, ts.avg<int64_t>());
357 EXPECT_EQ(-200, ts.avg<int32_t>());
358 EXPECT_EQ(-200, ts.avg<int16_t>());
362 // Test uint64_t values that would overflow int64_t
363 BucketedTimeSeries<uint64_t> ts(60, seconds(600));
364 ts.addValueAggregated(seconds(0),
365 std::numeric_limits<uint64_t>::max(),
366 std::numeric_limits<uint64_t>::max());
368 EXPECT_DOUBLE_EQ(1.0, ts.avg());
369 EXPECT_DOUBLE_EQ(1.0, ts.avg<float>());
370 EXPECT_EQ(1, ts.avg<uint64_t>());
371 EXPECT_EQ(1, ts.avg<int64_t>());
372 EXPECT_EQ(1, ts.avg<int8_t>());
376 // Test doubles with small-ish values that will fit in integer types
377 BucketedTimeSeries<double> ts(60, seconds(600));
378 ts.addValue(seconds(0), 4.0, 100);
379 ts.addValue(seconds(0), 10.0, 200);
380 ts.addValue(seconds(0), 16.0, 100);
382 EXPECT_DOUBLE_EQ(10.0, ts.avg());
383 EXPECT_DOUBLE_EQ(10.0, ts.avg<float>());
384 EXPECT_EQ(10, ts.avg<uint64_t>());
385 EXPECT_EQ(10, ts.avg<int64_t>());
386 EXPECT_EQ(10, ts.avg<int32_t>());
387 EXPECT_EQ(10, ts.avg<int16_t>());
388 EXPECT_EQ(10, ts.avg<int8_t>());
389 EXPECT_EQ(10, ts.avg<uint8_t>());
393 // Test doubles with huge values
394 BucketedTimeSeries<double> ts(60, seconds(600));
395 ts.addValue(seconds(0), 1e19, 100);
396 ts.addValue(seconds(0), 2e19, 200);
397 ts.addValue(seconds(0), 3e19, 100);
399 EXPECT_DOUBLE_EQ(ts.avg(), 2e19);
400 EXPECT_NEAR(ts.avg<float>(), 2e19, 1e11);
404 // Test doubles where the sum adds up larger than a uint64_t,
405 // but the average fits in an int64_t
406 BucketedTimeSeries<double> ts(60, seconds(600));
407 uint64_t value = 0x3fffffffffffffff;
408 FOR_EACH_RANGE(i, 0, 16) {
409 ts.addValue(seconds(0), value);
412 EXPECT_DOUBLE_EQ(value, ts.avg());
413 EXPECT_DOUBLE_EQ(value, ts.avg<float>());
414 // Some precision is lost here due to the huge sum, so the
415 // integer average returned is off by one.
416 EXPECT_NEAR(value, ts.avg<uint64_t>(), 1);
417 EXPECT_NEAR(value, ts.avg<int64_t>(), 1);
421 // Test BucketedTimeSeries with a smaller integer type
422 BucketedTimeSeries<int16_t> ts(60, seconds(600));
423 FOR_EACH_RANGE(i, 0, 101) {
424 ts.addValue(seconds(0), i);
427 EXPECT_DOUBLE_EQ(50.0, ts.avg());
428 EXPECT_DOUBLE_EQ(50.0, ts.avg<float>());
429 EXPECT_EQ(50, ts.avg<uint64_t>());
430 EXPECT_EQ(50, ts.avg<int64_t>());
431 EXPECT_EQ(50, ts.avg<int16_t>());
432 EXPECT_EQ(50, ts.avg<int8_t>());
436 // Test BucketedTimeSeries with long double input
437 BucketedTimeSeries<long double> ts(60, seconds(600));
438 ts.addValueAggregated(seconds(0), 1000.0L, 7);
440 long double expected = 1000.0L / 7.0L;
441 EXPECT_DOUBLE_EQ(static_cast<double>(expected), ts.avg());
442 EXPECT_DOUBLE_EQ(static_cast<float>(expected), ts.avg<float>());
443 EXPECT_DOUBLE_EQ(expected, ts.avg<long double>());
444 EXPECT_EQ(static_cast<uint64_t>(expected), ts.avg<uint64_t>());
445 EXPECT_EQ(static_cast<int64_t>(expected), ts.avg<int64_t>());
449 // Test BucketedTimeSeries with int64_t values,
450 // but using an average that requires a fair amount of precision.
451 BucketedTimeSeries<int64_t> ts(60, seconds(600));
452 ts.addValueAggregated(seconds(0), 1000, 7);
454 long double expected = 1000.0L / 7.0L;
455 EXPECT_DOUBLE_EQ(static_cast<double>(expected), ts.avg());
456 EXPECT_DOUBLE_EQ(static_cast<float>(expected), ts.avg<float>());
457 EXPECT_DOUBLE_EQ(expected, ts.avg<long double>());
458 EXPECT_EQ(static_cast<uint64_t>(expected), ts.avg<uint64_t>());
459 EXPECT_EQ(static_cast<int64_t>(expected), ts.avg<int64_t>());
463 TEST(BucketedTimeSeries, forEachBucket) {
464 typedef BucketedTimeSeries<int64_t>::Bucket Bucket;
466 BucketInfo(const Bucket* b, seconds s, seconds ns)
467 : bucket(b), start(s), nextStart(ns) {}
469 const Bucket* bucket;
474 for (const auto& data : testData) {
475 BucketedTimeSeries<int64_t> ts(data.numBuckets, seconds(data.duration));
477 vector<BucketInfo> info;
478 auto fn = [&](const Bucket& bucket, seconds bucketStart,
479 seconds bucketEnd) -> bool {
480 info.emplace_back(&bucket, bucketStart, bucketEnd);
484 // If we haven't yet added any data, the current bucket will start at 0,
485 // and all data previous buckets will have negative times.
486 ts.forEachBucket(fn);
488 CHECK_EQ(data.numBuckets, info.size());
490 // Check the data passed in to the function
492 size_t bucketIdx = 1;
493 ssize_t offset = -data.duration;
494 for (size_t n = 0; n < data.numBuckets; ++n) {
495 if (bucketIdx >= data.numBuckets) {
497 offset += data.duration;
500 EXPECT_EQ(data.bucketStarts[bucketIdx] + offset,
501 info[infoIdx].start.count()) <<
502 data.duration << "x" << data.numBuckets << ": bucketIdx=" <<
503 bucketIdx << ", infoIdx=" << infoIdx;
505 size_t nextBucketIdx = bucketIdx + 1;
506 ssize_t nextOffset = offset;
507 if (nextBucketIdx >= data.numBuckets) {
509 nextOffset += data.duration;
511 EXPECT_EQ(data.bucketStarts[nextBucketIdx] + nextOffset,
512 info[infoIdx].nextStart.count()) <<
513 data.duration << "x" << data.numBuckets << ": bucketIdx=" <<
514 bucketIdx << ", infoIdx=" << infoIdx;
516 EXPECT_EQ(&ts.getBucketByIndex(bucketIdx), info[infoIdx].bucket);
524 TEST(BucketedTimeSeries, queryByIntervalSimple) {
525 BucketedTimeSeries<int> a(3, seconds(12));
526 for (int i = 0; i < 8; i++) {
527 a.addValue(seconds(i), 1);
529 // We added 1 at each second from 0..7
530 // Query from the time period 0..2.
531 // This is entirely in the first bucket, which has a sum of 4.
532 // The code knows only part of the bucket is covered, and correctly
533 // estimates the desired sum as 3.
534 EXPECT_EQ(2, a.sum(seconds(0), seconds(2)));
537 TEST(BucketedTimeSeries, queryByInterval) {
538 // Set up a BucketedTimeSeries tracking 6 seconds in 3 buckets
539 const int kNumBuckets = 3;
540 const int kDuration = 6;
541 BucketedTimeSeries<double> b(kNumBuckets, seconds(kDuration));
543 for (unsigned int i = 0; i < kDuration; ++i) {
544 // add value 'i' at time 'i'
545 b.addValue(seconds(i), i);
548 // Current bucket state:
549 // 0: time=[0, 2): values=(0, 1), sum=1, count=2
550 // 1: time=[2, 4): values=(2, 3), sum=5, count=1
551 // 2: time=[4, 6): values=(4, 5), sum=9, count=2
552 double expectedSums1[kDuration + 1][kDuration + 1] = {
553 {0, 4.5, 9, 11.5, 14, 14.5, 15},
554 {0, 4.5, 7, 9.5, 10, 10.5, -1},
555 {0, 2.5, 5, 5.5, 6, -1, -1},
556 {0, 2.5, 3, 3.5, -1, -1, -1},
557 {0, 0.5, 1, -1, -1, -1, -1},
558 {0, 0.5, -1, -1, -1, -1, -1},
559 {0, -1, -1, -1, -1, -1, -1}
561 int expectedCounts1[kDuration + 1][kDuration + 1] = {
562 {0, 1, 2, 3, 4, 5, 6},
563 {0, 1, 2, 3, 4, 5, -1},
564 {0, 1, 2, 3, 4, -1, -1},
565 {0, 1, 2, 3, -1, -1, -1},
566 {0, 1, 2, -1, -1, -1, -1},
567 {0, 1, -1, -1, -1, -1, -1},
568 {0, -1, -1, -1, -1, -1, -1}
571 seconds currentTime = b.getLatestTime() + seconds(1);
572 for (int i = 0; i <= kDuration + 1; i++) {
573 for (int j = 0; j <= kDuration - i; j++) {
574 seconds start = currentTime - seconds(i + j);
575 seconds end = currentTime - seconds(i);
576 double expectedSum = expectedSums1[i][j];
577 EXPECT_EQ(expectedSum, b.sum(start, end)) <<
578 "i=" << i << ", j=" << j <<
579 ", interval=[" << start.count() << ", " << end.count() << ")";
581 uint64_t expectedCount = expectedCounts1[i][j];
582 EXPECT_EQ(expectedCount, b.count(start, end)) <<
583 "i=" << i << ", j=" << j <<
584 ", interval=[" << start.count() << ", " << end.count() << ")";
586 double expectedAvg = expectedCount ? expectedSum / expectedCount : 0;
587 EXPECT_EQ(expectedAvg, b.avg(start, end)) <<
588 "i=" << i << ", j=" << j <<
589 ", interval=[" << start.count() << ", " << end.count() << ")";
591 double expectedRate = j ? expectedSum / j : 0;
592 EXPECT_EQ(expectedRate, b.rate(start, end)) <<
593 "i=" << i << ", j=" << j <<
594 ", interval=[" << start.count() << ", " << end.count() << ")";
598 // Add 3 more values.
599 // This will overwrite 1 full bucket, and put us halfway through the next.
600 for (unsigned int i = kDuration; i < kDuration + 3; ++i) {
601 b.addValue(seconds(i), i);
604 // Current bucket state:
605 // 0: time=[6, 8): values=(6, 7), sum=13, count=2
606 // 1: time=[8, 10): values=(8), sum=8, count=1
607 // 2: time=[4, 6): values=(4, 5), sum=9, count=2
608 double expectedSums2[kDuration + 1][kDuration + 1] = {
609 {0, 8, 14.5, 21, 25.5, 30, 30},
610 {0, 6.5, 13, 17.5, 22, 22, -1},
611 {0, 6.5, 11, 15.5, 15.5, -1, -1},
612 {0, 4.5, 9, 9, -1, -1, -1},
613 {0, 4.5, 4.5, -1, -1, -1, -1},
614 {0, 0, -1, -1, -1, -1, -1},
615 {0, -1, -1, -1, -1, -1, -1}
617 int expectedCounts2[kDuration + 1][kDuration + 1] = {
618 {0, 1, 2, 3, 4, 5, 5},
619 {0, 1, 2, 3, 4, 4, -1},
620 {0, 1, 2, 3, 3, -1, -1},
621 {0, 1, 2, 2, -1, -1, -1},
622 {0, 1, 1, -1, -1, -1, -1},
623 {0, 0, -1, -1, -1, -1, -1},
624 {0, -1, -1, -1, -1, -1, -1}
627 currentTime = b.getLatestTime() + seconds(1);
628 for (int i = 0; i <= kDuration + 1; i++) {
629 for (int j = 0; j <= kDuration - i; j++) {
630 seconds start = currentTime - seconds(i + j);
631 seconds end = currentTime - seconds(i);
632 double expectedSum = expectedSums2[i][j];
633 EXPECT_EQ(expectedSum, b.sum(start, end)) <<
634 "i=" << i << ", j=" << j <<
635 ", interval=[" << start.count() << ", " << end.count() << ")";
637 uint64_t expectedCount = expectedCounts2[i][j];
638 EXPECT_EQ(expectedCount, b.count(start, end)) <<
639 "i=" << i << ", j=" << j <<
640 ", interval=[" << start.count() << ", " << end.count() << ")";
642 double expectedAvg = expectedCount ? expectedSum / expectedCount : 0;
643 EXPECT_EQ(expectedAvg, b.avg(start, end)) <<
644 "i=" << i << ", j=" << j <<
645 ", interval=[" << start.count() << ", " << end.count() << ")";
647 double expectedRate = j ? expectedSum / j : 0;
648 EXPECT_EQ(expectedRate, b.rate(start, end)) <<
649 "i=" << i << ", j=" << j <<
650 ", interval=[" << start.count() << ", " << end.count() << ")";