/*
 * Copyright 2013 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #include "folly/stats/BucketedTimeSeries.h"
18 #include "folly/stats/BucketedTimeSeries-defs.h"
19 #include "folly/stats/MultiLevelTimeSeries.h"
20 #include "folly/stats/MultiLevelTimeSeries-defs.h"
22 #include <glog/logging.h>
23 #include <gtest/gtest.h>
25 #include "folly/Foreach.h"
27 using std::chrono::seconds;
30 using folly::BucketedTimeSeries;
// These using-declarations are required by the unqualified vector/string
// uses throughout the tests below.
using std::string;
using std::vector;

// One BucketedTimeSeries configuration to exercise:
//  - duration: total number of seconds tracked by the series
//  - numBuckets: how many buckets the duration is split into
//  - bucketStarts: expected start offset (in seconds) of each bucket within
//    a single duration period
struct TestData {
  size_t duration;
  size_t numBuckets;
  vector<ssize_t> bucketStarts;
};

// Configurations shared by the getBucketInfo and forEachBucket tests.
// NOTE(review): the last two entries were reconstructed from their orphaned
// comments; the expected starts follow from duration/numBuckets.
vector<TestData> testData = {
  // 71 seconds x 4 buckets
  { 71, 4, {0, 18, 36, 54}},
  // 100 seconds x 10 buckets
  { 100, 10, {0, 10, 20, 30, 40, 50, 60, 70, 80, 90}},
  // 10 seconds x 10 buckets
  { 10, 10, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}},
  // 10 seconds x 1 buckets
  { 10, 1, {0}},
  // 1 second x 1 buckets
  { 1, 1, {0}},
};
50 TEST(BucketedTimeSeries, getBucketInfo) {
51 for (const auto& data : testData) {
52 BucketedTimeSeries<int64_t> ts(data.numBuckets, seconds(data.duration));
54 for (uint32_t n = 0; n < 10000; n += 1234) {
55 seconds offset(n * data.duration);
57 for (uint32_t idx = 0; idx < data.numBuckets; ++idx) {
58 seconds bucketStart(data.bucketStarts[idx]);
59 seconds nextBucketStart;
60 if (idx + 1 < data.numBuckets) {
61 nextBucketStart = seconds(data.bucketStarts[idx + 1]);
63 nextBucketStart = seconds(data.duration);
66 seconds expectedStart = offset + bucketStart;
67 seconds expectedNextStart = offset + nextBucketStart;
68 seconds midpoint = (expectedStart + expectedNextStart) / 2;
70 vector<std::pair<string, seconds>> timePoints = {
71 {"expectedStart", expectedStart},
72 {"midpoint", midpoint},
73 {"expectedEnd", expectedNextStart - seconds(1)},
76 for (const auto& point : timePoints) {
77 // Check that getBucketIdx() returns the expected index
78 EXPECT_EQ(idx, ts.getBucketIdx(point.second)) <<
79 data.duration << "x" << data.numBuckets << ": " <<
80 point.first << "=" << point.second.count();
82 // Check the data returned by getBucketInfo()
84 seconds returnedStart;
85 seconds returnedNextStart;
86 ts.getBucketInfo(expectedStart, &returnedIdx,
87 &returnedStart, &returnedNextStart);
88 EXPECT_EQ(idx, returnedIdx) <<
89 data.duration << "x" << data.numBuckets << ": " <<
90 point.first << "=" << point.second.count();
91 EXPECT_EQ(expectedStart.count(), returnedStart.count()) <<
92 data.duration << "x" << data.numBuckets << ": " <<
93 point.first << "=" << point.second.count();
94 EXPECT_EQ(expectedNextStart.count(), returnedNextStart.count()) <<
95 data.duration << "x" << data.numBuckets << ": " <<
96 point.first << "=" << point.second.count();
103 void testUpdate100x10(size_t offset) {
104 // This test code only works when offset is a multiple of the bucket width
105 CHECK_EQ(0, offset % 10);
107 // Create a 100 second timeseries, with 10 buckets
108 BucketedTimeSeries<int64_t> ts(10, seconds(100));
112 // Add 1 value to each bucket
113 for (int n = 5; n <= 95; n += 10) {
114 ts.addValue(seconds(n + offset), 6);
117 EXPECT_EQ(10, ts.count());
118 EXPECT_EQ(60, ts.sum());
119 EXPECT_EQ(6, ts.avg());
122 // Update 2 buckets forwards. This should throw away 2 data points.
124 ts.update(seconds(110 + offset));
125 EXPECT_EQ(8, ts.count());
126 EXPECT_EQ(48, ts.sum());
127 EXPECT_EQ(6, ts.avg());
129 // The last time we added was 95.
130 // Try updating to 189. This should clear everything but the last bucket.
132 ts.update(seconds(151 + offset));
133 EXPECT_EQ(4, ts.count());
134 //EXPECT_EQ(6, ts.sum());
135 EXPECT_EQ(6, ts.avg());
137 // The last time we added was 95.
138 // Try updating to 193: This is nearly one full loop around,
139 // back to the same bucket. update() needs to clear everything
141 ts.update(seconds(193 + offset));
142 EXPECT_EQ(0, ts.count());
143 EXPECT_EQ(0, ts.sum());
144 EXPECT_EQ(0, ts.avg());
146 // The last time we added was 95.
147 // Try updating to 197: This is slightly over one full loop around,
148 // back to the same bucket. update() needs to clear everything
150 ts.update(seconds(197 + offset));
151 EXPECT_EQ(0, ts.count());
152 EXPECT_EQ(0, ts.sum());
153 EXPECT_EQ(0, ts.avg());
155 // The last time we added was 95.
156 // Try updating to 230: This is well over one full loop around,
157 // and everything should be cleared.
159 ts.update(seconds(230 + offset));
160 EXPECT_EQ(0, ts.count());
161 EXPECT_EQ(0, ts.sum());
162 EXPECT_EQ(0, ts.avg());
165 TEST(BucketedTimeSeries, update100x10) {
166 // Run testUpdate100x10() multiple times, with various offsets.
167 // This makes sure the update code works regardless of which bucket it starts
168 // at in the modulo arithmetic.
170 testUpdate100x10(50);
171 testUpdate100x10(370);
172 testUpdate100x10(1937090);
175 TEST(BucketedTimeSeries, update71x5) {
176 // Create a 71 second timeseries, with 5 buckets
177 // This tests when the number of buckets does not divide evenly into the
179 BucketedTimeSeries<int64_t> ts(5, seconds(71));
183 // Add 1 value to each bucket
184 ts.addValue(seconds(11), 6);
185 ts.addValue(seconds(24), 6);
186 ts.addValue(seconds(42), 6);
187 ts.addValue(seconds(43), 6);
188 ts.addValue(seconds(66), 6);
190 EXPECT_EQ(5, ts.count());
191 EXPECT_EQ(30, ts.sum());
192 EXPECT_EQ(6, ts.avg());
195 // Update 2 buckets forwards. This should throw away 2 data points.
197 ts.update(seconds(99));
198 EXPECT_EQ(3, ts.count());
199 EXPECT_EQ(18, ts.sum());
200 EXPECT_EQ(6, ts.avg());
202 // Update 3 buckets forwards. This should throw away 3 data points.
204 ts.update(seconds(100));
205 EXPECT_EQ(2, ts.count());
206 EXPECT_EQ(12, ts.sum());
207 EXPECT_EQ(6, ts.avg());
209 // Update 4 buckets forwards, just under the wrap limit.
210 // This should throw everything but the last bucket away.
212 ts.update(seconds(127));
213 EXPECT_EQ(1, ts.count());
214 EXPECT_EQ(6, ts.sum());
215 EXPECT_EQ(6, ts.avg());
217 // Update 5 buckets forwards, exactly at the wrap limit.
218 // This should throw everything away.
220 ts.update(seconds(128));
221 EXPECT_EQ(0, ts.count());
222 EXPECT_EQ(0, ts.sum());
223 EXPECT_EQ(0, ts.avg());
225 // Update very far forwards, wrapping multiple times.
226 // This should throw everything away.
228 ts.update(seconds(1234));
229 EXPECT_EQ(0, ts.count());
230 EXPECT_EQ(0, ts.sum());
231 EXPECT_EQ(0, ts.avg());
234 TEST(BucketedTimeSeries, elapsed) {
235 BucketedTimeSeries<int64_t> ts(60, seconds(600));
237 // elapsed() is 0 when no data points have been added
238 EXPECT_EQ(0, ts.elapsed().count());
240 // With exactly 1 data point, elapsed() should report 1 second of data
241 seconds start(239218);
242 ts.addValue(start + seconds(0), 200);
243 EXPECT_EQ(1, ts.elapsed().count());
244 // Adding a data point 10 seconds later should result in an elapsed time of
245 // 11 seconds (the time range is [0, 10], inclusive).
246 ts.addValue(start + seconds(10), 200);
247 EXPECT_EQ(11, ts.elapsed().count());
249 // elapsed() returns to 0 after clear()
251 EXPECT_EQ(0, ts.elapsed().count());
253 // Restart, with the starting point on an easier number to work with
254 ts.addValue(seconds(10), 200);
255 EXPECT_EQ(1, ts.elapsed().count());
256 ts.addValue(seconds(580), 200);
257 EXPECT_EQ(571, ts.elapsed().count());
258 ts.addValue(seconds(590), 200);
259 EXPECT_EQ(581, ts.elapsed().count());
260 ts.addValue(seconds(598), 200);
261 EXPECT_EQ(589, ts.elapsed().count());
262 ts.addValue(seconds(599), 200);
263 EXPECT_EQ(590, ts.elapsed().count());
264 ts.addValue(seconds(600), 200);
265 EXPECT_EQ(591, ts.elapsed().count());
266 ts.addValue(seconds(608), 200);
267 EXPECT_EQ(599, ts.elapsed().count());
268 ts.addValue(seconds(609), 200);
269 EXPECT_EQ(600, ts.elapsed().count());
270 // Once we reach 600 seconds worth of data, when we move on to the next
271 // second a full bucket will get thrown out. Now we drop back down to 591
272 // seconds worth of data
273 ts.addValue(seconds(610), 200);
274 EXPECT_EQ(591, ts.elapsed().count());
275 ts.addValue(seconds(618), 200);
276 EXPECT_EQ(599, ts.elapsed().count());
277 ts.addValue(seconds(619), 200);
278 EXPECT_EQ(600, ts.elapsed().count());
279 ts.addValue(seconds(620), 200);
280 EXPECT_EQ(591, ts.elapsed().count());
281 ts.addValue(seconds(123419), 200);
282 EXPECT_EQ(600, ts.elapsed().count());
283 ts.addValue(seconds(123420), 200);
284 EXPECT_EQ(591, ts.elapsed().count());
285 ts.addValue(seconds(123425), 200);
286 EXPECT_EQ(596, ts.elapsed().count());
288 // Time never moves backwards.
289 // Calling update with an old timestamp will just be ignored.
290 ts.update(seconds(29));
291 EXPECT_EQ(596, ts.elapsed().count());
294 TEST(BucketedTimeSeries, rate) {
295 BucketedTimeSeries<int64_t> ts(60, seconds(600));
297 // Add 3 values every 2 seconds, until fill up the buckets
298 for (size_t n = 0; n < 600; n += 2) {
299 ts.addValue(seconds(n), 200, 3);
302 EXPECT_EQ(900, ts.count());
303 EXPECT_EQ(180000, ts.sum());
304 EXPECT_EQ(200, ts.avg());
306 // Really we only entered 599 seconds worth of data: [0, 598] (inclusive)
307 EXPECT_EQ(599, ts.elapsed().count());
308 EXPECT_NEAR(300.5, ts.rate(), 0.005);
309 EXPECT_NEAR(1.5, ts.countRate(), 0.005);
311 // If we add 1 more second, now we will have 600 seconds worth of data
312 ts.update(seconds(599));
313 EXPECT_EQ(600, ts.elapsed().count());
314 EXPECT_NEAR(300, ts.rate(), 0.005);
315 EXPECT_EQ(300, ts.rate<int>());
316 EXPECT_NEAR(1.5, ts.countRate(), 0.005);
318 // However, 1 more second after that and we will have filled up all the
319 // buckets, and have to drop one.
320 ts.update(seconds(600));
321 EXPECT_EQ(591, ts.elapsed().count());
322 EXPECT_NEAR(299.5, ts.rate(), 0.01);
323 EXPECT_EQ(299, ts.rate<int>());
324 EXPECT_NEAR(1.5, ts.countRate(), 0.005);
327 TEST(BucketedTimeSeries, avgTypeConversion) {
328 // Make sure the computed average values are accurate regardless
329 // of the input type and return type.
332 // Simple sanity tests for small positive integer values
333 BucketedTimeSeries<int64_t> ts(60, seconds(600));
334 ts.addValue(seconds(0), 4, 100);
335 ts.addValue(seconds(0), 10, 200);
336 ts.addValue(seconds(0), 16, 100);
338 EXPECT_DOUBLE_EQ(10.0, ts.avg());
339 EXPECT_DOUBLE_EQ(10.0, ts.avg<float>());
340 EXPECT_EQ(10, ts.avg<uint64_t>());
341 EXPECT_EQ(10, ts.avg<int64_t>());
342 EXPECT_EQ(10, ts.avg<int32_t>());
343 EXPECT_EQ(10, ts.avg<int16_t>());
344 EXPECT_EQ(10, ts.avg<int8_t>());
345 EXPECT_EQ(10, ts.avg<uint8_t>());
349 // Test signed integer types with negative values
350 BucketedTimeSeries<int64_t> ts(60, seconds(600));
351 ts.addValue(seconds(0), -100);
352 ts.addValue(seconds(0), -200);
353 ts.addValue(seconds(0), -300);
354 ts.addValue(seconds(0), -200, 65535);
356 EXPECT_DOUBLE_EQ(-200.0, ts.avg());
357 EXPECT_DOUBLE_EQ(-200.0, ts.avg<float>());
358 EXPECT_EQ(-200, ts.avg<int64_t>());
359 EXPECT_EQ(-200, ts.avg<int32_t>());
360 EXPECT_EQ(-200, ts.avg<int16_t>());
364 // Test uint64_t values that would overflow int64_t
365 BucketedTimeSeries<uint64_t> ts(60, seconds(600));
366 ts.addValueAggregated(seconds(0),
367 std::numeric_limits<uint64_t>::max(),
368 std::numeric_limits<uint64_t>::max());
370 EXPECT_DOUBLE_EQ(1.0, ts.avg());
371 EXPECT_DOUBLE_EQ(1.0, ts.avg<float>());
372 EXPECT_EQ(1, ts.avg<uint64_t>());
373 EXPECT_EQ(1, ts.avg<int64_t>());
374 EXPECT_EQ(1, ts.avg<int8_t>());
378 // Test doubles with small-ish values that will fit in integer types
379 BucketedTimeSeries<double> ts(60, seconds(600));
380 ts.addValue(seconds(0), 4.0, 100);
381 ts.addValue(seconds(0), 10.0, 200);
382 ts.addValue(seconds(0), 16.0, 100);
384 EXPECT_DOUBLE_EQ(10.0, ts.avg());
385 EXPECT_DOUBLE_EQ(10.0, ts.avg<float>());
386 EXPECT_EQ(10, ts.avg<uint64_t>());
387 EXPECT_EQ(10, ts.avg<int64_t>());
388 EXPECT_EQ(10, ts.avg<int32_t>());
389 EXPECT_EQ(10, ts.avg<int16_t>());
390 EXPECT_EQ(10, ts.avg<int8_t>());
391 EXPECT_EQ(10, ts.avg<uint8_t>());
395 // Test doubles with huge values
396 BucketedTimeSeries<double> ts(60, seconds(600));
397 ts.addValue(seconds(0), 1e19, 100);
398 ts.addValue(seconds(0), 2e19, 200);
399 ts.addValue(seconds(0), 3e19, 100);
401 EXPECT_DOUBLE_EQ(ts.avg(), 2e19);
402 EXPECT_NEAR(ts.avg<float>(), 2e19, 1e11);
406 // Test doubles where the sum adds up larger than a uint64_t,
407 // but the average fits in an int64_t
408 BucketedTimeSeries<double> ts(60, seconds(600));
409 uint64_t value = 0x3fffffffffffffff;
410 FOR_EACH_RANGE(i, 0, 16) {
411 ts.addValue(seconds(0), value);
414 EXPECT_DOUBLE_EQ(value, ts.avg());
415 EXPECT_DOUBLE_EQ(value, ts.avg<float>());
416 // Some precision is lost here due to the huge sum, so the
417 // integer average returned is off by one.
418 EXPECT_NEAR(value, ts.avg<uint64_t>(), 1);
419 EXPECT_NEAR(value, ts.avg<int64_t>(), 1);
423 // Test BucketedTimeSeries with a smaller integer type
424 BucketedTimeSeries<int16_t> ts(60, seconds(600));
425 FOR_EACH_RANGE(i, 0, 101) {
426 ts.addValue(seconds(0), i);
429 EXPECT_DOUBLE_EQ(50.0, ts.avg());
430 EXPECT_DOUBLE_EQ(50.0, ts.avg<float>());
431 EXPECT_EQ(50, ts.avg<uint64_t>());
432 EXPECT_EQ(50, ts.avg<int64_t>());
433 EXPECT_EQ(50, ts.avg<int16_t>());
434 EXPECT_EQ(50, ts.avg<int8_t>());
438 // Test BucketedTimeSeries with long double input
439 BucketedTimeSeries<long double> ts(60, seconds(600));
440 ts.addValueAggregated(seconds(0), 1000.0L, 7);
442 long double expected = 1000.0L / 7.0L;
443 EXPECT_DOUBLE_EQ(static_cast<double>(expected), ts.avg());
444 EXPECT_DOUBLE_EQ(static_cast<float>(expected), ts.avg<float>());
445 EXPECT_DOUBLE_EQ(expected, ts.avg<long double>());
446 EXPECT_EQ(static_cast<uint64_t>(expected), ts.avg<uint64_t>());
447 EXPECT_EQ(static_cast<int64_t>(expected), ts.avg<int64_t>());
451 // Test BucketedTimeSeries with int64_t values,
452 // but using an average that requires a fair amount of precision.
453 BucketedTimeSeries<int64_t> ts(60, seconds(600));
454 ts.addValueAggregated(seconds(0), 1000, 7);
456 long double expected = 1000.0L / 7.0L;
457 EXPECT_DOUBLE_EQ(static_cast<double>(expected), ts.avg());
458 EXPECT_DOUBLE_EQ(static_cast<float>(expected), ts.avg<float>());
459 EXPECT_DOUBLE_EQ(expected, ts.avg<long double>());
460 EXPECT_EQ(static_cast<uint64_t>(expected), ts.avg<uint64_t>());
461 EXPECT_EQ(static_cast<int64_t>(expected), ts.avg<int64_t>());
465 TEST(BucketedTimeSeries, forEachBucket) {
466 typedef BucketedTimeSeries<int64_t>::Bucket Bucket;
468 BucketInfo(const Bucket* b, seconds s, seconds ns)
469 : bucket(b), start(s), nextStart(ns) {}
471 const Bucket* bucket;
476 for (const auto& data : testData) {
477 BucketedTimeSeries<int64_t> ts(data.numBuckets, seconds(data.duration));
479 vector<BucketInfo> info;
480 auto fn = [&](const Bucket& bucket, seconds bucketStart,
481 seconds bucketEnd) -> bool {
482 info.emplace_back(&bucket, bucketStart, bucketEnd);
486 // If we haven't yet added any data, the current bucket will start at 0,
487 // and all data previous buckets will have negative times.
488 ts.forEachBucket(fn);
490 CHECK_EQ(data.numBuckets, info.size());
492 // Check the data passed in to the function
494 size_t bucketIdx = 1;
495 ssize_t offset = -data.duration;
496 for (size_t n = 0; n < data.numBuckets; ++n) {
497 if (bucketIdx >= data.numBuckets) {
499 offset += data.duration;
502 EXPECT_EQ(data.bucketStarts[bucketIdx] + offset,
503 info[infoIdx].start.count()) <<
504 data.duration << "x" << data.numBuckets << ": bucketIdx=" <<
505 bucketIdx << ", infoIdx=" << infoIdx;
507 size_t nextBucketIdx = bucketIdx + 1;
508 ssize_t nextOffset = offset;
509 if (nextBucketIdx >= data.numBuckets) {
511 nextOffset += data.duration;
513 EXPECT_EQ(data.bucketStarts[nextBucketIdx] + nextOffset,
514 info[infoIdx].nextStart.count()) <<
515 data.duration << "x" << data.numBuckets << ": bucketIdx=" <<
516 bucketIdx << ", infoIdx=" << infoIdx;
518 EXPECT_EQ(&ts.getBucketByIndex(bucketIdx), info[infoIdx].bucket);
526 TEST(BucketedTimeSeries, queryByIntervalSimple) {
527 BucketedTimeSeries<int> a(3, seconds(12));
528 for (int i = 0; i < 8; i++) {
529 a.addValue(seconds(i), 1);
531 // We added 1 at each second from 0..7
532 // Query from the time period 0..2.
533 // This is entirely in the first bucket, which has a sum of 4.
534 // The code knows only part of the bucket is covered, and correctly
535 // estimates the desired sum as 3.
536 EXPECT_EQ(2, a.sum(seconds(0), seconds(2)));
539 TEST(BucketedTimeSeries, queryByInterval) {
540 // Set up a BucketedTimeSeries tracking 6 seconds in 3 buckets
541 const int kNumBuckets = 3;
542 const int kDuration = 6;
543 BucketedTimeSeries<double> b(kNumBuckets, seconds(kDuration));
545 for (unsigned int i = 0; i < kDuration; ++i) {
546 // add value 'i' at time 'i'
547 b.addValue(seconds(i), i);
550 // Current bucket state:
551 // 0: time=[0, 2): values=(0, 1), sum=1, count=2
552 // 1: time=[2, 4): values=(2, 3), sum=5, count=1
553 // 2: time=[4, 6): values=(4, 5), sum=9, count=2
554 double expectedSums1[kDuration + 1][kDuration + 1] = {
555 {0, 4.5, 9, 11.5, 14, 14.5, 15},
556 {0, 4.5, 7, 9.5, 10, 10.5, -1},
557 {0, 2.5, 5, 5.5, 6, -1, -1},
558 {0, 2.5, 3, 3.5, -1, -1, -1},
559 {0, 0.5, 1, -1, -1, -1, -1},
560 {0, 0.5, -1, -1, -1, -1, -1},
561 {0, -1, -1, -1, -1, -1, -1}
563 int expectedCounts1[kDuration + 1][kDuration + 1] = {
564 {0, 1, 2, 3, 4, 5, 6},
565 {0, 1, 2, 3, 4, 5, -1},
566 {0, 1, 2, 3, 4, -1, -1},
567 {0, 1, 2, 3, -1, -1, -1},
568 {0, 1, 2, -1, -1, -1, -1},
569 {0, 1, -1, -1, -1, -1, -1},
570 {0, -1, -1, -1, -1, -1, -1}
573 seconds currentTime = b.getLatestTime() + seconds(1);
574 for (int i = 0; i <= kDuration + 1; i++) {
575 for (int j = 0; j <= kDuration - i; j++) {
576 seconds start = currentTime - seconds(i + j);
577 seconds end = currentTime - seconds(i);
578 double expectedSum = expectedSums1[i][j];
579 EXPECT_EQ(expectedSum, b.sum(start, end)) <<
580 "i=" << i << ", j=" << j <<
581 ", interval=[" << start.count() << ", " << end.count() << ")";
583 uint64_t expectedCount = expectedCounts1[i][j];
584 EXPECT_EQ(expectedCount, b.count(start, end)) <<
585 "i=" << i << ", j=" << j <<
586 ", interval=[" << start.count() << ", " << end.count() << ")";
588 double expectedAvg = expectedCount ? expectedSum / expectedCount : 0;
589 EXPECT_EQ(expectedAvg, b.avg(start, end)) <<
590 "i=" << i << ", j=" << j <<
591 ", interval=[" << start.count() << ", " << end.count() << ")";
593 double expectedRate = j ? expectedSum / j : 0;
594 EXPECT_EQ(expectedRate, b.rate(start, end)) <<
595 "i=" << i << ", j=" << j <<
596 ", interval=[" << start.count() << ", " << end.count() << ")";
600 // Add 3 more values.
601 // This will overwrite 1 full bucket, and put us halfway through the next.
602 for (unsigned int i = kDuration; i < kDuration + 3; ++i) {
603 b.addValue(seconds(i), i);
605 EXPECT_EQ(seconds(4), b.getEarliestTime());
607 // Current bucket state:
608 // 0: time=[6, 8): values=(6, 7), sum=13, count=2
609 // 1: time=[8, 10): values=(8), sum=8, count=1
610 // 2: time=[4, 6): values=(4, 5), sum=9, count=2
611 double expectedSums2[kDuration + 1][kDuration + 1] = {
612 {0, 8, 14.5, 21, 25.5, 30, 30},
613 {0, 6.5, 13, 17.5, 22, 22, -1},
614 {0, 6.5, 11, 15.5, 15.5, -1, -1},
615 {0, 4.5, 9, 9, -1, -1, -1},
616 {0, 4.5, 4.5, -1, -1, -1, -1},
617 {0, 0, -1, -1, -1, -1, -1},
618 {0, -1, -1, -1, -1, -1, -1}
620 int expectedCounts2[kDuration + 1][kDuration + 1] = {
621 {0, 1, 2, 3, 4, 5, 5},
622 {0, 1, 2, 3, 4, 4, -1},
623 {0, 1, 2, 3, 3, -1, -1},
624 {0, 1, 2, 2, -1, -1, -1},
625 {0, 1, 1, -1, -1, -1, -1},
626 {0, 0, -1, -1, -1, -1, -1},
627 {0, -1, -1, -1, -1, -1, -1}
630 currentTime = b.getLatestTime() + seconds(1);
631 for (int i = 0; i <= kDuration + 1; i++) {
632 for (int j = 0; j <= kDuration - i; j++) {
633 seconds start = currentTime - seconds(i + j);
634 seconds end = currentTime - seconds(i);
635 double expectedSum = expectedSums2[i][j];
636 EXPECT_EQ(expectedSum, b.sum(start, end)) <<
637 "i=" << i << ", j=" << j <<
638 ", interval=[" << start.count() << ", " << end.count() << ")";
640 uint64_t expectedCount = expectedCounts2[i][j];
641 EXPECT_EQ(expectedCount, b.count(start, end)) <<
642 "i=" << i << ", j=" << j <<
643 ", interval=[" << start.count() << ", " << end.count() << ")";
645 double expectedAvg = expectedCount ? expectedSum / expectedCount : 0;
646 EXPECT_EQ(expectedAvg, b.avg(start, end)) <<
647 "i=" << i << ", j=" << j <<
648 ", interval=[" << start.count() << ", " << end.count() << ")";
650 seconds dataStart = std::max(start, b.getEarliestTime());
651 seconds dataEnd = std::max(end, dataStart);
652 seconds expectedInterval = dataEnd - dataStart;
653 EXPECT_EQ(expectedInterval, b.elapsed(start, end)) <<
654 "i=" << i << ", j=" << j <<
655 ", interval=[" << start.count() << ", " << end.count() << ")";
657 double expectedRate = expectedInterval.count() ?
658 expectedSum / expectedInterval.count() : 0;
659 EXPECT_EQ(expectedRate, b.rate(start, end)) <<
660 "i=" << i << ", j=" << j <<
661 ", interval=[" << start.count() << ", " << end.count() << ")";
666 TEST(BucketedTimeSeries, rateByInterval) {
667 const int kNumBuckets = 5;
668 const seconds kDuration(10);
669 BucketedTimeSeries<double> b(kNumBuckets, kDuration);
671 // Add data points at a constant rate of 10 per second.
672 // Start adding data points at kDuration, and fill half of the buckets for
674 seconds start = kDuration;
675 seconds end = kDuration + (kDuration / 2);
676 const double kFixedRate = 10.0;
677 for (seconds i = start; i < end; ++i) {
678 b.addValue(i, kFixedRate);
681 // Querying the rate should yield kFixedRate.
682 EXPECT_EQ(kFixedRate, b.rate());
683 EXPECT_EQ(kFixedRate, b.rate(start, end));
684 EXPECT_EQ(kFixedRate, b.rate(start, start + kDuration));
685 EXPECT_EQ(kFixedRate, b.rate(end - kDuration, end));
686 EXPECT_EQ(kFixedRate, b.rate(end - seconds(1), end));
687 // We have been adding 1 data point per second, so countRate()
689 EXPECT_EQ(1.0, b.countRate());
690 EXPECT_EQ(1.0, b.countRate(start, end));
691 EXPECT_EQ(1.0, b.countRate(start, start + kDuration));
692 EXPECT_EQ(1.0, b.countRate(end - kDuration, end));
693 EXPECT_EQ(1.0, b.countRate(end - seconds(1), end));
695 // We haven't added anything before time kDuration.
696 // Querying data earlier than this should result in a rate of 0.
697 EXPECT_EQ(0.0, b.rate(seconds(0), seconds(1)));
698 EXPECT_EQ(0.0, b.countRate(seconds(0), seconds(1)));
700 // Fill the remainder of the timeseries from kDuration to kDuration*2
703 for (seconds i = start; i < end; ++i) {
704 b.addValue(i, kFixedRate);
707 EXPECT_EQ(kFixedRate, b.rate());
708 EXPECT_EQ(kFixedRate, b.rate(kDuration, kDuration * 2));
709 EXPECT_EQ(kFixedRate, b.rate(seconds(0), kDuration * 2));
710 EXPECT_EQ(kFixedRate, b.rate(seconds(0), kDuration * 10));
711 EXPECT_EQ(1.0, b.countRate());
712 EXPECT_EQ(1.0, b.countRate(kDuration, kDuration * 2));
713 EXPECT_EQ(1.0, b.countRate(seconds(0), kDuration * 2));
714 EXPECT_EQ(1.0, b.countRate(seconds(0), kDuration * 10));
725 const seconds kMinuteHourDurations[] = {
726 seconds(60), seconds(3600), seconds(0)
730 TEST(MinuteHourTimeSeries, Basic) {
731 folly::MultiLevelTimeSeries<int> mhts(60, IntMHTS::NUM_LEVELS,
732 IntMHTS::kMinuteHourDurations);
733 EXPECT_EQ(mhts.numLevels(), IntMHTS::NUM_LEVELS);
734 EXPECT_EQ(mhts.numLevels(), 3);
736 EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 0);
737 EXPECT_EQ(mhts.sum(IntMHTS::HOUR), 0);
738 EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME), 0);
740 EXPECT_EQ(mhts.avg(IntMHTS::MINUTE), 0);
741 EXPECT_EQ(mhts.avg(IntMHTS::HOUR), 0);
742 EXPECT_EQ(mhts.avg(IntMHTS::ALLTIME), 0);
744 EXPECT_EQ(mhts.rate(IntMHTS::MINUTE), 0);
745 EXPECT_EQ(mhts.rate(IntMHTS::HOUR), 0);
746 EXPECT_EQ(mhts.rate(IntMHTS::ALLTIME), 0);
748 EXPECT_EQ(mhts.getLevel(IntMHTS::MINUTE).elapsed().count(), 0);
749 EXPECT_EQ(mhts.getLevel(IntMHTS::HOUR).elapsed().count(), 0);
750 EXPECT_EQ(mhts.getLevel(IntMHTS::ALLTIME).elapsed().count(), 0);
754 mhts.addValue(cur_time++, 10);
757 EXPECT_EQ(mhts.getLevel(IntMHTS::MINUTE).elapsed().count(), 1);
758 EXPECT_EQ(mhts.getLevel(IntMHTS::HOUR).elapsed().count(), 1);
759 EXPECT_EQ(mhts.getLevel(IntMHTS::ALLTIME).elapsed().count(), 1);
761 for (int i = 0; i < 299; ++i) {
762 mhts.addValue(cur_time++, 10);
766 EXPECT_EQ(mhts.getLevel(IntMHTS::MINUTE).elapsed().count(), 60);
767 EXPECT_EQ(mhts.getLevel(IntMHTS::HOUR).elapsed().count(), 300);
768 EXPECT_EQ(mhts.getLevel(IntMHTS::ALLTIME).elapsed().count(), 300);
770 EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 600);
771 EXPECT_EQ(mhts.sum(IntMHTS::HOUR), 300*10);
772 EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME), 300*10);
774 EXPECT_EQ(mhts.avg(IntMHTS::MINUTE), 10);
775 EXPECT_EQ(mhts.avg(IntMHTS::HOUR), 10);
776 EXPECT_EQ(mhts.avg(IntMHTS::ALLTIME), 10);
778 EXPECT_EQ(mhts.rate(IntMHTS::MINUTE), 10);
779 EXPECT_EQ(mhts.rate(IntMHTS::HOUR), 10);
780 EXPECT_EQ(mhts.rate(IntMHTS::ALLTIME), 10);
782 for (int i = 0; i < 3600*3 - 300; ++i) {
783 mhts.addValue(cur_time++, 10);
787 EXPECT_EQ(mhts.getLevel(IntMHTS::MINUTE).elapsed().count(), 60);
788 EXPECT_EQ(mhts.getLevel(IntMHTS::HOUR).elapsed().count(), 3600);
789 EXPECT_EQ(mhts.getLevel(IntMHTS::ALLTIME).elapsed().count(), 3600*3);
791 EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 600);
792 EXPECT_EQ(mhts.sum(IntMHTS::HOUR), 3600*10);
793 EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME), 3600*3*10);
795 EXPECT_EQ(mhts.avg(IntMHTS::MINUTE), 10);
796 EXPECT_EQ(mhts.avg(IntMHTS::HOUR), 10);
797 EXPECT_EQ(mhts.avg(IntMHTS::ALLTIME), 10);
799 EXPECT_EQ(mhts.rate(IntMHTS::MINUTE), 10);
800 EXPECT_EQ(mhts.rate(IntMHTS::HOUR), 10);
801 EXPECT_EQ(mhts.rate(IntMHTS::ALLTIME), 10);
803 for (int i = 0; i < 3600; ++i) {
804 mhts.addValue(cur_time++, 100);
808 EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 60*100);
809 EXPECT_EQ(mhts.sum(IntMHTS::HOUR), 3600*100);
810 EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME),
811 3600*3*10 + 3600*100);
813 EXPECT_EQ(mhts.avg(IntMHTS::MINUTE), 100);
814 EXPECT_EQ(mhts.avg(IntMHTS::HOUR), 100);
815 EXPECT_EQ(mhts.avg(IntMHTS::ALLTIME), 32.5);
817 EXPECT_EQ(mhts.rate(IntMHTS::MINUTE), 100);
818 EXPECT_EQ(mhts.rate(IntMHTS::HOUR), 100);
819 EXPECT_EQ(mhts.rate(IntMHTS::ALLTIME), 32);
821 for (int i = 0; i < 1800; ++i) {
822 mhts.addValue(cur_time++, 120);
826 EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 60*120);
827 EXPECT_EQ(mhts.sum(IntMHTS::HOUR),
828 1800*100 + 1800*120);
829 EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME),
830 3600*3*10 + 3600*100 + 1800*120);
832 for (int i = 0; i < 60; ++i) {
833 mhts.addValue(cur_time++, 1000);
837 EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 60*1000);
838 EXPECT_EQ(mhts.sum(IntMHTS::HOUR),
839 1740*100 + 1800*120 + 60*1000);
840 EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME),
841 3600*3*10 + 3600*100 + 1800*120 + 60*1000);
844 EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME), 0);
847 TEST(MinuteHourTimeSeries, QueryByInterval) {
848 folly::MultiLevelTimeSeries<int> mhts(60, IntMHTS::NUM_LEVELS,
849 IntMHTS::kMinuteHourDurations);
852 for (curTime = seconds(0); curTime < seconds(7200); curTime++) {
853 mhts.addValue(curTime, 1);
855 for (curTime = seconds(7200); curTime < seconds(7200 + 3540); curTime++) {
856 mhts.addValue(curTime, 10);
858 for (curTime = seconds(7200 + 3540); curTime < seconds(7200 + 3600);
860 mhts.addValue(curTime, 100);
864 struct TimeInterval {
868 TimeInterval intervals[12] = {
869 { curTime - seconds(60), curTime },
870 { curTime - seconds(3600), curTime },
871 { curTime - seconds(7200), curTime },
872 { curTime - seconds(3600), curTime - seconds(60) },
873 { curTime - seconds(7200), curTime - seconds(60) },
874 { curTime - seconds(7200), curTime - seconds(3600) },
875 { curTime - seconds(50), curTime - seconds(20) },
876 { curTime - seconds(3020), curTime - seconds(20) },
877 { curTime - seconds(7200), curTime - seconds(20) },
878 { curTime - seconds(3000), curTime - seconds(1000) },
879 { curTime - seconds(7200), curTime - seconds(1000) },
880 { curTime - seconds(7200), curTime - seconds(3600) },
883 int expectedSums[12] = {
884 6000, 41400, 32400, 35400, 32130, 16200, 3000, 33600, 32310, 20000, 27900,
888 int expectedCounts[12] = {
889 60, 3600, 7200, 3540, 7140, 3600, 30, 3000, 7180, 2000, 6200, 3600
892 for (int i = 0; i < 12; ++i) {
893 TimeInterval interval = intervals[i];
895 int s = mhts.sum(interval.start, interval.end);
896 EXPECT_EQ(expectedSums[i], s);
898 int c = mhts.count(interval.start, interval.end);
899 EXPECT_EQ(expectedCounts[i], c);
901 int a = mhts.avg<int>(interval.start, interval.end);
902 EXPECT_EQ(expectedCounts[i] ?
903 (expectedSums[i] / expectedCounts[i]) : 0,
906 int r = mhts.rate<int>(interval.start, interval.end);
908 expectedSums[i] / (interval.end - interval.start).count();
909 EXPECT_EQ(expectedRate, r);