2 * Copyright 2016 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 #include <folly/stats/BucketedTimeSeries.h>
18 #include <folly/stats/BucketedTimeSeries-defs.h>
19 #include <folly/stats/MultiLevelTimeSeries.h>
20 #include <folly/stats/MultiLevelTimeSeries-defs.h>
24 #include <glog/logging.h>
25 #include <gtest/gtest.h>
27 #include <folly/Foreach.h>
29 using std::chrono::seconds;
32 using folly::BucketedTimeSeries;
37 vector<ssize_t> bucketStarts;
// Table of (duration, numBuckets, per-bucket start offsets) cases shared by
// the getBucketInfo and forEachBucket tests below.
// NOTE(review): this listing has lines elided (the embedded original line
// numbers skip); the entries for the "10x1" and "1x1" cases and the closing
// brace are not visible here.
39 vector<TestData> testData = {
40 // 71 seconds x 4 buckets
41 { 71, 4, {0, 18, 36, 54}},
42 // 100 seconds x 10 buckets
43 { 100, 10, {0, 10, 20, 30, 40, 50, 60, 70, 80, 90}},
44 // 10 seconds x 10 buckets
45 { 10, 10, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}},
46 // 10 seconds x 1 buckets
48 // 1 second x 1 buckets
// Verifies getBucketIdx() and getBucketInfo() for every testData case:
// for each bucket, the expected index and [start, nextStart) range are
// checked at the bucket's start, midpoint, and last second, repeated at
// several whole-duration offsets to exercise the modulo arithmetic.
52 TEST(BucketedTimeSeries, getBucketInfo) {
53 for (const auto& data : testData) {
54 BucketedTimeSeries<int64_t> ts(data.numBuckets, seconds(data.duration));
56 for (uint32_t n = 0; n < 10000; n += 1234) {
// Shift all expected times by a whole number of durations; bucket
// boundaries must be unaffected by which "lap" we are on.
57 seconds offset(n * data.duration);
59 for (uint32_t idx = 0; idx < data.numBuckets; ++idx) {
60 seconds bucketStart(data.bucketStarts[idx]);
61 seconds nextBucketStart;
62 if (idx + 1 < data.numBuckets) {
63 nextBucketStart = seconds(data.bucketStarts[idx + 1]);
65 nextBucketStart = seconds(data.duration);
68 seconds expectedStart = offset + bucketStart;
69 seconds expectedNextStart = offset + nextBucketStart;
70 seconds midpoint = (expectedStart + expectedNextStart) / 2;
// Probe the first, middle, and last second of the bucket.
72 vector<std::pair<string, seconds>> timePoints = {
73 {"expectedStart", expectedStart},
74 {"midpoint", midpoint},
75 {"expectedEnd", expectedNextStart - seconds(1)},
78 for (const auto& point : timePoints) {
79 // Check that getBucketIdx() returns the expected index
80 EXPECT_EQ(idx, ts.getBucketIdx(point.second)) <<
81 data.duration << "x" << data.numBuckets << ": " <<
82 point.first << "=" << point.second.count();
84 // Check the data returned by getBucketInfo()
86 seconds returnedStart;
87 seconds returnedNextStart;
88 ts.getBucketInfo(expectedStart, &returnedIdx,
89 &returnedStart, &returnedNextStart);
90 EXPECT_EQ(idx, returnedIdx) <<
91 data.duration << "x" << data.numBuckets << ": " <<
92 point.first << "=" << point.second.count();
93 EXPECT_EQ(expectedStart.count(), returnedStart.count()) <<
94 data.duration << "x" << data.numBuckets << ": " <<
95 point.first << "=" << point.second.count();
96 EXPECT_EQ(expectedNextStart.count(), returnedNextStart.count()) <<
97 data.duration << "x" << data.numBuckets << ": " <<
98 point.first << "=" << point.second.count();
// Helper for the update100x10 test: fills a 100s/10-bucket series with one
// value (6) per bucket, then calls update() at increasing timestamps and
// checks how many data points get expired at each step.
// @param offset  time shift applied to every timestamp; must be a multiple
//                of the 10-second bucket width (checked below).
105 void testUpdate100x10(size_t offset) {
106 // This test code only works when offset is a multiple of the bucket width
107 CHECK_EQ(0, offset % 10);
109 // Create a 100 second timeseries, with 10 buckets
110 BucketedTimeSeries<int64_t> ts(10, seconds(100));
114 // Add 1 value to each bucket
115 for (int n = 5; n <= 95; n += 10) {
116 ts.addValue(seconds(n + offset), 6);
119 EXPECT_EQ(10, ts.count());
120 EXPECT_EQ(60, ts.sum());
121 EXPECT_EQ(6, ts.avg());
124 // Update 2 buckets forwards. This should throw away 2 data points.
126 ts.update(seconds(110 + offset));
127 EXPECT_EQ(8, ts.count());
128 EXPECT_EQ(48, ts.sum());
129 EXPECT_EQ(6, ts.avg());
131 // The last time we added was 95.
132 // Try updating to 189. This should clear everything but the last bucket.
134 ts.update(seconds(151 + offset));
135 EXPECT_EQ(4, ts.count());
136 //EXPECT_EQ(6, ts.sum());
137 EXPECT_EQ(6, ts.avg());
139 // The last time we added was 95.
140 // Try updating to 193: This is nearly one full loop around,
141 // back to the same bucket. update() needs to clear everything
143 ts.update(seconds(193 + offset));
144 EXPECT_EQ(0, ts.count());
145 EXPECT_EQ(0, ts.sum());
146 EXPECT_EQ(0, ts.avg());
148 // The last time we added was 95.
149 // Try updating to 197: This is slightly over one full loop around,
150 // back to the same bucket. update() needs to clear everything
152 ts.update(seconds(197 + offset));
153 EXPECT_EQ(0, ts.count());
154 EXPECT_EQ(0, ts.sum());
155 EXPECT_EQ(0, ts.avg());
157 // The last time we added was 95.
158 // Try updating to 230: This is well over one full loop around,
159 // and everything should be cleared.
161 ts.update(seconds(230 + offset));
162 EXPECT_EQ(0, ts.count());
163 EXPECT_EQ(0, ts.sum());
164 EXPECT_EQ(0, ts.avg());
// Driver: exercises testUpdate100x10() at several bucket-aligned offsets so
// the series starts in different buckets of the circular buffer.
// NOTE(review): this listing elides original line 171, which by the
// surrounding pattern was presumably another testUpdate100x10(...) call.
167 TEST(BucketedTimeSeries, update100x10) {
168 // Run testUpdate100x10() multiple times, with various offsets.
169 // This makes sure the update code works regardless of which bucket it starts
170 // at in the modulo arithmetic.
172 testUpdate100x10(50);
173 testUpdate100x10(370);
174 testUpdate100x10(1937090);
// Exercises update()/expiry when the duration (71s) does not divide evenly
// by the bucket count (5): bucket widths alternate between 14 and 15 seconds.
177 TEST(BucketedTimeSeries, update71x5) {
178 // Create a 71 second timeseries, with 5 buckets
179 // This tests when the number of buckets does not divide evenly into the
181 BucketedTimeSeries<int64_t> ts(5, seconds(71));
185 // Add 1 value to each bucket
186 ts.addValue(seconds(11), 6);
187 ts.addValue(seconds(24), 6);
188 ts.addValue(seconds(42), 6);
189 ts.addValue(seconds(43), 6);
190 ts.addValue(seconds(66), 6);
192 EXPECT_EQ(5, ts.count());
193 EXPECT_EQ(30, ts.sum());
194 EXPECT_EQ(6, ts.avg());
197 // Update 2 buckets forwards. This should throw away 2 data points.
199 ts.update(seconds(99));
200 EXPECT_EQ(3, ts.count());
201 EXPECT_EQ(18, ts.sum());
202 EXPECT_EQ(6, ts.avg());
204 // Update 3 buckets forwards. This should throw away 3 data points.
206 ts.update(seconds(100));
207 EXPECT_EQ(2, ts.count());
208 EXPECT_EQ(12, ts.sum());
209 EXPECT_EQ(6, ts.avg());
211 // Update 4 buckets forwards, just under the wrap limit.
212 // This should throw everything but the last bucket away.
214 ts.update(seconds(127));
215 EXPECT_EQ(1, ts.count());
216 EXPECT_EQ(6, ts.sum());
217 EXPECT_EQ(6, ts.avg());
219 // Update 5 buckets forwards, exactly at the wrap limit.
220 // This should throw everything away.
222 ts.update(seconds(128));
223 EXPECT_EQ(0, ts.count());
224 EXPECT_EQ(0, ts.sum());
225 EXPECT_EQ(0, ts.avg());
227 // Update very far forwards, wrapping multiple times.
228 // This should throw everything away.
230 ts.update(seconds(1234));
231 EXPECT_EQ(0, ts.count());
232 EXPECT_EQ(0, ts.sum());
233 EXPECT_EQ(0, ts.avg());
// Verifies elapsed(): 0 when empty, inclusive time-range accounting as data
// is added, the sawtooth between 591 and 600 once a full window of data has
// accumulated and whole buckets start expiring, and that stale update()
// calls are ignored.
236 TEST(BucketedTimeSeries, elapsed) {
237 BucketedTimeSeries<int64_t> ts(60, seconds(600));
239 // elapsed() is 0 when no data points have been added
240 EXPECT_EQ(0, ts.elapsed().count());
242 // With exactly 1 data point, elapsed() should report 1 second of data
243 seconds start(239218);
244 ts.addValue(start + seconds(0), 200);
245 EXPECT_EQ(1, ts.elapsed().count());
246 // Adding a data point 10 seconds later should result in an elapsed time of
247 // 11 seconds (the time range is [0, 10], inclusive).
248 ts.addValue(start + seconds(10), 200);
249 EXPECT_EQ(11, ts.elapsed().count());
251 // elapsed() returns to 0 after clear()
// NOTE(review): the ts.clear() call (original line 252) is elided from this
// listing; the assertion below relies on it.
253 EXPECT_EQ(0, ts.elapsed().count());
255 // Restart, with the starting point on an easier number to work with
256 ts.addValue(seconds(10), 200);
257 EXPECT_EQ(1, ts.elapsed().count());
258 ts.addValue(seconds(580), 200);
259 EXPECT_EQ(571, ts.elapsed().count());
260 ts.addValue(seconds(590), 200);
261 EXPECT_EQ(581, ts.elapsed().count());
262 ts.addValue(seconds(598), 200);
263 EXPECT_EQ(589, ts.elapsed().count());
264 ts.addValue(seconds(599), 200);
265 EXPECT_EQ(590, ts.elapsed().count());
266 ts.addValue(seconds(600), 200);
267 EXPECT_EQ(591, ts.elapsed().count());
268 ts.addValue(seconds(608), 200);
269 EXPECT_EQ(599, ts.elapsed().count());
270 ts.addValue(seconds(609), 200);
271 EXPECT_EQ(600, ts.elapsed().count());
272 // Once we reach 600 seconds worth of data, when we move on to the next
273 // second a full bucket will get thrown out. Now we drop back down to 591
274 // seconds worth of data
275 ts.addValue(seconds(610), 200);
276 EXPECT_EQ(591, ts.elapsed().count());
277 ts.addValue(seconds(618), 200);
278 EXPECT_EQ(599, ts.elapsed().count());
279 ts.addValue(seconds(619), 200);
280 EXPECT_EQ(600, ts.elapsed().count());
281 ts.addValue(seconds(620), 200);
282 EXPECT_EQ(591, ts.elapsed().count());
283 ts.addValue(seconds(123419), 200);
284 EXPECT_EQ(600, ts.elapsed().count());
285 ts.addValue(seconds(123420), 200);
286 EXPECT_EQ(591, ts.elapsed().count());
287 ts.addValue(seconds(123425), 200);
288 EXPECT_EQ(596, ts.elapsed().count());
290 // Time never moves backwards.
291 // Calling update with an old timestamp will just be ignored.
292 ts.update(seconds(29));
293 EXPECT_EQ(596, ts.elapsed().count());
// Verifies rate() (sum per elapsed second) and countRate() (count per
// elapsed second), including the int-truncating rate<int>() overload, as the
// window fills and then starts dropping buckets.
296 TEST(BucketedTimeSeries, rate) {
297 BucketedTimeSeries<int64_t> ts(60, seconds(600));
299 // Add 3 values every 2 seconds, until fill up the buckets
300 for (size_t n = 0; n < 600; n += 2) {
301 ts.addValue(seconds(n), 200, 3);
304 EXPECT_EQ(900, ts.count());
305 EXPECT_EQ(180000, ts.sum());
306 EXPECT_EQ(200, ts.avg());
308 // Really we only entered 599 seconds worth of data: [0, 598] (inclusive)
309 EXPECT_EQ(599, ts.elapsed().count());
310 EXPECT_NEAR(300.5, ts.rate(), 0.005);
311 EXPECT_NEAR(1.5, ts.countRate(), 0.005);
313 // If we add 1 more second, now we will have 600 seconds worth of data
314 ts.update(seconds(599));
315 EXPECT_EQ(600, ts.elapsed().count());
316 EXPECT_NEAR(300, ts.rate(), 0.005);
317 EXPECT_EQ(300, ts.rate<int>());
318 EXPECT_NEAR(1.5, ts.countRate(), 0.005);
320 // However, 1 more second after that and we will have filled up all the
321 // buckets, and have to drop one.
322 ts.update(seconds(600));
323 EXPECT_EQ(591, ts.elapsed().count());
324 EXPECT_NEAR(299.5, ts.rate(), 0.01);
325 EXPECT_EQ(299, ts.rate<int>());
326 EXPECT_NEAR(1.5, ts.countRate(), 0.005);
// Verifies avg<T>() accuracy across combinations of stored value type and
// requested return type: small ints, negative values, uint64_t overflow of
// int64_t, huge doubles, sums exceeding uint64_t, narrow integer storage,
// and long double precision.
329 TEST(BucketedTimeSeries, avgTypeConversion) {
330 // Make sure the computed average values are accurate regardless
331 // of the input type and return type.
334 // Simple sanity tests for small positive integer values
335 BucketedTimeSeries<int64_t> ts(60, seconds(600));
336 ts.addValue(seconds(0), 4, 100);
337 ts.addValue(seconds(0), 10, 200);
338 ts.addValue(seconds(0), 16, 100);
// (4*100 + 10*200 + 16*100) / 400 == 10 exactly
340 EXPECT_DOUBLE_EQ(10.0, ts.avg());
341 EXPECT_DOUBLE_EQ(10.0, ts.avg<float>());
342 EXPECT_EQ(10, ts.avg<uint64_t>());
343 EXPECT_EQ(10, ts.avg<int64_t>());
344 EXPECT_EQ(10, ts.avg<int32_t>());
345 EXPECT_EQ(10, ts.avg<int16_t>());
346 EXPECT_EQ(10, ts.avg<int8_t>());
347 EXPECT_EQ(10, ts.avg<uint8_t>());
351 // Test signed integer types with negative values
352 BucketedTimeSeries<int64_t> ts(60, seconds(600));
353 ts.addValue(seconds(0), -100);
354 ts.addValue(seconds(0), -200);
355 ts.addValue(seconds(0), -300);
356 ts.addValue(seconds(0), -200, 65535);
358 EXPECT_DOUBLE_EQ(-200.0, ts.avg());
359 EXPECT_DOUBLE_EQ(-200.0, ts.avg<float>());
360 EXPECT_EQ(-200, ts.avg<int64_t>());
361 EXPECT_EQ(-200, ts.avg<int32_t>());
362 EXPECT_EQ(-200, ts.avg<int16_t>());
366 // Test uint64_t values that would overflow int64_t
367 BucketedTimeSeries<uint64_t> ts(60, seconds(600));
368 ts.addValueAggregated(seconds(0),
369 std::numeric_limits<uint64_t>::max(),
370 std::numeric_limits<uint64_t>::max());
// sum == count == UINT64_MAX, so the average is exactly 1
372 EXPECT_DOUBLE_EQ(1.0, ts.avg());
373 EXPECT_DOUBLE_EQ(1.0, ts.avg<float>());
374 EXPECT_EQ(1, ts.avg<uint64_t>());
375 EXPECT_EQ(1, ts.avg<int64_t>());
376 EXPECT_EQ(1, ts.avg<int8_t>());
380 // Test doubles with small-ish values that will fit in integer types
381 BucketedTimeSeries<double> ts(60, seconds(600));
382 ts.addValue(seconds(0), 4.0, 100);
383 ts.addValue(seconds(0), 10.0, 200);
384 ts.addValue(seconds(0), 16.0, 100);
386 EXPECT_DOUBLE_EQ(10.0, ts.avg());
387 EXPECT_DOUBLE_EQ(10.0, ts.avg<float>());
388 EXPECT_EQ(10, ts.avg<uint64_t>());
389 EXPECT_EQ(10, ts.avg<int64_t>());
390 EXPECT_EQ(10, ts.avg<int32_t>());
391 EXPECT_EQ(10, ts.avg<int16_t>());
392 EXPECT_EQ(10, ts.avg<int8_t>());
393 EXPECT_EQ(10, ts.avg<uint8_t>());
397 // Test doubles with huge values
398 BucketedTimeSeries<double> ts(60, seconds(600));
399 ts.addValue(seconds(0), 1e19, 100);
400 ts.addValue(seconds(0), 2e19, 200);
401 ts.addValue(seconds(0), 3e19, 100);
403 EXPECT_DOUBLE_EQ(ts.avg(), 2e19);
404 EXPECT_NEAR(ts.avg<float>(), 2e19, 1e11);
408 // Test doubles where the sum adds up larger than a uint64_t,
409 // but the average fits in an int64_t
410 BucketedTimeSeries<double> ts(60, seconds(600));
411 uint64_t value = 0x3fffffffffffffff;
412 FOR_EACH_RANGE(i, 0, 16) {
413 ts.addValue(seconds(0), value);
416 EXPECT_DOUBLE_EQ(value, ts.avg());
417 EXPECT_DOUBLE_EQ(value, ts.avg<float>());
418 // Some precision is lost here due to the huge sum, so the
419 // integer average returned is off by one.
420 EXPECT_NEAR(value, ts.avg<uint64_t>(), 1);
421 EXPECT_NEAR(value, ts.avg<int64_t>(), 1);
425 // Test BucketedTimeSeries with a smaller integer type
426 BucketedTimeSeries<int16_t> ts(60, seconds(600));
427 FOR_EACH_RANGE(i, 0, 101) {
428 ts.addValue(seconds(0), i);
// Average of 0..100 is exactly 50
431 EXPECT_DOUBLE_EQ(50.0, ts.avg());
432 EXPECT_DOUBLE_EQ(50.0, ts.avg<float>());
433 EXPECT_EQ(50, ts.avg<uint64_t>());
434 EXPECT_EQ(50, ts.avg<int64_t>());
435 EXPECT_EQ(50, ts.avg<int16_t>());
436 EXPECT_EQ(50, ts.avg<int8_t>());
440 // Test BucketedTimeSeries with long double input
441 BucketedTimeSeries<long double> ts(60, seconds(600));
442 ts.addValueAggregated(seconds(0), 1000.0L, 7);
444 long double expected = 1000.0L / 7.0L;
445 EXPECT_DOUBLE_EQ(static_cast<double>(expected), ts.avg());
446 EXPECT_DOUBLE_EQ(static_cast<float>(expected), ts.avg<float>());
447 EXPECT_DOUBLE_EQ(expected, ts.avg<long double>());
448 EXPECT_EQ(static_cast<uint64_t>(expected), ts.avg<uint64_t>());
449 EXPECT_EQ(static_cast<int64_t>(expected), ts.avg<int64_t>());
453 // Test BucketedTimeSeries with int64_t values,
454 // but using an average that requires a fair amount of precision.
455 BucketedTimeSeries<int64_t> ts(60, seconds(600));
456 ts.addValueAggregated(seconds(0), 1000, 7);
458 long double expected = 1000.0L / 7.0L;
459 EXPECT_DOUBLE_EQ(static_cast<double>(expected), ts.avg());
460 EXPECT_DOUBLE_EQ(static_cast<float>(expected), ts.avg<float>());
461 EXPECT_DOUBLE_EQ(expected, ts.avg<long double>());
462 EXPECT_EQ(static_cast<uint64_t>(expected), ts.avg<uint64_t>());
463 EXPECT_EQ(static_cast<int64_t>(expected), ts.avg<int64_t>());
// Verifies forEachBucket(): with no data added, the callback is invoked once
// per bucket with (bucket pointer, start, nextStart), where all buckets
// before the current one report negative start times (offset by -duration).
// NOTE(review): this listing elides lines of the BucketInfo helper struct
// (its header and some members) and several loop-closing braces.
467 TEST(BucketedTimeSeries, forEachBucket) {
468 typedef BucketedTimeSeries<int64_t>::Bucket Bucket;
470 BucketInfo(const Bucket* b, seconds s, seconds ns)
471 : bucket(b), start(s), nextStart(ns) {}
473 const Bucket* bucket;
478 for (const auto& data : testData) {
479 BucketedTimeSeries<int64_t> ts(data.numBuckets, seconds(data.duration));
// Record every callback invocation for later inspection.
481 vector<BucketInfo> info;
482 auto fn = [&](const Bucket& bucket, seconds bucketStart,
483 seconds bucketEnd) -> bool {
484 info.emplace_back(&bucket, bucketStart, bucketEnd);
488 // If we haven't yet added any data, the current bucket will start at 0,
489 // and all data previous buckets will have negative times.
490 ts.forEachBucket(fn);
492 CHECK_EQ(data.numBuckets, info.size());
494 // Check the data passed in to the function
496 size_t bucketIdx = 1;
497 ssize_t offset = -data.duration;
498 for (size_t n = 0; n < data.numBuckets; ++n) {
499 if (bucketIdx >= data.numBuckets) {
501 offset += data.duration;
504 EXPECT_EQ(data.bucketStarts[bucketIdx] + offset,
505 info[infoIdx].start.count()) <<
506 data.duration << "x" << data.numBuckets << ": bucketIdx=" <<
507 bucketIdx << ", infoIdx=" << infoIdx;
509 size_t nextBucketIdx = bucketIdx + 1;
510 ssize_t nextOffset = offset;
511 if (nextBucketIdx >= data.numBuckets) {
513 nextOffset += data.duration;
515 EXPECT_EQ(data.bucketStarts[nextBucketIdx] + nextOffset,
516 info[infoIdx].nextStart.count()) <<
517 data.duration << "x" << data.numBuckets << ": bucketIdx=" <<
518 bucketIdx << ", infoIdx=" << infoIdx;
520 EXPECT_EQ(&ts.getBucketByIndex(bucketIdx), info[infoIdx].bucket);
// Simple interval query: 3 buckets over 12s means each bucket is 4s wide.
// Values of 1 are added at t=0..7, so the first bucket [0, 4) has sum 4.
528 TEST(BucketedTimeSeries, queryByIntervalSimple) {
529 BucketedTimeSeries<int> a(3, seconds(12));
530 for (int i = 0; i < 8; i++) {
531 a.addValue(seconds(i), 1);
533 // We added 1 at each second from 0..7
534 // Query from the time period 0..2.
535 // This is entirely in the first bucket, which has a sum of 4.
536 // The code knows only part of the bucket is covered, and correctly
// (Half of the 4-second bucket is covered, so half of its sum of 4.)
537 // estimates the desired sum as 2.
538 EXPECT_EQ(2, a.sum(seconds(0), seconds(2)));
// Exhaustive interval queries over a 6s/3-bucket series: tables of expected
// sums and counts are indexed by [i][j], where the queried interval is
// [currentTime - (i + j), currentTime - i). -1 entries mark unused cells
// (the inner loop bound j <= kDuration - i never reaches them).
541 TEST(BucketedTimeSeries, queryByInterval) {
542 // Set up a BucketedTimeSeries tracking 6 seconds in 3 buckets
543 const int kNumBuckets = 3;
544 const int kDuration = 6;
545 BucketedTimeSeries<double> b(kNumBuckets, seconds(kDuration));
547 for (unsigned int i = 0; i < kDuration; ++i) {
548 // add value 'i' at time 'i'
549 b.addValue(seconds(i), i);
552 // Current bucket state:
553 // 0: time=[0, 2): values=(0, 1), sum=1, count=2
554 // 1: time=[2, 4): values=(2, 3), sum=5, count=1
555 // 2: time=[4, 6): values=(4, 5), sum=9, count=2
556 double expectedSums1[kDuration + 1][kDuration + 1] = {
557 {0, 4.5, 9, 11.5, 14, 14.5, 15},
558 {0, 4.5, 7, 9.5, 10, 10.5, -1},
559 {0, 2.5, 5, 5.5, 6, -1, -1},
560 {0, 2.5, 3, 3.5, -1, -1, -1},
561 {0, 0.5, 1, -1, -1, -1, -1},
562 {0, 0.5, -1, -1, -1, -1, -1},
563 {0, -1, -1, -1, -1, -1, -1}
565 int expectedCounts1[kDuration + 1][kDuration + 1] = {
566 {0, 1, 2, 3, 4, 5, 6},
567 {0, 1, 2, 3, 4, 5, -1},
568 {0, 1, 2, 3, 4, -1, -1},
569 {0, 1, 2, 3, -1, -1, -1},
570 {0, 1, 2, -1, -1, -1, -1},
571 {0, 1, -1, -1, -1, -1, -1},
572 {0, -1, -1, -1, -1, -1, -1}
575 seconds currentTime = b.getLatestTime() + seconds(1);
576 for (int i = 0; i <= kDuration + 1; i++) {
577 for (int j = 0; j <= kDuration - i; j++) {
578 seconds start = currentTime - seconds(i + j);
579 seconds end = currentTime - seconds(i);
580 double expectedSum = expectedSums1[i][j];
581 EXPECT_EQ(expectedSum, b.sum(start, end)) <<
582 "i=" << i << ", j=" << j <<
583 ", interval=[" << start.count() << ", " << end.count() << ")";
585 uint64_t expectedCount = expectedCounts1[i][j];
586 EXPECT_EQ(expectedCount, b.count(start, end)) <<
587 "i=" << i << ", j=" << j <<
588 ", interval=[" << start.count() << ", " << end.count() << ")";
590 double expectedAvg = expectedCount ? expectedSum / expectedCount : 0;
591 EXPECT_EQ(expectedAvg, b.avg(start, end)) <<
592 "i=" << i << ", j=" << j <<
593 ", interval=[" << start.count() << ", " << end.count() << ")";
595 double expectedRate = j ? expectedSum / j : 0;
596 EXPECT_EQ(expectedRate, b.rate(start, end)) <<
597 "i=" << i << ", j=" << j <<
598 ", interval=[" << start.count() << ", " << end.count() << ")";
602 // Add 3 more values.
603 // This will overwrite 1 full bucket, and put us halfway through the next.
604 for (unsigned int i = kDuration; i < kDuration + 3; ++i) {
605 b.addValue(seconds(i), i);
607 EXPECT_EQ(seconds(4), b.getEarliestTime());
609 // Current bucket state:
610 // 0: time=[6, 8): values=(6, 7), sum=13, count=2
611 // 1: time=[8, 10): values=(8), sum=8, count=1
612 // 2: time=[4, 6): values=(4, 5), sum=9, count=2
613 double expectedSums2[kDuration + 1][kDuration + 1] = {
614 {0, 8, 14.5, 21, 25.5, 30, 30},
615 {0, 6.5, 13, 17.5, 22, 22, -1},
616 {0, 6.5, 11, 15.5, 15.5, -1, -1},
617 {0, 4.5, 9, 9, -1, -1, -1},
618 {0, 4.5, 4.5, -1, -1, -1, -1},
619 {0, 0, -1, -1, -1, -1, -1},
620 {0, -1, -1, -1, -1, -1, -1}
622 int expectedCounts2[kDuration + 1][kDuration + 1] = {
623 {0, 1, 2, 3, 4, 5, 5},
624 {0, 1, 2, 3, 4, 4, -1},
625 {0, 1, 2, 3, 3, -1, -1},
626 {0, 1, 2, 2, -1, -1, -1},
627 {0, 1, 1, -1, -1, -1, -1},
628 {0, 0, -1, -1, -1, -1, -1},
629 {0, -1, -1, -1, -1, -1, -1}
632 currentTime = b.getLatestTime() + seconds(1);
633 for (int i = 0; i <= kDuration + 1; i++) {
634 for (int j = 0; j <= kDuration - i; j++) {
635 seconds start = currentTime - seconds(i + j);
636 seconds end = currentTime - seconds(i);
637 double expectedSum = expectedSums2[i][j];
638 EXPECT_EQ(expectedSum, b.sum(start, end)) <<
639 "i=" << i << ", j=" << j <<
640 ", interval=[" << start.count() << ", " << end.count() << ")";
642 uint64_t expectedCount = expectedCounts2[i][j];
643 EXPECT_EQ(expectedCount, b.count(start, end)) <<
644 "i=" << i << ", j=" << j <<
645 ", interval=[" << start.count() << ", " << end.count() << ")";
647 double expectedAvg = expectedCount ? expectedSum / expectedCount : 0;
648 EXPECT_EQ(expectedAvg, b.avg(start, end)) <<
649 "i=" << i << ", j=" << j <<
650 ", interval=[" << start.count() << ", " << end.count() << ")";
// elapsed() clamps the queried interval to the data actually present,
// so derive the expected interval from getEarliestTime().
652 seconds dataStart = std::max(start, b.getEarliestTime());
653 seconds dataEnd = std::max(end, dataStart);
654 seconds expectedInterval = dataEnd - dataStart;
655 EXPECT_EQ(expectedInterval, b.elapsed(start, end)) <<
656 "i=" << i << ", j=" << j <<
657 ", interval=[" << start.count() << ", " << end.count() << ")";
659 double expectedRate = expectedInterval.count() ?
660 expectedSum / expectedInterval.count() : 0;
661 EXPECT_EQ(expectedRate, b.rate(start, end)) <<
662 "i=" << i << ", j=" << j <<
663 ", interval=[" << start.count() << ", " << end.count() << ")";
// Verifies interval-scoped rate()/countRate(): data added at a constant
// value of 10 once per second should report rate 10 and countRate 1 over any
// interval containing data, and 0 over intervals before any data existed.
668 TEST(BucketedTimeSeries, rateByInterval) {
669 const int kNumBuckets = 5;
670 const seconds kDuration(10);
671 BucketedTimeSeries<double> b(kNumBuckets, kDuration);
673 // Add data points at a constant rate of 10 per second.
674 // Start adding data points at kDuration, and fill half of the buckets for
676 seconds start = kDuration;
677 seconds end = kDuration + (kDuration / 2);
678 const double kFixedRate = 10.0;
679 for (seconds i = start; i < end; ++i) {
680 b.addValue(i, kFixedRate);
683 // Querying the rate should yield kFixedRate.
684 EXPECT_EQ(kFixedRate, b.rate());
685 EXPECT_EQ(kFixedRate, b.rate(start, end));
686 EXPECT_EQ(kFixedRate, b.rate(start, start + kDuration));
687 EXPECT_EQ(kFixedRate, b.rate(end - kDuration, end));
688 EXPECT_EQ(kFixedRate, b.rate(end - seconds(1), end));
689 // We have been adding 1 data point per second, so countRate()
691 EXPECT_EQ(1.0, b.countRate());
692 EXPECT_EQ(1.0, b.countRate(start, end));
693 EXPECT_EQ(1.0, b.countRate(start, start + kDuration));
694 EXPECT_EQ(1.0, b.countRate(end - kDuration, end));
695 EXPECT_EQ(1.0, b.countRate(end - seconds(1), end));
697 // We haven't added anything before time kDuration.
698 // Querying data earlier than this should result in a rate of 0.
699 EXPECT_EQ(0.0, b.rate(seconds(0), seconds(1)));
700 EXPECT_EQ(0.0, b.countRate(seconds(0), seconds(1)));
702 // Fill the remainder of the timeseries from kDuration to kDuration*2
// NOTE(review): the reassignment of start/end for this second fill loop
// (original lines 703-704) is elided from this listing.
705 for (seconds i = start; i < end; ++i) {
706 b.addValue(i, kFixedRate);
709 EXPECT_EQ(kFixedRate, b.rate());
710 EXPECT_EQ(kFixedRate, b.rate(kDuration, kDuration * 2));
711 EXPECT_EQ(kFixedRate, b.rate(seconds(0), kDuration * 2));
712 EXPECT_EQ(kFixedRate, b.rate(seconds(0), kDuration * 10));
713 EXPECT_EQ(1.0, b.countRate());
714 EXPECT_EQ(1.0, b.countRate(kDuration, kDuration * 2));
715 EXPECT_EQ(1.0, b.countRate(seconds(0), kDuration * 2));
716 EXPECT_EQ(1.0, b.countRate(seconds(0), kDuration * 10));
// Verifies adding values with past (historical) timestamps: values landing
// in a still-live bucket are accepted and included in aggregates; values
// older than the tracked window are rejected (addValue returns false).
719 TEST(BucketedTimeSeries, addHistorical) {
720 const int kNumBuckets = 5;
721 const seconds kDuration(10);
722 BucketedTimeSeries<double> b(kNumBuckets, kDuration);
724 // Initially fill with a constant rate of data
725 for (seconds i = seconds(0); i < seconds(10); ++i) {
729 EXPECT_EQ(10.0, b.rate());
730 EXPECT_EQ(10.0, b.avg());
731 EXPECT_EQ(10, b.count());
733 // Add some more data points to the middle bucket
734 b.addValue(seconds(4), 40.0);
735 b.addValue(seconds(5), 40.0);
736 EXPECT_EQ(15.0, b.avg());
737 EXPECT_EQ(18.0, b.rate());
738 EXPECT_EQ(12, b.count());
740 // Now start adding more current data points, until we are about to roll over
741 // the bucket where we added the extra historical data.
742 for (seconds i = seconds(10); i < seconds(14); ++i) {
745 EXPECT_EQ(15.0, b.avg());
746 EXPECT_EQ(18.0, b.rate());
747 EXPECT_EQ(12, b.count());
749 // Now roll over the middle bucket
750 b.addValue(seconds(14), 10.0);
751 b.addValue(seconds(15), 10.0);
752 EXPECT_EQ(10.0, b.avg());
753 EXPECT_EQ(10.0, b.rate());
754 EXPECT_EQ(10, b.count());
756 // Add more historical values past the bucket window.
757 // These should be ignored.
758 EXPECT_FALSE(b.addValue(seconds(4), 40.0));
759 EXPECT_FALSE(b.addValue(seconds(5), 40.0));
760 EXPECT_EQ(10.0, b.avg());
761 EXPECT_EQ(10.0, b.rate());
762 EXPECT_EQ(10, b.count());
// Level durations for the minute/hour tests: 60s (MINUTE), 3600s (HOUR),
// and 0s meaning an unbounded all-time level (ALLTIME).
// NOTE(review): the closing brace/semicolon is elided from this listing;
// this presumably lives inside the IntMHTS struct referenced below.
773 const seconds kMinuteHourDurations[] = {
774 seconds(60), seconds(3600), seconds(0)
// Verifies a 3-level MultiLevelTimeSeries (minute / hour / all-time):
// empty-state queries, then sum/avg/rate at each level as data is added at
// increasing rates; finishes by checking everything reads 0 after clearing
// (the clear() call at original line 893 is elided from this listing).
778 TEST(MinuteHourTimeSeries, Basic) {
779 folly::MultiLevelTimeSeries<int> mhts(60, IntMHTS::NUM_LEVELS,
780 IntMHTS::kMinuteHourDurations);
781 EXPECT_EQ(mhts.numLevels(), IntMHTS::NUM_LEVELS);
782 EXPECT_EQ(mhts.numLevels(), 3);
784 EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 0);
785 EXPECT_EQ(mhts.sum(IntMHTS::HOUR), 0);
786 EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME), 0);
788 EXPECT_EQ(mhts.avg(IntMHTS::MINUTE), 0);
789 EXPECT_EQ(mhts.avg(IntMHTS::HOUR), 0);
790 EXPECT_EQ(mhts.avg(IntMHTS::ALLTIME), 0);
792 EXPECT_EQ(mhts.rate(IntMHTS::MINUTE), 0);
793 EXPECT_EQ(mhts.rate(IntMHTS::HOUR), 0);
794 EXPECT_EQ(mhts.rate(IntMHTS::ALLTIME), 0);
796 EXPECT_EQ(mhts.getLevel(IntMHTS::MINUTE).elapsed().count(), 0);
797 EXPECT_EQ(mhts.getLevel(IntMHTS::HOUR).elapsed().count(), 0);
798 EXPECT_EQ(mhts.getLevel(IntMHTS::ALLTIME).elapsed().count(), 0);
802 mhts.addValue(cur_time++, 10);
805 EXPECT_EQ(mhts.getLevel(IntMHTS::MINUTE).elapsed().count(), 1);
806 EXPECT_EQ(mhts.getLevel(IntMHTS::HOUR).elapsed().count(), 1);
807 EXPECT_EQ(mhts.getLevel(IntMHTS::ALLTIME).elapsed().count(), 1);
809 for (int i = 0; i < 299; ++i) {
810 mhts.addValue(cur_time++, 10);
// After 300 total seconds of value 10: the minute level saturates at 60s,
// the hour and all-time levels have the full 300s.
814 EXPECT_EQ(mhts.getLevel(IntMHTS::MINUTE).elapsed().count(), 60);
815 EXPECT_EQ(mhts.getLevel(IntMHTS::HOUR).elapsed().count(), 300);
816 EXPECT_EQ(mhts.getLevel(IntMHTS::ALLTIME).elapsed().count(), 300);
818 EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 600);
819 EXPECT_EQ(mhts.sum(IntMHTS::HOUR), 300*10);
820 EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME), 300*10);
822 EXPECT_EQ(mhts.avg(IntMHTS::MINUTE), 10);
823 EXPECT_EQ(mhts.avg(IntMHTS::HOUR), 10);
824 EXPECT_EQ(mhts.avg(IntMHTS::ALLTIME), 10);
826 EXPECT_EQ(mhts.rate(IntMHTS::MINUTE), 10);
827 EXPECT_EQ(mhts.rate(IntMHTS::HOUR), 10);
828 EXPECT_EQ(mhts.rate(IntMHTS::ALLTIME), 10);
830 for (int i = 0; i < 3600*3 - 300; ++i) {
831 mhts.addValue(cur_time++, 10);
// Now 3 hours total: hour level saturates at 3600s.
835 EXPECT_EQ(mhts.getLevel(IntMHTS::MINUTE).elapsed().count(), 60);
836 EXPECT_EQ(mhts.getLevel(IntMHTS::HOUR).elapsed().count(), 3600);
837 EXPECT_EQ(mhts.getLevel(IntMHTS::ALLTIME).elapsed().count(), 3600*3);
839 EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 600);
840 EXPECT_EQ(mhts.sum(IntMHTS::HOUR), 3600*10);
841 EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME), 3600*3*10);
843 EXPECT_EQ(mhts.avg(IntMHTS::MINUTE), 10);
844 EXPECT_EQ(mhts.avg(IntMHTS::HOUR), 10);
845 EXPECT_EQ(mhts.avg(IntMHTS::ALLTIME), 10);
847 EXPECT_EQ(mhts.rate(IntMHTS::MINUTE), 10);
848 EXPECT_EQ(mhts.rate(IntMHTS::HOUR), 10);
849 EXPECT_EQ(mhts.rate(IntMHTS::ALLTIME), 10);
851 for (int i = 0; i < 3600; ++i) {
852 mhts.addValue(cur_time++, 100);
// ALLTIME avg: (3*3600*10 + 3600*100) / (4*3600) == 32.5
856 EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 60*100);
857 EXPECT_EQ(mhts.sum(IntMHTS::HOUR), 3600*100);
858 EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME),
859 3600*3*10 + 3600*100);
861 EXPECT_EQ(mhts.avg(IntMHTS::MINUTE), 100);
862 EXPECT_EQ(mhts.avg(IntMHTS::HOUR), 100);
863 EXPECT_EQ(mhts.avg(IntMHTS::ALLTIME), 32.5);
864 EXPECT_EQ(mhts.avg<int>(IntMHTS::ALLTIME), 32);
866 EXPECT_EQ(mhts.rate(IntMHTS::MINUTE), 100);
867 EXPECT_EQ(mhts.rate(IntMHTS::HOUR), 100);
868 EXPECT_EQ(mhts.rate(IntMHTS::ALLTIME), 32.5);
869 EXPECT_EQ(mhts.rate<int>(IntMHTS::ALLTIME), 32);
871 for (int i = 0; i < 1800; ++i) {
872 mhts.addValue(cur_time++, 120);
876 EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 60*120);
877 EXPECT_EQ(mhts.sum(IntMHTS::HOUR),
878 1800*100 + 1800*120);
879 EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME),
880 3600*3*10 + 3600*100 + 1800*120);
882 for (int i = 0; i < 60; ++i) {
883 mhts.addValue(cur_time++, 1000);
887 EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 60*1000);
888 EXPECT_EQ(mhts.sum(IntMHTS::HOUR),
889 1740*100 + 1800*120 + 60*1000);
890 EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME),
891 3600*3*10 + 3600*100 + 1800*120 + 60*1000);
// NOTE(review): a clear()-style call (elided original line) must precede
// this final assertion for ALLTIME to read 0.
894 EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME), 0);
// Verifies interval queries against a minute/hour/all-time series loaded
// with three phases of data (value 1 for 2h, 10 for 59min, 100 for the last
// minute). Expected sums/counts are precomputed per interval; queries
// spanning level boundaries use the coarsest level that covers them.
897 TEST(MinuteHourTimeSeries, QueryByInterval) {
898 folly::MultiLevelTimeSeries<int> mhts(60, IntMHTS::NUM_LEVELS,
899 IntMHTS::kMinuteHourDurations);
902 for (curTime = seconds(0); curTime < seconds(7200); curTime++) {
903 mhts.addValue(curTime, 1);
905 for (curTime = seconds(7200); curTime < seconds(7200 + 3540); curTime++) {
906 mhts.addValue(curTime, 10);
908 for (curTime = seconds(7200 + 3540); curTime < seconds(7200 + 3600);
910 mhts.addValue(curTime, 100);
914 struct TimeInterval {
918 TimeInterval intervals[12] = {
919 { curTime - seconds(60), curTime },
920 { curTime - seconds(3600), curTime },
921 { curTime - seconds(7200), curTime },
922 { curTime - seconds(3600), curTime - seconds(60) },
923 { curTime - seconds(7200), curTime - seconds(60) },
924 { curTime - seconds(7200), curTime - seconds(3600) },
925 { curTime - seconds(50), curTime - seconds(20) },
926 { curTime - seconds(3020), curTime - seconds(20) },
927 { curTime - seconds(7200), curTime - seconds(20) },
928 { curTime - seconds(3000), curTime - seconds(1000) },
929 { curTime - seconds(7200), curTime - seconds(1000) },
930 { curTime - seconds(7200), curTime - seconds(3600) },
933 int expectedSums[12] = {
934 6000, 41400, 32400, 35400, 32130, 16200, 3000, 33600, 32310, 20000, 27900,
938 int expectedCounts[12] = {
939 60, 3600, 7200, 3540, 7140, 3600, 30, 3000, 7180, 2000, 6200, 3600
942 for (int i = 0; i < 12; ++i) {
943 TimeInterval interval = intervals[i];
945 int s = mhts.sum(interval.start, interval.end);
946 EXPECT_EQ(expectedSums[i], s);
948 int c = mhts.count(interval.start, interval.end);
949 EXPECT_EQ(expectedCounts[i], c);
951 int a = mhts.avg<int>(interval.start, interval.end);
952 EXPECT_EQ(expectedCounts[i] ?
953 (expectedSums[i] / expectedCounts[i]) : 0,
956 int r = mhts.rate<int>(interval.start, interval.end);
// Expected rate is the (integer-truncated) sum over the interval length.
958 expectedSums[i] / (interval.end - interval.start).count();
959 EXPECT_EQ(expectedRate, r);
// Same scenario as MinuteHourTimeSeries.Basic, but constructs the series
// with the initializer_list constructor and addresses levels by duration
// (getLevelByDuration / sum(seconds(...))) instead of by enum index.
963 TEST(MultiLevelTimeSeries, Basic) {
964 // using constructor with initializer_list parameter
965 folly::MultiLevelTimeSeries<int> mhts(
966 60, {seconds(60), seconds(3600), seconds(0)});
967 EXPECT_EQ(mhts.numLevels(), 3);
969 EXPECT_EQ(mhts.sum(seconds(60)), 0);
970 EXPECT_EQ(mhts.sum(seconds(3600)), 0);
971 EXPECT_EQ(mhts.sum(seconds(0)), 0);
973 EXPECT_EQ(mhts.avg(seconds(60)), 0);
974 EXPECT_EQ(mhts.avg(seconds(3600)), 0);
975 EXPECT_EQ(mhts.avg(seconds(0)), 0);
977 EXPECT_EQ(mhts.rate(seconds(60)), 0);
978 EXPECT_EQ(mhts.rate(seconds(3600)), 0);
979 EXPECT_EQ(mhts.rate(seconds(0)), 0);
981 EXPECT_EQ(mhts.getLevelByDuration(seconds(60)).elapsed().count(), 0);
982 EXPECT_EQ(mhts.getLevelByDuration(seconds(3600)).elapsed().count(), 0);
983 EXPECT_EQ(mhts.getLevelByDuration(seconds(0)).elapsed().count(), 0);
987 mhts.addValue(cur_time++, 10);
990 EXPECT_EQ(mhts.getLevelByDuration(seconds(60)).elapsed().count(), 1);
991 EXPECT_EQ(mhts.getLevelByDuration(seconds(3600)).elapsed().count(), 1);
992 EXPECT_EQ(mhts.getLevelByDuration(seconds(0)).elapsed().count(), 1);
994 for (int i = 0; i < 299; ++i) {
995 mhts.addValue(cur_time++, 10);
// After 300 total seconds of value 10.
999 EXPECT_EQ(mhts.getLevelByDuration(seconds(60)).elapsed().count(), 60);
1000 EXPECT_EQ(mhts.getLevelByDuration(seconds(3600)).elapsed().count(), 300);
1001 EXPECT_EQ(mhts.getLevelByDuration(seconds(0)).elapsed().count(), 300);
1003 EXPECT_EQ(mhts.sum(seconds(60)), 600);
1004 EXPECT_EQ(mhts.sum(seconds(3600)), 300 * 10);
1005 EXPECT_EQ(mhts.sum(seconds(0)), 300 * 10);
1007 EXPECT_EQ(mhts.avg(seconds(60)), 10);
1008 EXPECT_EQ(mhts.avg(seconds(3600)), 10);
1009 EXPECT_EQ(mhts.avg(seconds(0)), 10);
1011 EXPECT_EQ(mhts.rate(seconds(60)), 10);
1012 EXPECT_EQ(mhts.rate(seconds(3600)), 10);
1013 EXPECT_EQ(mhts.rate(seconds(0)), 10);
1015 for (int i = 0; i < 3600 * 3 - 300; ++i) {
1016 mhts.addValue(cur_time++, 10);
// Now 3 hours total.
1020 EXPECT_EQ(mhts.getLevelByDuration(seconds(60)).elapsed().count(), 60);
1021 EXPECT_EQ(mhts.getLevelByDuration(seconds(3600)).elapsed().count(), 3600);
1022 EXPECT_EQ(mhts.getLevelByDuration(seconds(0)).elapsed().count(), 3600 * 3);
1024 EXPECT_EQ(mhts.sum(seconds(60)), 600);
1025 EXPECT_EQ(mhts.sum(seconds(3600)), 3600 * 10);
1026 EXPECT_EQ(mhts.sum(seconds(0)), 3600 * 3 * 10);
1028 EXPECT_EQ(mhts.avg(seconds(60)), 10);
1029 EXPECT_EQ(mhts.avg(seconds(3600)), 10);
1030 EXPECT_EQ(mhts.avg(seconds(0)), 10);
1032 EXPECT_EQ(mhts.rate(seconds(60)), 10);
1033 EXPECT_EQ(mhts.rate(seconds(3600)), 10);
1034 EXPECT_EQ(mhts.rate(seconds(0)), 10);
1036 for (int i = 0; i < 3600; ++i) {
1037 mhts.addValue(cur_time++, 100);
// All-time avg: (3*3600*10 + 3600*100) / (4*3600) == 32.5
1041 EXPECT_EQ(mhts.sum(seconds(60)), 60 * 100);
1042 EXPECT_EQ(mhts.sum(seconds(3600)), 3600 * 100);
1043 EXPECT_EQ(mhts.sum(seconds(0)), 3600 * 3 * 10 + 3600 * 100);
1045 EXPECT_EQ(mhts.avg(seconds(60)), 100);
1046 EXPECT_EQ(mhts.avg(seconds(3600)), 100);
1047 EXPECT_EQ(mhts.avg(seconds(0)), 32.5);
1048 EXPECT_EQ(mhts.avg<int>(seconds(0)), 32);
1050 EXPECT_EQ(mhts.rate(seconds(60)), 100);
1051 EXPECT_EQ(mhts.rate(seconds(3600)), 100);
1052 EXPECT_EQ(mhts.rate(seconds(0)), 32.5);
1053 EXPECT_EQ(mhts.rate<int>(seconds(0)), 32);
1055 for (int i = 0; i < 1800; ++i) {
1056 mhts.addValue(cur_time++, 120);
1060 EXPECT_EQ(mhts.sum(seconds(60)), 60 * 120);
1061 EXPECT_EQ(mhts.sum(seconds(3600)), 1800 * 100 + 1800 * 120);
1062 EXPECT_EQ(mhts.sum(seconds(0)), 3600 * 3 * 10 + 3600 * 100 + 1800 * 120);
1064 for (int i = 0; i < 60; ++i) {
1065 mhts.addValue(cur_time++, 1000);
1069 EXPECT_EQ(mhts.sum(seconds(60)), 60 * 1000);
1070 EXPECT_EQ(mhts.sum(seconds(3600)), 1740 * 100 + 1800 * 120 + 60 * 1000);
1072 mhts.sum(seconds(0)),
1073 3600 * 3 * 10 + 3600 * 100 + 1800 * 120 + 60 * 1000);
// NOTE(review): a clear()-style call (elided original line) must precede
// this final assertion for the all-time sum to read 0.
1076 EXPECT_EQ(mhts.sum(seconds(0)), 0);
// initializer_list-constructed counterpart of
// MinuteHourTimeSeries.QueryByInterval: same three data phases and the same
// 12 query intervals, using std::array tables for the expectations.
// NOTE(review): this block continues past the end of the visible listing,
// and the middle of the expectedSums array initializer is elided.
1079 TEST(MultiLevelTimeSeries, QueryByInterval) {
1080 folly::MultiLevelTimeSeries<int> mhts(
1081 60, {seconds(60), seconds(3600), seconds(0)});
1084 for (curTime = seconds(0); curTime < seconds(7200); curTime++) {
1085 mhts.addValue(curTime, 1);
1087 for (curTime = seconds(7200); curTime < seconds(7200 + 3540); curTime++) {
1088 mhts.addValue(curTime, 10);
1090 for (curTime = seconds(7200 + 3540); curTime < seconds(7200 + 3600);
1092 mhts.addValue(curTime, 100);
1096 struct TimeInterval {
1101 std::array<TimeInterval, 12> intervals = {{
1102 {curTime - seconds(60), curTime},
1103 {curTime - seconds(3600), curTime},
1104 {curTime - seconds(7200), curTime},
1105 {curTime - seconds(3600), curTime - seconds(60)},
1106 {curTime - seconds(7200), curTime - seconds(60)},
1107 {curTime - seconds(7200), curTime - seconds(3600)},
1108 {curTime - seconds(50), curTime - seconds(20)},
1109 {curTime - seconds(3020), curTime - seconds(20)},
1110 {curTime - seconds(7200), curTime - seconds(20)},
1111 {curTime - seconds(3000), curTime - seconds(1000)},
1112 {curTime - seconds(7200), curTime - seconds(1000)},
1113 {curTime - seconds(7200), curTime - seconds(3600)},
1116 std::array<int, 12> expectedSums = {{6000,
1129 std::array<int, 12> expectedCounts = {
1130 {60, 3600, 7200, 3540, 7140, 3600, 30, 3000, 7180, 2000, 6200, 3600}};
1132 for (size_t i = 0; i < intervals.size(); ++i) {
1133 TimeInterval interval = intervals[i];
1135 int s = mhts.sum(interval.start, interval.end);
1136 EXPECT_EQ(expectedSums[i], s);
1138 int c = mhts.count(interval.start, interval.end);
1139 EXPECT_EQ(expectedCounts[i], c);
1141 int a = mhts.avg<int>(interval.start, interval.end);
1142 EXPECT_EQ(expectedCounts[i] ? (expectedSums[i] / expectedCounts[i]) : 0, a);
1144 int r = mhts.rate<int>(interval.start, interval.end);
// Expected rate is the (integer-truncated) sum over the interval length.
1146 expectedSums[i] / (interval.end - interval.start).count();
1147 EXPECT_EQ(expectedRate, r);