* Helper function to compute the rate per Interval,
* given the specified count recorded over the elapsed time period.
*/
-template <typename ReturnType=double,
- typename TimeType=std::chrono::seconds,
- typename Interval=TimeType>
-ReturnType rateHelper(ReturnType count, TimeType elapsed) {
- if (elapsed == TimeType(0)) {
+template <
+ typename ReturnType = double,
+ typename Duration = std::chrono::seconds,
+ typename Interval = Duration>
+ReturnType rateHelper(ReturnType count, Duration elapsed) {
+ if (elapsed == Duration(0)) {
return 0;
}
// is less than the desired interval, which will incorrectly result in
// an infinite rate.
typedef std::chrono::duration<
- ReturnType, std::ratio<TimeType::period::den,
- TimeType::period::num>> NativeRate;
+ ReturnType,
+ std::ratio<Duration::period::den, Duration::period::num>>
+ NativeRate;
typedef std::chrono::duration<
ReturnType, std::ratio<Interval::period::den,
Interval::period::num>> DesiredRate;
template <typename VT, typename CT>
BucketedTimeSeries<VT, CT>::BucketedTimeSeries(
size_t nBuckets,
- TimeType maxDuration)
- : firstTime_(1), latestTime_(0), duration_(maxDuration) {
+ Duration maxDuration)
+ : firstTime_(Duration(1)), latestTime_(), duration_(maxDuration) {
// For tracking all-time data we only use total_, and don't need to bother
// with buckets_
if (!isAllTime()) {
}
template <typename VT, typename CT>
-bool BucketedTimeSeries<VT, CT>::addValue(TimeType now, const ValueType& val) {
+bool BucketedTimeSeries<VT, CT>::addValue(TimePoint now, const ValueType& val) {
return addValueAggregated(now, val, 1);
}
template <typename VT, typename CT>
bool BucketedTimeSeries<VT, CT>::addValue(
- TimeType now,
+ TimePoint now,
const ValueType& val,
int64_t times) {
return addValueAggregated(now, val * times, times);
template <typename VT, typename CT>
bool BucketedTimeSeries<VT, CT>::addValueAggregated(
- TimeType now,
+ TimePoint now,
const ValueType& total,
int64_t nsamples) {
if (isAllTime()) {
}
template <typename VT, typename CT>
-size_t BucketedTimeSeries<VT, CT>::update(TimeType now) {
+size_t BucketedTimeSeries<VT, CT>::update(TimePoint now) {
if (empty()) {
// This is the first data point.
firstTime_ = now;
}
template <typename VT, typename CT>
-size_t BucketedTimeSeries<VT, CT>::updateBuckets(TimeType now) {
+size_t BucketedTimeSeries<VT, CT>::updateBuckets(TimePoint now) {
// We could cache nextBucketStart as a member variable, so we don't have to
// recompute it each time update() is called with a new timestamp value.
// This makes things faster when update() (or addValue()) is called once
// Get info about the bucket that latestTime_ points at
size_t currentBucket;
- TimeType currentBucketStart;
- TimeType nextBucketStart;
+ TimePoint currentBucketStart;
+ TimePoint nextBucketStart;
  getBucketInfo(latestTime_, &currentBucket,
                &currentBucketStart, &nextBucketStart);
total_.clear();
// Set firstTime_ larger than latestTime_,
// to indicate that the timeseries is empty
- firstTime_ = TimeType(1);
- latestTime_ = TimeType(0);
+ firstTime_ = TimePoint(Duration(1));
+ latestTime_ = TimePoint();
}
template <typename VT, typename CT>
-typename CT::duration BucketedTimeSeries<VT, CT>::getEarliestTime() const {
+typename CT::time_point BucketedTimeSeries<VT, CT>::getEarliestTime() const {
if (empty()) {
- return TimeType(0);
+ return TimePoint();
}
if (isAllTime()) {
return firstTime_;
}
// Compute the earliest time we can track
- TimeType earliestTime = getEarliestTimeNonEmpty();
+ TimePoint earliestTime = getEarliestTimeNonEmpty();
// We're never tracking data before firstTime_
earliestTime = std::max(earliestTime, firstTime_);
}
template <typename VT, typename CT>
-typename CT::duration BucketedTimeSeries<VT, CT>::getEarliestTimeNonEmpty()
+typename CT::time_point BucketedTimeSeries<VT, CT>::getEarliestTimeNonEmpty()
const {
size_t currentBucket;
- TimeType currentBucketStart;
- TimeType nextBucketStart;
+ TimePoint currentBucketStart;
+ TimePoint nextBucketStart;
  getBucketInfo(latestTime_, &currentBucket,
                &currentBucketStart, &nextBucketStart);
template <typename VT, typename CT>
typename CT::duration BucketedTimeSeries<VT, CT>::elapsed() const {
if (empty()) {
- return TimeType(0);
+ return Duration(0);
}
// Add 1 since [latestTime_, earliestTime] is an inclusive interval.
- return latestTime_ - getEarliestTime() + TimeType(1);
+ return latestTime_ - getEarliestTime() + Duration(1);
}
template <typename VT, typename CT>
typename CT::duration BucketedTimeSeries<VT, CT>::elapsed(
- TimeType start,
- TimeType end) const {
+ TimePoint start,
+ TimePoint end) const {
if (empty()) {
- return TimeType(0);
+ return Duration(0);
}
start = std::max(start, getEarliestTime());
- end = std::min(end, latestTime_ + TimeType(1));
+ end = std::min(end, latestTime_ + Duration(1));
end = std::max(start, end);
return end - start;
}
template <typename VT, typename CT>
-VT BucketedTimeSeries<VT, CT>::sum(TimeType start, TimeType end) const {
+VT BucketedTimeSeries<VT, CT>::sum(TimePoint start, TimePoint end) const {
ValueType total = ValueType();
- forEachBucket(start, end, [&](const Bucket& bucket,
- TimeType bucketStart,
- TimeType nextBucketStart) -> bool {
- total += this->rangeAdjust(bucketStart, nextBucketStart, start, end,
- bucket.sum);
- return true;
- });
+ forEachBucket(
+ start,
+ end,
+ [&](const Bucket& bucket,
+ TimePoint bucketStart,
+ TimePoint nextBucketStart) -> bool {
+ total += this->rangeAdjust(
+ bucketStart, nextBucketStart, start, end, bucket.sum);
+ return true;
+ });
return total;
}
template <typename VT, typename CT>
-uint64_t BucketedTimeSeries<VT, CT>::count(TimeType start, TimeType end) const {
+uint64_t BucketedTimeSeries<VT, CT>::count(TimePoint start, TimePoint end)
+ const {
uint64_t sample_count = 0;
- forEachBucket(start, end, [&](const Bucket& bucket,
- TimeType bucketStart,
- TimeType nextBucketStart) -> bool {
- sample_count += this->rangeAdjust(bucketStart, nextBucketStart, start, end,
- bucket.count);
- return true;
- });
+ forEachBucket(
+ start,
+ end,
+ [&](const Bucket& bucket,
+ TimePoint bucketStart,
+ TimePoint nextBucketStart) -> bool {
+ sample_count += this->rangeAdjust(
+ bucketStart, nextBucketStart, start, end, bucket.count);
+ return true;
+ });
return sample_count;
}
template <typename VT, typename CT>
template <typename ReturnType>
-ReturnType BucketedTimeSeries<VT, CT>::avg(TimeType start, TimeType end) const {
+ReturnType BucketedTimeSeries<VT, CT>::avg(TimePoint start, TimePoint end)
+ const {
ValueType total = ValueType();
uint64_t sample_count = 0;
- forEachBucket(start, end, [&](const Bucket& bucket,
- TimeType bucketStart,
- TimeType nextBucketStart) -> bool {
- total += this->rangeAdjust(bucketStart, nextBucketStart, start, end,
- bucket.sum);
- sample_count += this->rangeAdjust(bucketStart, nextBucketStart, start, end,
- bucket.count);
- return true;
- });
+ forEachBucket(
+ start,
+ end,
+ [&](const Bucket& bucket,
+ TimePoint bucketStart,
+ TimePoint nextBucketStart) -> bool {
+ total += this->rangeAdjust(
+ bucketStart, nextBucketStart, start, end, bucket.sum);
+ sample_count += this->rangeAdjust(
+ bucketStart, nextBucketStart, start, end, bucket.count);
+ return true;
+ });
if (sample_count == 0) {
return ReturnType(0);
*/
template <typename VT, typename CT>
-size_t BucketedTimeSeries<VT, CT>::getBucketIdx(TimeType time) const {
+size_t BucketedTimeSeries<VT, CT>::getBucketIdx(TimePoint time) const {
// For all-time data we don't use buckets_. Everything is tracked in total_.
DCHECK(!isAllTime());
- time %= duration_;
- return time.count() * buckets_.size() / duration_.count();
+ auto timeIntoCurrentCycle = (time.time_since_epoch() % duration_);
+ return timeIntoCurrentCycle.count() * buckets_.size() / duration_.count();
}
/*
*/
template <typename VT, typename CT>
void BucketedTimeSeries<VT, CT>::getBucketInfo(
- TimeType time,
+ TimePoint time,
size_t* bucketIdx,
- TimeType* bucketStart,
- TimeType* nextBucketStart) const {
- typedef typename TimeType::rep TimeInt;
+ TimePoint* bucketStart,
+ TimePoint* nextBucketStart) const {
+ typedef typename Duration::rep TimeInt;
DCHECK(!isAllTime());
// Keep these two lines together. The compiler should be able to compute
// both the division and modulus with a single operation.
- TimeType timeMod = time % duration_;
- TimeInt numFullDurations = time / duration_;
+ Duration timeMod = time.time_since_epoch() % duration_;
+ TimeInt numFullDurations = time.time_since_epoch() / duration_;
TimeInt scaledTime = timeMod.count() * buckets_.size();
TimeInt scaledBucketStart = scaledTime - scaledOffsetInBucket;
TimeInt scaledNextBucketStart = scaledBucketStart + duration_.count();
- TimeType bucketStartMod((scaledBucketStart + buckets_.size() - 1) /
- buckets_.size());
- TimeType nextBucketStartMod((scaledNextBucketStart + buckets_.size() - 1) /
- buckets_.size());
+ Duration bucketStartMod(
+ (scaledBucketStart + buckets_.size() - 1) / buckets_.size());
+ Duration nextBucketStartMod(
+ (scaledNextBucketStart + buckets_.size() - 1) / buckets_.size());
- TimeType durationStart(numFullDurations * duration_.count());
+ TimePoint durationStart(numFullDurations * duration_);
*bucketStart = bucketStartMod + durationStart;
*nextBucketStart = nextBucketStartMod + durationStart;
}
template <typename Function>
void BucketedTimeSeries<VT, CT>::forEachBucket(Function fn) const {
if (isAllTime()) {
- fn(total_, firstTime_, latestTime_ + TimeType(1));
+ fn(total_, firstTime_, latestTime_ + Duration(1));
return;
}
- typedef typename TimeType::rep TimeInt;
+ typedef typename Duration::rep TimeInt;
// Compute durationStart, latestBucketIdx, and scaledNextBucketStart,
// the same way as in getBucketInfo().
- TimeType timeMod = latestTime_ % duration_;
- TimeInt numFullDurations = latestTime_ / duration_;
- TimeType durationStart(numFullDurations * duration_.count());
+ Duration timeMod = latestTime_.time_since_epoch() % duration_;
+ TimeInt numFullDurations = latestTime_.time_since_epoch() / duration_;
+ TimePoint durationStart(numFullDurations * duration_);
TimeInt scaledTime = timeMod.count() * buckets_.size();
size_t latestBucketIdx = scaledTime / duration_.count();
TimeInt scaledOffsetInBucket = scaledTime % duration_.count();
size_t idx = latestBucketIdx;
durationStart -= duration_;
- TimeType nextBucketStart =
- TimeType((scaledNextBucketStart + buckets_.size() - 1) / buckets_.size()) +
- durationStart;
+ TimePoint nextBucketStart =
+ Duration(
+ (scaledNextBucketStart + buckets_.size() - 1) / buckets_.size()) +
+ durationStart;
while (true) {
++idx;
if (idx >= buckets_.size()) {
scaledNextBucketStart += duration_.count();
}
- TimeType bucketStart = nextBucketStart;
- nextBucketStart = TimeType((scaledNextBucketStart + buckets_.size() - 1) /
- buckets_.size()) + durationStart;
+ TimePoint bucketStart = nextBucketStart;
+ nextBucketStart =
+ Duration(
+ (scaledNextBucketStart + buckets_.size() - 1) / buckets_.size()) +
+ durationStart;
// Should we bother skipping buckets where firstTime_ >= nextBucketStart?
// For now we go ahead and invoke the function with these buckets.
// sum and count should always be 0 in these buckets.
- DCHECK_LE(bucketStart.count(), latestTime_.count());
+ DCHECK_LE(
+ bucketStart.time_since_epoch().count(),
+ latestTime_.time_since_epoch().count());
bool ret = fn(buckets_[idx], bucketStart, nextBucketStart);
if (!ret) {
break;
*/
template <typename VT, typename CT>
VT BucketedTimeSeries<VT, CT>::rangeAdjust(
- TimeType bucketStart,
- TimeType nextBucketStart,
- TimeType start,
- TimeType end,
+ TimePoint bucketStart,
+ TimePoint nextBucketStart,
+ TimePoint start,
+ TimePoint end,
ValueType input) const {
// If nextBucketStart is greater than latestTime_, treat nextBucketStart as
// if it were latestTime_. This makes us more accurate when someone is
// downwards in this case, because the bucket really only has data up to
// latestTime_.
if (bucketStart <= latestTime_ && nextBucketStart > latestTime_) {
- nextBucketStart = latestTime_ + TimeType(1);
+ nextBucketStart = latestTime_ + Duration(1);
}
if (start <= bucketStart && end >= nextBucketStart) {
return input;
}
- TimeType intervalStart = std::max(start, bucketStart);
- TimeType intervalEnd = std::min(end, nextBucketStart);
+ TimePoint intervalStart = std::max(start, bucketStart);
+ TimePoint intervalEnd = std::min(end, nextBucketStart);
return input * (intervalEnd - intervalStart) /
(nextBucketStart - bucketStart);
}
template <typename VT, typename CT>
template <typename Function>
void BucketedTimeSeries<VT, CT>::forEachBucket(
- TimeType start,
- TimeType end,
+ TimePoint start,
+ TimePoint end,
Function fn) const {
- forEachBucket([&start, &end, &fn] (const Bucket& bucket, TimeType bucketStart,
- TimeType nextBucketStart) -> bool {
- if (start >= nextBucketStart) {
- return true;
- }
- if (end <= bucketStart) {
- return false;
- }
- bool ret = fn(bucket, bucketStart, nextBucketStart);
- return ret;
- });
+ forEachBucket(
+ [&start, &end, &fn](
+ const Bucket& bucket,
+ TimePoint bucketStart,
+ TimePoint nextBucketStart) -> bool {
+ if (start >= nextBucketStart) {
+ return true;
+ }
+ if (end <= bucketStart) {
+ return false;
+ }
+ bool ret = fn(bucket, bucketStart, nextBucketStart);
+ return ret;
+ });
}
} // folly
using Clock = CT;
using Duration = typename Clock::duration;
using TimePoint = typename Clock::time_point;
- // The legacy TimeType. The older code used this instead of Duration and
- // TimePoint. This will eventually be removed as the code is transitioned to
- // Duration and TimePoint.
- using TimeType = typename Clock::duration;
using Bucket = detail::Bucket<ValueType>;
/*
* Returns true on success, or false if now was older than the tracked time
* window.
*/
- bool addValue(TimeType now, const ValueType& val);
+ bool addValue(TimePoint now, const ValueType& val);
/*
* Adds the value 'val' the given number of 'times' at time 'now'
*/
- bool addValue(TimeType now, const ValueType& val, int64_t times);
+ bool addValue(TimePoint now, const ValueType& val, int64_t times);
/*
- * Adds the value 'sum' as the sum of 'nsamples' samples
+ * Adds the value 'total' as the sum of 'nsamples' samples
*/
- bool addValueAggregated(TimeType now, const ValueType& sum, int64_t nsamples);
+ bool
+ addValueAggregated(TimePoint now, const ValueType& total, int64_t nsamples);
/*
* Updates the container to the specified time, doing all the necessary
*
* Returns the current bucket index after the update.
*/
- size_t update(TimeType now);
+ size_t update(TimePoint now);
/*
* Reset the timeseries to an empty state,
*
* If no data has ever been added to this timeseries, 0 will be returned.
*/
- TimeType getLatestTime() const {
+ TimePoint getLatestTime() const {
return latestTime_;
}
* in the timeseries. This will never be older than (getLatestTime() -
* duration()).
*/
- TimeType getEarliestTime() const;
+ TimePoint getEarliestTime() const;
/*
* Return the number of buckets.
* Return the maximum duration of data that can be tracked by this
* BucketedTimeSeries.
*/
- TimeType duration() const {
+ Duration duration() const {
return duration_;
}
* ever rolling over into new buckets.
*/
bool isAllTime() const {
- return (duration_ == TimeType(0));
+ return (duration_ == Duration(0));
}
/*
* Note that you generally should call update() before calling elapsed(), to
* make sure you are not reading stale data.
*/
- TimeType elapsed() const;
+ Duration elapsed() const;
/*
* Get the amount of time tracked by this timeseries, between the specified
* simply returns (end - start). However, if start is earlier than
* getEarliestTime(), this returns (end - getEarliestTime()).
*/
- TimeType elapsed(TimeType start, TimeType end) const;
+ Duration elapsed(TimePoint start, TimePoint end) const;
/*
* Return the sum of all the data points currently tracked by this
* Note that you generally should call update() before calling rate(), to
* make sure you are not reading stale data.
*/
- template <typename ReturnType=double, typename Interval=TimeType>
+ template <typename ReturnType = double, typename Interval = Duration>
ReturnType rate() const {
return rateHelper<ReturnType, Interval>(total_.sum, elapsed());
}
* Note that you generally should call update() before calling countRate(),
* to make sure you are not reading stale data.
*/
- template <typename ReturnType=double, typename Interval=TimeType>
+ template <typename ReturnType = double, typename Interval = Duration>
ReturnType countRate() const {
return rateHelper<ReturnType, Interval>(total_.count, elapsed());
}
*
* Note that the value returned is an estimate, and may not be precise.
*/
- ValueType sum(TimeType start, TimeType end) const;
+ ValueType sum(TimePoint start, TimePoint end) const;
/*
* Estimate the number of data points that occurred in the specified time
* period.
*
- * The same caveats documented in the sum(TimeType start, TimeType end)
+ * The same caveats documented in the sum(TimePoint start, TimePoint end)
* comments apply here as well.
*/
- uint64_t count(TimeType start, TimeType end) const;
+ uint64_t count(TimePoint start, TimePoint end) const;
/*
* Estimate the average value during the specified time period.
*
- * The same caveats documented in the sum(TimeType start, TimeType end)
+ * The same caveats documented in the sum(TimePoint start, TimePoint end)
* comments apply here as well.
*/
- template <typename ReturnType=double>
- ReturnType avg(TimeType start, TimeType end) const;
+ template <typename ReturnType = double>
+ ReturnType avg(TimePoint start, TimePoint end) const;
/*
* Estimate the rate during the specified time period.
*
- * The same caveats documented in the sum(TimeType start, TimeType end)
+ * The same caveats documented in the sum(TimePoint start, TimePoint end)
* comments apply here as well.
*/
- template <typename ReturnType=double, typename Interval=TimeType>
- ReturnType rate(TimeType start, TimeType end) const {
+ template <typename ReturnType = double, typename Interval = Duration>
+ ReturnType rate(TimePoint start, TimePoint end) const {
ValueType intervalSum = sum(start, end);
- TimeType interval = elapsed(start, end);
+ Duration interval = elapsed(start, end);
return rateHelper<ReturnType, Interval>(intervalSum, interval);
}
* Estimate the rate of data points being added during the specified time
* period.
*
- * The same caveats documented in the sum(TimeType start, TimeType end)
+ * The same caveats documented in the sum(TimePoint start, TimePoint end)
* comments apply here as well.
*/
- template <typename ReturnType=double, typename Interval=TimeType>
- ReturnType countRate(TimeType start, TimeType end) const {
+ template <typename ReturnType = double, typename Interval = Duration>
+ ReturnType countRate(TimePoint start, TimePoint end) const {
uint64_t intervalCount = count(start, end);
- TimeType interval = elapsed(start, end);
+ Duration interval = elapsed(start, end);
return rateHelper<ReturnType, Interval>(intervalCount, interval);
}
* to break out of the loop and stop, without calling the function on any
* more buckets.
*
- * bool function(const Bucket& bucket, TimeType bucketStart,
- * TimeType nextBucketStart)
+ * bool function(const Bucket& bucket, TimePoint bucketStart,
+ * TimePoint nextBucketStart)
*/
template <typename Function>
void forEachBucket(Function fn) const;
*
* This method may not be called for all-time data.
*/
- size_t getBucketIdx(TimeType time) const;
+ size_t getBucketIdx(TimePoint time) const;
/*
* Get the bucket at the specified index.
*
* This method may not be called for all-time data.
*/
- void getBucketInfo(TimeType time, size_t* bucketIdx,
- TimeType* bucketStart, TimeType* nextBucketStart) const;
+ void getBucketInfo(
+ TimePoint time,
+ size_t* bucketIdx,
+ TimePoint* bucketStart,
+ TimePoint* nextBucketStart) const;
+
+ /*
+ * Legacy APIs that accept a Duration parameters rather than TimePoint.
+ *
+ * These treat the Duration as relative to the clock epoch.
+ * Prefer using the correct TimePoint-based APIs instead. These APIs will
+ * eventually be deprecated and removed.
+ */
+ bool addValue(Duration now, const ValueType& val) {
+ return addValueAggregated(TimePoint(now), val, 1);
+ }
+ bool addValue(Duration now, const ValueType& val, int64_t times) {
+ return addValueAggregated(TimePoint(now), val * times, times);
+ }
+ bool
+ addValueAggregated(Duration now, const ValueType& total, int64_t nsamples) {
+ return addValueAggregated(TimePoint(now), total, nsamples);
+ }
+ size_t update(Duration now) {
+ return update(TimePoint(now));
+ }
private:
- template <typename ReturnType=double, typename Interval=TimeType>
- ReturnType rateHelper(ReturnType numerator, TimeType elapsedTime) const {
- return detail::rateHelper<ReturnType, TimeType, Interval>(numerator,
- elapsedTime);
+ template <typename ReturnType = double, typename Interval = Duration>
+ ReturnType rateHelper(ReturnType numerator, Duration elapsedTime) const {
+ return detail::rateHelper<ReturnType, Duration, Interval>(
+ numerator, elapsedTime);
}
- TimeType getEarliestTimeNonEmpty() const;
- size_t updateBuckets(TimeType now);
+ TimePoint getEarliestTimeNonEmpty() const;
+ size_t updateBuckets(TimePoint now);
- ValueType rangeAdjust(TimeType bucketStart, TimeType nextBucketStart,
- TimeType start, TimeType end,
- ValueType input) const;
+ ValueType rangeAdjust(
+ TimePoint bucketStart,
+ TimePoint nextBucketStart,
+ TimePoint start,
+ TimePoint end,
+ ValueType input) const;
template <typename Function>
- void forEachBucket(TimeType start, TimeType end, Function fn) const;
+ void forEachBucket(TimePoint start, TimePoint end, Function fn) const;
- TimeType firstTime_; // time of first update() since clear()/constructor
- TimeType latestTime_; // time of last update()
- TimeType duration_; // total duration ("window length") of the time series
+ TimePoint firstTime_; // time of first update() since clear()/constructor
+ TimePoint latestTime_; // time of last update()
+ Duration duration_; // total duration ("window length") of the time series
Bucket total_; // sum and count of everything in time series
std::vector<Bucket> buckets_; // actual buckets of values
MultiLevelTimeSeries<VT, CT>::MultiLevelTimeSeries(
size_t nBuckets,
size_t nLevels,
- const TimeType levelDurations[])
- : cachedTime_(0), cachedSum_(0), cachedCount_(0) {
+ const Duration levelDurations[])
+ : cachedTime_(), cachedSum_(0), cachedCount_(0) {
CHECK_GT(nLevels, 0);
CHECK(levelDurations);
template <typename VT, typename CT>
MultiLevelTimeSeries<VT, CT>::MultiLevelTimeSeries(
size_t nBuckets,
- std::initializer_list<TimeType> durations)
- : cachedTime_(0), cachedSum_(0), cachedCount_(0) {
+ std::initializer_list<Duration> durations)
+ : cachedTime_(), cachedSum_(0), cachedCount_(0) {
CHECK_GT(durations.size(), 0);
levels_.reserve(durations.size());
int i = 0;
- TimeType prev;
+ Duration prev;
for (auto dur : durations) {
if (dur == Duration(0)) {
CHECK_EQ(i, durations.size() - 1);
template <typename VT, typename CT>
void MultiLevelTimeSeries<VT, CT>::addValue(
- TimeType now,
+ TimePoint now,
const ValueType& val) {
addValueAggregated(now, val, 1);
}
template <typename VT, typename CT>
void MultiLevelTimeSeries<VT, CT>::addValue(
- TimeType now,
+ TimePoint now,
const ValueType& val,
int64_t times) {
addValueAggregated(now, val * times, times);
template <typename VT, typename CT>
void MultiLevelTimeSeries<VT, CT>::addValueAggregated(
- TimeType now,
+ TimePoint now,
const ValueType& total,
int64_t nsamples) {
if (cachedTime_ != now) {
}
template <typename VT, typename CT>
-void MultiLevelTimeSeries<VT, CT>::update(TimeType now) {
+void MultiLevelTimeSeries<VT, CT>::update(TimePoint now) {
flush();
for (size_t i = 0; i < levels_.size(); ++i) {
levels_[i].update(now);
level.clear();
}
- cachedTime_ = TimeType(0);
+ cachedTime_ = TimePoint();
cachedSum_ = 0;
cachedCount_ = 0;
}
using Clock = CT;
using Duration = typename Clock::duration;
using TimePoint = typename Clock::time_point;
- // The legacy TimeType. The older code used this instead of Duration and
- // TimePoint. This will eventually be removed as the code is transitioned to
- // Duration and TimePoint.
- using TimeType = typename Clock::duration;
using Level = folly::BucketedTimeSeries<ValueType, Clock>;
/*
* be provided with a duration of '0' -- this will be an "all-time" level. If
* an all-time level is provided, it MUST be the last level present.
*/
- MultiLevelTimeSeries(size_t numBuckets,
- size_t numLevels,
- const TimeType levelDurations[]);
+ MultiLevelTimeSeries(
+ size_t numBuckets,
+ size_t numLevels,
+ const Duration levelDurations[]);
MultiLevelTimeSeries(
size_t numBuckets,
- std::initializer_list<TimeType> durations);
+ std::initializer_list<Duration> durations);
/*
* Return the number of buckets used to track time series at each level.
* data. Otherwise you may be reading stale data if update() or flush() has
* not been called recently.
*/
- const Level& getLevel(TimeType start) const {
+ const Level& getLevel(TimePoint start) const {
for (const auto& level : levels_) {
if (level.isAllTime()) {
return level;
}
// We should always have an all-time level, so this is never reached.
LOG(FATAL) << "No level of timeseries covers internval"
- << " from " << start.count() << " to now";
+ << " from " << start.time_since_epoch().count() << " to now";
return levels_.back();
}
* data. Otherwise you may be reading stale data if update() or flush() has
* not been called recently.
*/
- const Level& getLevelByDuration(TimeType duration) const {
+ const Level& getLevelByDuration(Duration duration) const {
// since the number of levels is expected to be small (less than 5 in most
// cases), a simple linear scan would be efficient and is intentionally
// chosen here over other alternatives for lookup.
* data. Otherwise you may be reading stale data if update() or flush() has
* not been called recently.
*/
- template <typename ReturnType=double, typename Interval=TimeType>
+ template <typename ReturnType = double, typename Interval = Duration>
ReturnType rate(int level) const {
return getLevel(level).template rate<ReturnType, Interval>();
}
* data. Otherwise you may be reading stale data if update() or flush() has
* not been called recently.
*/
- template <typename ReturnType=double, typename Interval=TimeType>
+ template <typename ReturnType = double, typename Interval = Duration>
ReturnType countRate(int level) const {
return getLevel(level).template countRate<ReturnType, Interval>();
}
* data. Otherwise you may be reading stale data if update() or flush() has
* not been called recently.
*/
- ValueType sum(TimeType duration) const {
+ ValueType sum(Duration duration) const {
return getLevelByDuration(duration).sum();
}
* not been called recently.
*/
template <typename ReturnType = double>
- ReturnType avg(TimeType duration) const {
+ ReturnType avg(Duration duration) const {
return getLevelByDuration(duration).template avg<ReturnType>();
}
* data. Otherwise you may be reading stale data if update() or flush() has
* not been called recently.
*/
- template <typename ReturnType = double, typename Interval = TimeType>
- ReturnType rate(TimeType duration) const {
+ template <typename ReturnType = double, typename Interval = Duration>
+ ReturnType rate(Duration duration) const {
return getLevelByDuration(duration).template rate<ReturnType, Interval>();
}
* data. Otherwise you may be reading stale data if update() or flush() has
* not been called recently.
*/
- int64_t count(TimeType duration) const {
+ int64_t count(Duration duration) const {
return getLevelByDuration(duration).count();
}
* data. Otherwise you may be reading stale data if update() or flush() has
* not been called recently.
*/
- template <typename ReturnType = double, typename Interval = TimeType>
- ReturnType countRate(TimeType duration) const {
+ template <typename ReturnType = double, typename Interval = Duration>
+ ReturnType countRate(Duration duration) const {
return getLevelByDuration(duration)
.template countRate<ReturnType, Interval>();
}
* data. Otherwise you may be reading stale data if update() or flush() has
* not been called recently.
*/
- ValueType sum(TimeType start, TimeType end) const {
+ ValueType sum(TimePoint start, TimePoint end) const {
return getLevel(start).sum(start, end);
}
/*
* Estimate the average value during the specified time period.
*
- * The same caveats documented in the sum(TimeType start, TimeType end)
+ * The same caveats documented in the sum(TimePoint start, TimePoint end)
* comments apply here as well.
*
* Note: you should generally call update() or flush() before accessing the
* data. Otherwise you may be reading stale data if update() or flush() has
* not been called recently.
*/
- template <typename ReturnType=double>
- ReturnType avg(TimeType start, TimeType end) const {
+ template <typename ReturnType = double>
+ ReturnType avg(TimePoint start, TimePoint end) const {
return getLevel(start).template avg<ReturnType>(start, end);
}
/*
* Estimate the rate during the specified time period.
*
- * The same caveats documented in the sum(TimeType start, TimeType end)
+ * The same caveats documented in the sum(TimePoint start, TimePoint end)
* comments apply here as well.
*
* Note: you should generally call update() or flush() before accessing the
* data. Otherwise you may be reading stale data if update() or flush() has
* not been called recently.
*/
- template <typename ReturnType=double>
- ReturnType rate(TimeType start, TimeType end) const {
+ template <typename ReturnType = double>
+ ReturnType rate(TimePoint start, TimePoint end) const {
return getLevel(start).template rate<ReturnType>(start, end);
}
/*
* Estimate the count during the specified time period.
*
- * The same caveats documented in the sum(TimeType start, TimeType end)
+ * The same caveats documented in the sum(TimePoint start, TimePoint end)
* comments apply here as well.
*
* Note: you should generally call update() or flush() before accessing the
* data. Otherwise you may be reading stale data if update() or flush() has
* not been called recently.
*/
- int64_t count(TimeType start, TimeType end) const {
+ int64_t count(TimePoint start, TimePoint end) const {
return getLevel(start).count(start, end);
}
* addValue() or update(), now will be ignored and the latest timestamp will
* be used.
*/
- void addValue(TimeType now, const ValueType& val);
+ void addValue(TimePoint now, const ValueType& val);
/*
* Adds the value 'val' at time 'now' to all levels.
*/
- void addValue(TimeType now, const ValueType& val, int64_t times);
+ void addValue(TimePoint now, const ValueType& val, int64_t times);
/*
- * Adds the value 'val' at time 'now' to all levels as the sum of 'nsamples'
- * samples.
+ * Adds the value 'total' at time 'now' to all levels as the sum of
+ * 'nsamples' samples.
*/
- void addValueAggregated(TimeType now, const ValueType& sum, int64_t nsamples);
+ void
+ addValueAggregated(TimePoint now, const ValueType& total, int64_t nsamples);
/*
* Update all the levels to the specified time, doing all the necessary
* call update() before accessing the data. Otherwise you may be reading
* stale data if update() has not been called recently.
*/
- void update(TimeType now);
+ void update(TimePoint now);
/*
* Reset all the timeseries to an empty state as if no data points have ever
*/
void flush();
+ /*
+ * Legacy APIs that accept a Duration parameters rather than TimePoint.
+ *
+ * These treat the Duration as relative to the clock epoch.
+ * Prefer using the correct TimePoint-based APIs instead. These APIs will
+ * eventually be deprecated and removed.
+ */
+ void update(Duration now) {
+ update(TimePoint(now));
+ }
+ void addValue(Duration now, const ValueType& value) {
+ addValue(TimePoint(now), value);
+ }
+ void addValue(Duration now, const ValueType& value, int64_t times) {
+ addValue(TimePoint(now), value, times);
+ }
+ void
+ addValueAggregated(Duration now, const ValueType& total, int64_t nsamples) {
+ addValueAggregated(TimePoint(now), total, nsamples);
+ }
+
private:
std::vector<Level> levels_;
// Updates within the same time interval are cached
// They are flushed out when updates from a different time comes,
// or flush() is called.
- TimeType cachedTime_;
+ TimePoint cachedTime_;
ValueType cachedSum_;
int cachedCount_;
};
template <typename T, typename CT, typename C>
void TimeseriesHistogram<T, CT, C>::addValue(
- TimeType now,
+ TimePoint now,
const ValueType& value) {
buckets_.getByValue(value).addValue(now, value);
maybeHandleSingleUniqueValue(value);
template <typename T, typename CT, typename C>
void TimeseriesHistogram<T, CT, C>::addValue(
- TimeType now,
+ TimePoint now,
const ValueType& value,
int64_t times) {
buckets_.getByValue(value).addValue(now, value, times);
template <typename T, typename CT, typename C>
void TimeseriesHistogram<T, CT, C>::addValues(
- TimeType now,
+ TimePoint now,
const folly::Histogram<ValueType>& hist) {
CHECK_EQ(hist.getMin(), getMin());
CHECK_EQ(hist.getMax(), getMax());
template <typename T, typename CT, typename C>
T TimeseriesHistogram<T, CT, C>::getPercentileEstimate(
double pct,
- TimeType start,
- TimeType end) const {
+ TimePoint start,
+ TimePoint end) const {
if (singleUniqueValue_) {
return firstValue_;
}
template <typename T, typename CT, typename C>
int TimeseriesHistogram<T, CT, C>::getPercentileBucketIdx(
double pct,
- TimeType start,
- TimeType end) const {
+ TimePoint start,
+ TimePoint end) const {
return buckets_.getPercentileBucketIdx(pct / 100.0,
CountFromInterval(start, end));
}
}
template <typename T, typename CT, typename C>
-void TimeseriesHistogram<T, CT, C>::update(TimeType now) {
+void TimeseriesHistogram<T, CT, C>::update(TimePoint now) {
for (size_t i = 0; i < buckets_.getNumBuckets(); i++) {
buckets_.getByIndex(i).update(now);
}
template <typename T, typename CT, typename C>
std::string TimeseriesHistogram<T, CT, C>::getString(
- TimeType start,
- TimeType end) const {
+ TimePoint start,
+ TimePoint end) const {
std::string result;
for (size_t i = 0; i < buckets_.getNumBuckets(); i++) {
void TimeseriesHistogram<T, CT, C>::computeAvgData(
ValueType* total,
int64_t* nsamples,
- TimeType start,
- TimeType end) const {
+ TimePoint start,
+ TimePoint end) const {
for (unsigned int b = 0; b < buckets_.getNumBuckets(); ++b) {
const auto& levelObj = buckets_.getByIndex(b).getLevel(start);
*total += levelObj.sum(start, end);
template <typename T, typename CT, typename C>
void TimeseriesHistogram<T, CT, C>::computeRateData(
ValueType* total,
- TimeType* elapsed,
+ Duration* elapsed,
int level) const {
for (unsigned int b = 0; b < buckets_.getNumBuckets(); ++b) {
const auto& levelObj = buckets_.getByIndex(b).getLevel(level);
template <class T, class CT, class C>
void TimeseriesHistogram<T, CT, C>::computeRateData(
ValueType* total,
- TimeType* elapsed,
- TimeType start,
- TimeType end) const {
+ Duration* elapsed,
+ TimePoint start,
+ TimePoint end) const {
for (unsigned int b = 0; b < buckets_.getNumBuckets(); ++b) {
const auto& level = buckets_.getByIndex(b).getLevel(start);
*total += level.sum(start, end);
using Clock = CT;
using Duration = typename Clock::duration;
using TimePoint = typename Clock::time_point;
- // The legacy TimeType. The older code used this instead of Duration and
- // TimePoint. This will eventually be removed as the code is transitioned to
- // Duration and TimePoint.
- using TimeType = typename Clock::duration;
/*
* Create a TimeSeries histogram and initialize the bucketing and levels.
}
/* Total count of values added during the given interval (all buckets). */
- int64_t count(TimeType start, TimeType end) const {
+ int64_t count(TimePoint start, TimePoint end) const {
int64_t total = 0;
for (unsigned int b = 0; b < buckets_.getNumBuckets(); ++b) {
total += buckets_.getByIndex(b).count(start, end);
}
/* Total sum of values added during the given interval (all buckets). */
- ValueType sum(TimeType start, TimeType end) const {
+ ValueType sum(TimePoint start, TimePoint end) const {
ValueType total = ValueType();
for (unsigned int b = 0; b < buckets_.getNumBuckets(); ++b) {
total += buckets_.getByIndex(b).sum(start, end);
/* Average of values added during the given interval (all buckets). */
template <typename ReturnType = double>
- ReturnType avg(TimeType start, TimeType end) const {
+ ReturnType avg(TimePoint start, TimePoint end) const {
auto total = ValueType();
int64_t nsamples = 0;
computeAvgData(&total, &nsamples, start, end);
template <typename ReturnType = double>
ReturnType rate(int level) const {
auto total = ValueType();
- TimeType elapsed(0);
+ Duration elapsed(0);
computeRateData(&total, &elapsed, level);
- return folly::detail::rateHelper<ReturnType, TimeType, TimeType>(
+ return folly::detail::rateHelper<ReturnType, Duration, Duration>(
total, elapsed);
}
* This is the sum of all values divided by the time interval (in seconds).
*/
template <typename ReturnType = double>
- ReturnType rate(TimeType start, TimeType end) const {
+ ReturnType rate(TimePoint start, TimePoint end) const {
auto total = ValueType();
- TimeType elapsed(0);
+ Duration elapsed(0);
computeRateData(&total, &elapsed, start, end);
- return folly::detail::rateHelper<ReturnType, TimeType, TimeType>(
+ return folly::detail::rateHelper<ReturnType, Duration, Duration>(
total, elapsed);
}
* must call this directly before querying to ensure that the data in all
* buckets is decayed properly.
*/
- void update(TimeType now);
+ void update(TimePoint now);
/* clear all the data from the histogram. */
void clear();
/* Add a value into the histogram with timestamp 'now' */
- void addValue(TimeType now, const ValueType& value);
+ void addValue(TimePoint now, const ValueType& value);
/* Add a value the given number of times with timestamp 'now' */
- void addValue(TimeType now, const ValueType& value, int64_t times);
+ void addValue(TimePoint now, const ValueType& value, int64_t times);
/*
* Add all of the values from the specified histogram.
* Histogram that is updated frequently, and only add it to the global
* TimeseriesHistogram once a second.
*/
- void addValues(TimeType now, const folly::Histogram<ValueType>& values);
+ void addValues(TimePoint now, const folly::Histogram<ValueType>& values);
/*
* Return an estimate of the value at the given percentile in the histogram
* getPercentileEstimate(int pct, int level) for the explanation of the
* estimation algorithm.
*/
- ValueType getPercentileEstimate(double pct, TimeType start, TimeType end)
- const;
+ ValueType getPercentileEstimate(double pct, TimePoint start, TimePoint end)
+ const;
/*
* Return the bucket index that the given percentile falls into (in the
* given historical interval). This index can then be used to retrieve either
* the bucket threshold, or other data from inside the bucket.
*/
- int getPercentileBucketIdx(double pct, TimeType start, TimeType end) const;
+ int getPercentileBucketIdx(double pct, TimePoint start, TimePoint end) const;
/* Get the bucket threshold for the bucket containing the given pct. */
int getPercentileBucketMin(double pct, int level) const {
return getBucketMin(getPercentileBucketIdx(pct, level));
}
/* Get the bucket threshold for the bucket containing the given pct. */
- int getPercentileBucketMin(double pct, TimeType start, TimeType end) const {
+ int getPercentileBucketMin(double pct, TimePoint start, TimePoint end) const {
return getBucketMin(getPercentileBucketIdx(pct, start, end));
}
* Print out serialized data for all buckets in the historical interval.
* For format, please see getString(int level).
*/
- std::string getString(TimeType start, TimeType end) const;
+ std::string getString(TimePoint start, TimePoint end) const;
+
+ /*
+ * Legacy APIs that accept Duration parameters rather than TimePoint.
+ *
+ * These treat the Duration as relative to the clock epoch.
+ * Prefer using the correct TimePoint-based APIs instead. These APIs will
+ * eventually be deprecated and removed.
+ */
+ void update(Duration now) {
+ update(TimePoint(now));
+ }
+ void addValue(Duration now, const ValueType& value) {
+ addValue(TimePoint(now), value);
+ }
+ void addValue(Duration now, const ValueType& value, int64_t times) {
+ addValue(TimePoint(now), value, times);
+ }
+ void addValues(Duration now, const folly::Histogram<ValueType>& values) {
+ addValues(TimePoint(now), values);
+ }
private:
typedef ContainerType Bucket;
int level_;
};
struct CountFromInterval {
- explicit CountFromInterval(TimeType start, TimeType end)
- : start_(start),
- end_(end) {}
+ explicit CountFromInterval(TimePoint start, TimePoint end)
+ : start_(start), end_(end) {}
uint64_t operator()(const ContainerType& bucket) const {
return bucket.count(start_, end_);
}
private:
- TimeType start_;
- TimeType end_;
+ TimePoint start_;
+ TimePoint end_;
};
struct AvgFromLevel {
template <typename ReturnType>
struct AvgFromInterval {
- explicit AvgFromInterval(TimeType start, TimeType end)
- : start_(start),
- end_(end) {}
+ explicit AvgFromInterval(TimePoint start, TimePoint end)
+ : start_(start), end_(end) {}
ReturnType operator()(const ContainerType& bucket) const {
return bucket.template avg<ReturnType>(start_, end_);
}
private:
- TimeType start_;
- TimeType end_;
+ TimePoint start_;
+ TimePoint end_;
};
/*
void computeAvgData(
ValueType* total,
int64_t* nsamples,
- TimeType start,
- TimeType end) const;
- void computeRateData(ValueType* total, TimeType* elapsed, int level) const;
+ TimePoint start,
+ TimePoint end) const;
+ void computeRateData(ValueType* total, Duration* elapsed, int level) const;
void computeRateData(
ValueType* total,
- TimeType* elapsed,
- TimeType start,
- TimeType end) const;
+ Duration* elapsed,
+ TimePoint start,
+ TimePoint end) const;
folly::detail::HistogramBuckets<ValueType, ContainerType> buckets_;
bool haveNotSeenValue_;
using namespace folly;
using std::chrono::seconds;
+namespace {
namespace IntMTMHTS {
enum Levels {
MINUTE,
typedef std::mt19937 RandomInt32;
+using StatsClock = folly::LegacyStatsClock<std::chrono::seconds>;
+StatsClock::time_point mkTimePoint(int value) {
+ return StatsClock::time_point(StatsClock::duration(value));
+}
+}
+
TEST(TimeseriesHistogram, Percentile) {
RandomInt32 random(5);
// [10, 109], 12 buckets including above and below
}
int maxVal = 120;
- h.addValue(seconds(0), 0);
- h.addValue(seconds(0), maxVal);
+ h.addValue(mkTimePoint(0), 0);
+ h.addValue(mkTimePoint(0), maxVal);
for (int i = 0; i < 98; i++) {
- h.addValue(seconds(0), random() % maxVal);
+ h.addValue(mkTimePoint(0), random() % maxVal);
}
- h.update(std::chrono::duration_cast<std::chrono::seconds>(
- std::chrono::system_clock::now().time_since_epoch()));
+ h.update(mkTimePoint(1500000000));
// bucket 0 stores everything below min, so its minimum
// is the lowest possible number
EXPECT_EQ(std::numeric_limits<int>::min(),
IntMTMHTS::kDurations));
int maxVal = 120;
- hist.addValue(seconds(0), 0);
- hist.addValue(seconds(0), maxVal);
+ hist.addValue(mkTimePoint(0), 0);
+ hist.addValue(mkTimePoint(0), maxVal);
for (int i = 0; i < 98; i++) {
- hist.addValue(seconds(0), random() % maxVal);
+ hist.addValue(mkTimePoint(0), random() % maxVal);
}
- hist.update(seconds(0));
+ hist.update(mkTimePoint(0));
const char* const kStringValues1[IntMTMHTS::NUM_LEVELS] = {
"-2147483648:12:4,10:8:13,20:8:24,30:6:34,40:13:46,50:8:54,60:7:64,"
for (int now = 0; now < 3600; now++) {
for (int i = 0; i < 100; i++) {
- hist.addValue(seconds(now), i, 2); // adds each item 2 times
+ hist.addValue(mkTimePoint(now), i, 2); // adds each item 2 times
}
}
for (int now = 0; now < 3600; now++) {
for (int i = 0; i < 100; i++) {
- hist.addValue(seconds(now), i);
+ hist.addValue(mkTimePoint(now), i);
}
}
- hist.update(seconds(3599));
+ hist.update(mkTimePoint(3599));
for (int pct = 1; pct <= 100; pct++) {
int expected = (pct - 1) / 10 * 10;
EXPECT_EQ(expected, hist.getPercentileBucketMin(pct, IntMTMHTS::MINUTE));
EXPECT_EQ(4950, hist.rate<double>(IntMTMHTS::HOUR));
EXPECT_EQ(4950, hist.rate<double>(IntMTMHTS::ALLTIME));
- EXPECT_EQ(1000, hist.count(seconds(10), seconds(20)));
- EXPECT_EQ(49500, hist.sum(seconds(10), seconds(20)));
- EXPECT_EQ(4950, hist.rate(seconds(10), seconds(20)));
- EXPECT_EQ(49.5, hist.avg<double>(seconds(10), seconds(20)));
+ EXPECT_EQ(1000, hist.count(mkTimePoint(10), mkTimePoint(20)));
+ EXPECT_EQ(49500, hist.sum(mkTimePoint(10), mkTimePoint(20)));
+ EXPECT_EQ(4950, hist.rate(mkTimePoint(10), mkTimePoint(20)));
+ EXPECT_EQ(49.5, hist.avg<double>(mkTimePoint(10), mkTimePoint(20)));
- EXPECT_EQ(200, hist.count(seconds(3550), seconds(3552)));
- EXPECT_EQ(9900, hist.sum(seconds(3550), seconds(3552)));
- EXPECT_EQ(4950, hist.rate(seconds(3550), seconds(3552)));
- EXPECT_EQ(49.5, hist.avg<double>(seconds(3550), seconds(3552)));
+ EXPECT_EQ(200, hist.count(mkTimePoint(3550), mkTimePoint(3552)));
+ EXPECT_EQ(9900, hist.sum(mkTimePoint(3550), mkTimePoint(3552)));
+ EXPECT_EQ(4950, hist.rate(mkTimePoint(3550), mkTimePoint(3552)));
+ EXPECT_EQ(49.5, hist.avg<double>(mkTimePoint(3550), mkTimePoint(3552)));
- EXPECT_EQ(0, hist.count(seconds(4550), seconds(4552)));
- EXPECT_EQ(0, hist.sum(seconds(4550), seconds(4552)));
- EXPECT_EQ(0, hist.rate(seconds(4550), seconds(4552)));
- EXPECT_EQ(0, hist.avg<double>(seconds(4550), seconds(4552)));
+ EXPECT_EQ(0, hist.count(mkTimePoint(4550), mkTimePoint(4552)));
+ EXPECT_EQ(0, hist.sum(mkTimePoint(4550), mkTimePoint(4552)));
+ EXPECT_EQ(0, hist.rate(mkTimePoint(4550), mkTimePoint(4552)));
+ EXPECT_EQ(0, hist.avg<double>(mkTimePoint(4550), mkTimePoint(4552)));
}
// -----------------
for (int now = 0; now < 3600; now++) {
for (int i = 0; i < 100; i++) {
- hist.addValue(seconds(now), i, 2); // adds each item 2 times
+ hist.addValue(mkTimePoint(now), i, 2); // adds each item 2 times
}
}
- hist.update(seconds(3599));
+ hist.update(mkTimePoint(3599));
for (int pct = 1; pct <= 100; pct++) {
int expected = (pct - 1) / 10 * 10;
EXPECT_EQ(expected, hist.getPercentileBucketMin(pct, IntMTMHTS::MINUTE));
for (int now = 0; now < 3600; now++) {
for (int i = 0; i < 50; i++) {
- hist.addValue(seconds(now), i * 2, 2); // adds each item 2 times
+ hist.addValue(mkTimePoint(now), i * 2, 2); // adds each item 2 times
}
}
- hist.update(seconds(3599));
+ hist.update(mkTimePoint(3599));
for (int pct = 1; pct <= 100; pct++) {
int expected = (pct - 1) / 10 * 10;
EXPECT_EQ(expected, hist.getPercentileBucketMin(pct, IntMTMHTS::MINUTE));
}
for (int i = 0; i < 100; ++i) {
- hist.addValue(seconds(3599), 200 + i);
+ hist.addValue(mkTimePoint(3599), 200 + i);
}
- hist.update(seconds(3599));
+ hist.update(mkTimePoint(3599));
EXPECT_EQ(100,
hist.getBucket(hist.getNumBuckets() - 1).count(
IntMTMHTS::ALLTIME));
60, IntMHTS::NUM_LEVELS,
IntMHTS::kDurations));
- mhts.update(seconds(0));
+ mhts.update(mkTimePoint(0));
int curTime;
for (curTime = 0; curTime < 7200; curTime++) {
- mhts.addValue(seconds(curTime), 1);
+ mhts.addValue(mkTimePoint(curTime), 1);
}
for (curTime = 7200; curTime < 7200 + 3540; curTime++) {
- mhts.addValue(seconds(curTime), 10);
+ mhts.addValue(mkTimePoint(curTime), 10);
}
for (curTime = 7200 + 3540; curTime < 7200 + 3600; curTime++) {
- mhts.addValue(seconds(curTime), 100);
+ mhts.addValue(mkTimePoint(curTime), 100);
}
- mhts.update(seconds(7200 + 3600 - 1));
+ mhts.update(mkTimePoint(7200 + 3600 - 1));
struct TimeInterval {
- TimeInterval(int s, int e)
- : start(s), end(e) {}
+ TimeInterval(int s, int e) : start(mkTimePoint(s)), end(mkTimePoint(e)) {}
- std::chrono::seconds start;
- std::chrono::seconds end;
+ StatsClock::time_point start;
+ StatsClock::time_point end;
};
TimeInterval intervals[12] = {
{ curTime - 60, curTime },
// 3 levels
for (int i = 1; i <= 100; i++) {
EXPECT_EQ(96, mhts.getPercentileBucketMin(i, 0));
- EXPECT_EQ(96, mhts.getPercentileBucketMin(i, seconds(curTime - 60),
- seconds(curTime)));
- EXPECT_EQ(8, mhts.getPercentileBucketMin(i, seconds(curTime - 3540),
- seconds(curTime - 60)));
+ EXPECT_EQ(
+ 96,
+ mhts.getPercentileBucketMin(
+ i, mkTimePoint(curTime - 60), mkTimePoint(curTime)));
+ EXPECT_EQ(
+ 8,
+ mhts.getPercentileBucketMin(
+ i, mkTimePoint(curTime - 3540), mkTimePoint(curTime - 60)));
}
EXPECT_EQ(8, mhts.getPercentileBucketMin(1, 1));
// Some of the older intervals that fall in the alltime bucket
// are off by 1 or 2 in their estimated counts.
size_t tolerance = 0;
- if (itv.start <= seconds(curTime - 7200)) {
+ if (itv.start <= mkTimePoint(curTime - 7200)) {
tolerance = 2;
- } else if (itv.start <= seconds(curTime - 3000)) {
+ } else if (itv.start <= mkTimePoint(curTime - 3000)) {
tolerance = 1;
}
size_t actualCount = (itv.end - itv.start).count();
const int kNumIters = 1000;
for (int jj = 0; jj < kNumIters; ++jj) {
- h.addValue(seconds(time(nullptr)), value);
+ h.addValue(mkTimePoint(1), value);
}
- h.update(seconds(time(nullptr)));
+ h.update(mkTimePoint(1));
// since we've only added one unique value, all percentiles should
// be that value
EXPECT_EQ(h.getPercentileEstimate(10, 0), value);
// Things get trickier if there are multiple unique values.
const int kNewValue = 750;
for (int kk = 0; kk < 2*kNumIters; ++kk) {
- h.addValue(seconds(time(nullptr)), kNewValue);
+ h.addValue(mkTimePoint(1), kNewValue);
}
- h.update(seconds(time(nullptr)));
+ h.update(mkTimePoint(1));
EXPECT_NEAR(h.getPercentileEstimate(50, 0), kNewValue+5, 5);
if (value >= 0 && value <= 1000) {
// only do further testing if value is within our bucket range,
using std::vector;
using folly::BucketedTimeSeries;
+using StatsClock = folly::LegacyStatsClock<std::chrono::seconds>;
+using TimePoint = StatsClock::time_point;
+
+/*
+ * Helper functions to allow us to directly log time points and durations.
+ */
+namespace std {
+std::ostream& operator<<(std::ostream& os, std::chrono::seconds s) {
+ os << s.count();
+ return os;
+}
+std::ostream& operator<<(std::ostream& os, TimePoint tp) {
+ os << tp.time_since_epoch().count();
+ return os;
+}
+}
+
+namespace {
+TimePoint mkTimePoint(int value) {
+ return TimePoint(StatsClock::duration(value));
+}
+
struct TestData {
- size_t duration;
+ TestData(int d, int b, std::initializer_list<int> starts)
+ : duration(d), numBuckets(b) {
+ bucketStarts.reserve(starts.size());
+ for (int s : starts) {
+ bucketStarts.push_back(mkTimePoint(s));
+ }
+ }
+ seconds duration;
size_t numBuckets;
- vector<ssize_t> bucketStarts;
+ vector<TimePoint> bucketStarts;
};
vector<TestData> testData = {
// 71 seconds x 4 buckets
// 1 second x 1 buckets
{ 1, 1, {0}},
};
+}
TEST(BucketedTimeSeries, getBucketInfo) {
for (const auto& data : testData) {
- BucketedTimeSeries<int64_t> ts(data.numBuckets, seconds(data.duration));
+ BucketedTimeSeries<int64_t> ts(data.numBuckets, data.duration);
for (uint32_t n = 0; n < 10000; n += 1234) {
seconds offset(n * data.duration);
for (uint32_t idx = 0; idx < data.numBuckets; ++idx) {
- seconds bucketStart(data.bucketStarts[idx]);
- seconds nextBucketStart;
+ auto bucketStart = data.bucketStarts[idx];
+ TimePoint nextBucketStart;
if (idx + 1 < data.numBuckets) {
- nextBucketStart = seconds(data.bucketStarts[idx + 1]);
+ nextBucketStart = data.bucketStarts[idx + 1];
} else {
- nextBucketStart = seconds(data.duration);
+ nextBucketStart = TimePoint(data.duration);
}
- seconds expectedStart = offset + bucketStart;
- seconds expectedNextStart = offset + nextBucketStart;
- seconds midpoint = (expectedStart + expectedNextStart) / 2;
+ TimePoint expectedStart = offset + bucketStart;
+ TimePoint expectedNextStart = offset + nextBucketStart;
+ TimePoint midpoint =
+ expectedStart + (expectedNextStart - expectedStart) / 2;
- vector<std::pair<string, seconds>> timePoints = {
- {"expectedStart", expectedStart},
- {"midpoint", midpoint},
- {"expectedEnd", expectedNextStart - seconds(1)},
+ vector<std::pair<string, TimePoint>> timePoints = {
+ {"expectedStart", expectedStart},
+ {"midpoint", midpoint},
+ {"expectedEnd", expectedNextStart - seconds(1)},
};
for (const auto& point : timePoints) {
// Check that getBucketIdx() returns the expected index
- EXPECT_EQ(idx, ts.getBucketIdx(point.second)) <<
- data.duration << "x" << data.numBuckets << ": " <<
- point.first << "=" << point.second.count();
+ EXPECT_EQ(idx, ts.getBucketIdx(point.second))
+ << data.duration << "x" << data.numBuckets << ": " << point.first
+ << "=" << point.second;
// Check the data returned by getBucketInfo()
size_t returnedIdx;
- seconds returnedStart;
- seconds returnedNextStart;
+ TimePoint returnedStart;
+ TimePoint returnedNextStart;
ts.getBucketInfo(expectedStart, &returnedIdx,
&returnedStart, &returnedNextStart);
- EXPECT_EQ(idx, returnedIdx) <<
- data.duration << "x" << data.numBuckets << ": " <<
- point.first << "=" << point.second.count();
- EXPECT_EQ(expectedStart.count(), returnedStart.count()) <<
- data.duration << "x" << data.numBuckets << ": " <<
- point.first << "=" << point.second.count();
- EXPECT_EQ(expectedNextStart.count(), returnedNextStart.count()) <<
- data.duration << "x" << data.numBuckets << ": " <<
- point.first << "=" << point.second.count();
+ EXPECT_EQ(idx, returnedIdx) << data.duration << "x" << data.numBuckets
+ << ": " << point.first << "="
+ << point.second;
+ EXPECT_EQ(expectedStart, returnedStart)
+ << data.duration << "x" << data.numBuckets << ": " << point.first
+ << "=" << point.second;
+ EXPECT_EQ(expectedNextStart, returnedNextStart)
+ << data.duration << "x" << data.numBuckets << ": " << point.first
+ << "=" << point.second;
}
}
}
TEST(BucketedTimeSeries, forEachBucket) {
typedef BucketedTimeSeries<int64_t>::Bucket Bucket;
struct BucketInfo {
- BucketInfo(const Bucket* b, seconds s, seconds ns)
- : bucket(b), start(s), nextStart(ns) {}
+ BucketInfo(const Bucket* b, TimePoint s, TimePoint ns)
+ : bucket(b), start(s), nextStart(ns) {}
const Bucket* bucket;
- seconds start;
- seconds nextStart;
+ TimePoint start;
+ TimePoint nextStart;
};
for (const auto& data : testData) {
BucketedTimeSeries<int64_t> ts(data.numBuckets, seconds(data.duration));
vector<BucketInfo> info;
- auto fn = [&](const Bucket& bucket, seconds bucketStart,
- seconds bucketEnd) -> bool {
+ auto fn = [&](
+ const Bucket& bucket,
+ TimePoint bucketStart,
+ TimePoint bucketEnd) -> bool {
info.emplace_back(&bucket, bucketStart, bucketEnd);
return true;
};
// Check the data passed in to the function
size_t infoIdx = 0;
size_t bucketIdx = 1;
- ssize_t offset = -data.duration;
+ seconds offset = -data.duration;
for (size_t n = 0; n < data.numBuckets; ++n) {
if (bucketIdx >= data.numBuckets) {
bucketIdx = 0;
offset += data.duration;
}
- EXPECT_EQ(data.bucketStarts[bucketIdx] + offset,
- info[infoIdx].start.count()) <<
- data.duration << "x" << data.numBuckets << ": bucketIdx=" <<
- bucketIdx << ", infoIdx=" << infoIdx;
+ EXPECT_EQ(data.bucketStarts[bucketIdx] + offset, info[infoIdx].start)
+ << data.duration << "x" << data.numBuckets
+ << ": bucketIdx=" << bucketIdx << ", infoIdx=" << infoIdx;
size_t nextBucketIdx = bucketIdx + 1;
- ssize_t nextOffset = offset;
+ seconds nextOffset = offset;
if (nextBucketIdx >= data.numBuckets) {
nextBucketIdx = 0;
nextOffset += data.duration;
}
- EXPECT_EQ(data.bucketStarts[nextBucketIdx] + nextOffset,
- info[infoIdx].nextStart.count()) <<
- data.duration << "x" << data.numBuckets << ": bucketIdx=" <<
- bucketIdx << ", infoIdx=" << infoIdx;
+ EXPECT_EQ(
+ data.bucketStarts[nextBucketIdx] + nextOffset,
+ info[infoIdx].nextStart)
+ << data.duration << "x" << data.numBuckets
+ << ": bucketIdx=" << bucketIdx << ", infoIdx=" << infoIdx;
EXPECT_EQ(&ts.getBucketByIndex(bucketIdx), info[infoIdx].bucket);
// This is entirely in the first bucket, which has a sum of 4.
// The code knows only part of the bucket is covered, and correctly
// estimates the desired sum as 3.
- EXPECT_EQ(2, a.sum(seconds(0), seconds(2)));
+ EXPECT_EQ(2, a.sum(mkTimePoint(0), mkTimePoint(2)));
}
TEST(BucketedTimeSeries, queryByInterval) {
for (unsigned int i = 0; i < kDuration; ++i) {
// add value 'i' at time 'i'
- b.addValue(seconds(i), i);
+ b.addValue(mkTimePoint(i), i);
}
// Current bucket state:
{0, -1, -1, -1, -1, -1, -1}
};
- seconds currentTime = b.getLatestTime() + seconds(1);
+ TimePoint currentTime = b.getLatestTime() + seconds(1);
for (int i = 0; i <= kDuration + 1; i++) {
for (int j = 0; j <= kDuration - i; j++) {
- seconds start = currentTime - seconds(i + j);
- seconds end = currentTime - seconds(i);
+ TimePoint start = currentTime - seconds(i + j);
+ TimePoint end = currentTime - seconds(i);
double expectedSum = expectedSums1[i][j];
- EXPECT_EQ(expectedSum, b.sum(start, end)) <<
- "i=" << i << ", j=" << j <<
- ", interval=[" << start.count() << ", " << end.count() << ")";
+ EXPECT_EQ(expectedSum, b.sum(start, end))
+ << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
+ << ")";
uint64_t expectedCount = expectedCounts1[i][j];
- EXPECT_EQ(expectedCount, b.count(start, end)) <<
- "i=" << i << ", j=" << j <<
- ", interval=[" << start.count() << ", " << end.count() << ")";
+ EXPECT_EQ(expectedCount, b.count(start, end))
+ << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
+ << ")";
double expectedAvg = expectedCount ? expectedSum / expectedCount : 0;
- EXPECT_EQ(expectedAvg, b.avg(start, end)) <<
- "i=" << i << ", j=" << j <<
- ", interval=[" << start.count() << ", " << end.count() << ")";
+ EXPECT_EQ(expectedAvg, b.avg(start, end))
+ << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
+ << ")";
double expectedRate = j ? expectedSum / j : 0;
- EXPECT_EQ(expectedRate, b.rate(start, end)) <<
- "i=" << i << ", j=" << j <<
- ", interval=[" << start.count() << ", " << end.count() << ")";
+ EXPECT_EQ(expectedRate, b.rate(start, end))
+ << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
+ << ")";
}
}
// Add 3 more values.
// This will overwrite 1 full bucket, and put us halfway through the next.
for (unsigned int i = kDuration; i < kDuration + 3; ++i) {
- b.addValue(seconds(i), i);
+ b.addValue(mkTimePoint(i), i);
}
- EXPECT_EQ(seconds(4), b.getEarliestTime());
+ EXPECT_EQ(mkTimePoint(4), b.getEarliestTime());
// Current bucket state:
// 0: time=[6, 8): values=(6, 7), sum=13, count=2
currentTime = b.getLatestTime() + seconds(1);
for (int i = 0; i <= kDuration + 1; i++) {
for (int j = 0; j <= kDuration - i; j++) {
- seconds start = currentTime - seconds(i + j);
- seconds end = currentTime - seconds(i);
+ TimePoint start = currentTime - seconds(i + j);
+ TimePoint end = currentTime - seconds(i);
double expectedSum = expectedSums2[i][j];
- EXPECT_EQ(expectedSum, b.sum(start, end)) <<
- "i=" << i << ", j=" << j <<
- ", interval=[" << start.count() << ", " << end.count() << ")";
+ EXPECT_EQ(expectedSum, b.sum(start, end))
+ << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
+ << ")";
uint64_t expectedCount = expectedCounts2[i][j];
- EXPECT_EQ(expectedCount, b.count(start, end)) <<
- "i=" << i << ", j=" << j <<
- ", interval=[" << start.count() << ", " << end.count() << ")";
+ EXPECT_EQ(expectedCount, b.count(start, end))
+ << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
+ << ")";
double expectedAvg = expectedCount ? expectedSum / expectedCount : 0;
- EXPECT_EQ(expectedAvg, b.avg(start, end)) <<
- "i=" << i << ", j=" << j <<
- ", interval=[" << start.count() << ", " << end.count() << ")";
+ EXPECT_EQ(expectedAvg, b.avg(start, end))
+ << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
+ << ")";
- seconds dataStart = std::max(start, b.getEarliestTime());
- seconds dataEnd = std::max(end, dataStart);
+ TimePoint dataStart = std::max(start, b.getEarliestTime());
+ TimePoint dataEnd = std::max(end, dataStart);
seconds expectedInterval = dataEnd - dataStart;
- EXPECT_EQ(expectedInterval, b.elapsed(start, end)) <<
- "i=" << i << ", j=" << j <<
- ", interval=[" << start.count() << ", " << end.count() << ")";
+ EXPECT_EQ(expectedInterval, b.elapsed(start, end))
+ << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
+ << ")";
double expectedRate = expectedInterval.count() ?
expectedSum / expectedInterval.count() : 0;
- EXPECT_EQ(expectedRate, b.rate(start, end)) <<
- "i=" << i << ", j=" << j <<
- ", interval=[" << start.count() << ", " << end.count() << ")";
+ EXPECT_EQ(expectedRate, b.rate(start, end))
+ << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
+ << ")";
}
}
}
// Add data points at a constant rate of 10 per second.
// Start adding data points at kDuration, and fill half of the buckets for
// now.
- seconds start = kDuration;
- seconds end = kDuration + (kDuration / 2);
+ TimePoint start(kDuration);
+ TimePoint end(kDuration + (kDuration / 2));
const double kFixedRate = 10.0;
- for (seconds i = start; i < end; ++i) {
+ for (TimePoint i = start; i < end; i += seconds(1)) {
b.addValue(i, kFixedRate);
}
// We haven't added anything before time kDuration.
// Querying data earlier than this should result in a rate of 0.
- EXPECT_EQ(0.0, b.rate(seconds(0), seconds(1)));
- EXPECT_EQ(0.0, b.countRate(seconds(0), seconds(1)));
+ EXPECT_EQ(0.0, b.rate(mkTimePoint(0), mkTimePoint(1)));
+ EXPECT_EQ(0.0, b.countRate(mkTimePoint(0), mkTimePoint(1)));
// Fill the remainder of the timeseries from kDuration to kDuration*2
start = end;
- end = kDuration * 2;
- for (seconds i = start; i < end; ++i) {
+ end = TimePoint(kDuration * 2);
+ for (TimePoint i = start; i < end; i += seconds(1)) {
b.addValue(i, kFixedRate);
}
EXPECT_EQ(kFixedRate, b.rate());
- EXPECT_EQ(kFixedRate, b.rate(kDuration, kDuration * 2));
- EXPECT_EQ(kFixedRate, b.rate(seconds(0), kDuration * 2));
- EXPECT_EQ(kFixedRate, b.rate(seconds(0), kDuration * 10));
+ EXPECT_EQ(kFixedRate, b.rate(TimePoint(kDuration), TimePoint(kDuration * 2)));
+ EXPECT_EQ(kFixedRate, b.rate(TimePoint(), TimePoint(kDuration * 2)));
+ EXPECT_EQ(kFixedRate, b.rate(TimePoint(), TimePoint(kDuration * 10)));
EXPECT_EQ(1.0, b.countRate());
- EXPECT_EQ(1.0, b.countRate(kDuration, kDuration * 2));
- EXPECT_EQ(1.0, b.countRate(seconds(0), kDuration * 2));
- EXPECT_EQ(1.0, b.countRate(seconds(0), kDuration * 10));
+ EXPECT_EQ(1.0, b.countRate(TimePoint(kDuration), TimePoint(kDuration * 2)));
+ EXPECT_EQ(1.0, b.countRate(TimePoint(), TimePoint(kDuration * 2)));
+ EXPECT_EQ(1.0, b.countRate(TimePoint(), TimePoint(kDuration * 10)));
}
TEST(BucketedTimeSeries, addHistorical) {
BucketedTimeSeries<double> b(kNumBuckets, kDuration);
// Initially fill with a constant rate of data
- for (seconds i = seconds(0); i < seconds(10); ++i) {
+ for (TimePoint i = mkTimePoint(0); i < mkTimePoint(10); i += seconds(1)) {
b.addValue(i, 10.0);
}
EXPECT_EQ(10, b.count());
// Add some more data points to the middle bucket
- b.addValue(seconds(4), 40.0);
- b.addValue(seconds(5), 40.0);
+ b.addValue(mkTimePoint(4), 40.0);
+ b.addValue(mkTimePoint(5), 40.0);
EXPECT_EQ(15.0, b.avg());
EXPECT_EQ(18.0, b.rate());
EXPECT_EQ(12, b.count());
// Now start adding more current data points, until we are about to roll over
// the bucket where we added the extra historical data.
- for (seconds i = seconds(10); i < seconds(14); ++i) {
+ for (TimePoint i = mkTimePoint(10); i < mkTimePoint(14); i += seconds(1)) {
b.addValue(i, 10.0);
}
EXPECT_EQ(15.0, b.avg());
EXPECT_EQ(12, b.count());
// Now roll over the middle bucket
- b.addValue(seconds(14), 10.0);
- b.addValue(seconds(15), 10.0);
+ b.addValue(mkTimePoint(14), 10.0);
+ b.addValue(mkTimePoint(15), 10.0);
EXPECT_EQ(10.0, b.avg());
EXPECT_EQ(10.0, b.rate());
EXPECT_EQ(10, b.count());
// Add more historical values past the bucket window.
// These should be ignored.
- EXPECT_FALSE(b.addValue(seconds(4), 40.0));
- EXPECT_FALSE(b.addValue(seconds(5), 40.0));
+ EXPECT_FALSE(b.addValue(mkTimePoint(4), 40.0));
+ EXPECT_FALSE(b.addValue(mkTimePoint(5), 40.0));
EXPECT_EQ(10.0, b.avg());
EXPECT_EQ(10.0, b.rate());
EXPECT_EQ(10, b.count());
folly::MultiLevelTimeSeries<int> mhts(60, IntMHTS::NUM_LEVELS,
IntMHTS::kMinuteHourDurations);
- seconds curTime(0);
- for (curTime = seconds(0); curTime < seconds(7200); curTime++) {
+ TimePoint curTime;
+ for (curTime = mkTimePoint(0); curTime < mkTimePoint(7200);
+ curTime += seconds(1)) {
mhts.addValue(curTime, 1);
}
- for (curTime = seconds(7200); curTime < seconds(7200 + 3540); curTime++) {
+ for (curTime = mkTimePoint(7200); curTime < mkTimePoint(7200 + 3540);
+ curTime += seconds(1)) {
mhts.addValue(curTime, 10);
}
- for (curTime = seconds(7200 + 3540); curTime < seconds(7200 + 3600);
- curTime++) {
+ for (curTime = mkTimePoint(7200 + 3540); curTime < mkTimePoint(7200 + 3600);
+ curTime += seconds(1)) {
mhts.addValue(curTime, 100);
}
mhts.flush();
struct TimeInterval {
- seconds start;
- seconds end;
+ TimePoint start;
+ TimePoint end;
};
TimeInterval intervals[12] = {
{ curTime - seconds(60), curTime },
folly::MultiLevelTimeSeries<int> mhts(
60, {seconds(60), seconds(3600), seconds(0)});
- seconds curTime(0);
- for (curTime = seconds(0); curTime < seconds(7200); curTime++) {
+ TimePoint curTime;
+ for (curTime = mkTimePoint(0); curTime < mkTimePoint(7200);
+ curTime += seconds(1)) {
mhts.addValue(curTime, 1);
}
- for (curTime = seconds(7200); curTime < seconds(7200 + 3540); curTime++) {
+ for (curTime = mkTimePoint(7200); curTime < mkTimePoint(7200 + 3540);
+ curTime += seconds(1)) {
mhts.addValue(curTime, 10);
}
- for (curTime = seconds(7200 + 3540); curTime < seconds(7200 + 3600);
- curTime++) {
+ for (curTime = mkTimePoint(7200 + 3540); curTime < mkTimePoint(7200 + 3600);
+ curTime += seconds(1)) {
mhts.addValue(curTime, 100);
}
mhts.flush();
struct TimeInterval {
- seconds start;
- seconds end;
+ TimePoint start;
+ TimePoint end;
};
std::array<TimeInterval, 12> intervals = {{