/*
- * Copyright 2016 Facebook, Inc.
+ * Copyright 2012-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#pragma once
-#include <algorithm>
-#include <glog/logging.h>
#include <folly/Likely.h>
#include <folly/stats/BucketedTimeSeries.h>
+#include <glog/logging.h>
+#include <algorithm>
+#include <stdexcept>
namespace folly {
// There is no point in having more buckets than our timestamp
// granularity: otherwise we would have buckets that could never be used.
if (nBuckets > size_t(duration_.count())) {
- nBuckets = duration_.count();
+ nBuckets = size_t(duration_.count());
}
buckets_.resize(nBuckets, Bucket());
}
}
+// Reconstruct a BucketedTimeSeries from previously-captured state:
+// the first/latest observed timestamps, the tracking duration, and the
+// per-bucket data. total_ is recomputed from the supplied buckets, and
+// the invariants relating firstTime_, latestTime_, and duration_ are
+// verified; std::invalid_argument is thrown if the state is inconsistent.
+template <typename VT, typename CT>
+BucketedTimeSeries<VT, CT>::BucketedTimeSeries(
+    TimePoint theFirstTime,
+    TimePoint theLatestTime,
+    Duration maxDuration,
+    const std::vector<Bucket>& bucketsList)
+    : firstTime_(theFirstTime),
+      latestTime_(theLatestTime),
+      duration_(maxDuration),
+      buckets_(bucketsList) {
+  // Come up with the total_ from buckets_ being passed in
+  for (auto const& bucket : buckets_) {
+    total_.add(bucket.sum, bucket.count);
+  }
+
+  // Verify the integrity of the data
+
+  // If firstTime is greater than latestTime, the total count should be 0.
+  // (firstTime being greater than latestTime means that no data points have
+  // ever been added to the time series.)
+  if (firstTime_ > latestTime_ && (total_.sum != 0 || total_.count != 0)) {
+    throw std::invalid_argument(
+        "The total should have been 0 "
+        "if firstTime is greater than latestTime");
+  }
+
+  // If firstTime is less than or equal to latestTime,
+  // latestTime - firstTime should be less than or equal to the duration.
+  if (firstTime_ <= latestTime_ && latestTime_ - firstTime_ > duration_) {
+    throw std::invalid_argument(
+        "The difference between firstTime and latestTime "
+        "should be less than or equal to the duration");
+  }
+}
+
template <typename VT, typename CT>
bool BucketedTimeSeries<VT, CT>::addValue(TimePoint now, const ValueType& val) {
return addValueAggregated(now, val, 1);
bool BucketedTimeSeries<VT, CT>::addValue(
TimePoint now,
const ValueType& val,
- int64_t times) {
- return addValueAggregated(now, val * times, times);
+ uint64_t times) {
+ return addValueAggregated(now, val * ValueType(times), times);
}
template <typename VT, typename CT>
bool BucketedTimeSeries<VT, CT>::addValueAggregated(
TimePoint now,
const ValueType& total,
- int64_t nsamples) {
+ uint64_t nsamples) {
if (isAllTime()) {
if (UNLIKELY(empty())) {
firstTime_ = now;
size_t currentBucket;
TimePoint currentBucketStart;
TimePoint nextBucketStart;
- getBucketInfo(latestTime_, ¤tBucket,
- ¤tBucketStart, &nextBucketStart);
+ getBucketInfo(
+ latestTime_, ¤tBucket, ¤tBucketStart, &nextBucketStart);
// Update latestTime_
latestTime_ = now;
size_t currentBucket;
TimePoint currentBucketStart;
TimePoint nextBucketStart;
- getBucketInfo(latestTime_, ¤tBucket,
- ¤tBucketStart, &nextBucketStart);
+ getBucketInfo(
+ latestTime_, ¤tBucket, ¤tBucketStart, &nextBucketStart);
// Subtract 1 duration from the start of the next bucket to find the
// earliest possible data point we could be tracking.
TimePoint bucketStart,
TimePoint nextBucketStart) -> bool {
sample_count += this->rangeAdjust(
- bucketStart, nextBucketStart, start, end, bucket.count);
+ bucketStart, nextBucketStart, start, end, ValueType(bucket.count));
return true;
});
total += this->rangeAdjust(
bucketStart, nextBucketStart, start, end, bucket.sum);
sample_count += this->rangeAdjust(
- bucketStart, nextBucketStart, start, end, bucket.count);
+ bucketStart, nextBucketStart, start, end, ValueType(bucket.count));
return true;
});
Duration timeMod = time.time_since_epoch() % duration_;
TimeInt numFullDurations = time.time_since_epoch() / duration_;
- TimeInt scaledTime = timeMod.count() * buckets_.size();
+ TimeInt scaledTime = timeMod.count() * TimeInt(buckets_.size());
// Keep these two lines together. The compiler should be able to compute
// both the division and modulus with a single operation.
- *bucketIdx = scaledTime / duration_.count();
+ *bucketIdx = size_t(scaledTime / duration_.count());
TimeInt scaledOffsetInBucket = scaledTime % duration_.count();
TimeInt scaledBucketStart = scaledTime - scaledOffsetInBucket;
Duration timeMod = latestTime_.time_since_epoch() % duration_;
TimeInt numFullDurations = latestTime_.time_since_epoch() / duration_;
TimePoint durationStart(numFullDurations * duration_);
- TimeInt scaledTime = timeMod.count() * buckets_.size();
- size_t latestBucketIdx = scaledTime / duration_.count();
+ TimeInt scaledTime = timeMod.count() * TimeInt(buckets_.size());
+ size_t latestBucketIdx = size_t(scaledTime / duration_.count());
TimeInt scaledOffsetInBucket = scaledTime % duration_.count();
TimeInt scaledBucketStart = scaledTime - scaledOffsetInBucket;
TimeInt scaledNextBucketStart = scaledBucketStart + duration_.count();
TimePoint intervalStart = std::max(start, bucketStart);
TimePoint intervalEnd = std::min(end, nextBucketStart);
return input * (intervalEnd - intervalStart) /
- (nextBucketStart - bucketStart);
+ (nextBucketStart - bucketStart);
}
template <typename VT, typename CT>
});
}
-} // folly
+} // namespace folly