class SysAlloc {
public:
void* allocate(size_t size) {
- void* mem = malloc(size);
- if (!mem) throw std::bad_alloc();
- return mem;
+ // Same contract as before: throws std::bad_alloc on allocation failure.
+ return checkedMalloc(size);
}
void deallocate(void* p) {
// one extra Char for the null terminator.
auto const allocSize =
goodMallocSize((1 + rhs.ml_.size_) * sizeof(Char));
- ml_.data_ = static_cast<Char*>(malloc(allocSize));
+ ml_.data_ = static_cast<Char*>(checkedMalloc(allocSize));
fbstring_detail::pod_copy(rhs.ml_.data_,
// 1 for terminator
rhs.ml_.data_ + rhs.ml_.size_ + 1,
// Medium strings are allocated normally. Don't forget to
// allocate one extra Char for the terminating null.
auto const allocSize = goodMallocSize((1 + size) * sizeof(Char));
- ml_.data_ = static_cast<Char*>(malloc(allocSize));
+ ml_.data_ = static_cast<Char*>(checkedMalloc(allocSize));
fbstring_detail::pod_copy(data, data + size, ml_.data_);
ml_.size_ = size;
ml_.capacity_ = (allocSize / sizeof(Char) - 1) | isMedium;
// Don't forget to allocate one extra Char for the terminating null
auto const allocSizeBytes =
goodMallocSize((1 + minCapacity) * sizeof(Char));
- auto const data = static_cast<Char*>(malloc(allocSizeBytes));
+ auto const data = static_cast<Char*>(checkedMalloc(allocSizeBytes));
auto const size = smallSize();
fbstring_detail::pod_copy(small_, small_ + size + 1, data);
// No need for writeTerminator(), we wrote it above with + 1.
// struct.
const size_t allocSize = goodMallocSize(
sizeof(RefCounted) + *size * sizeof(Char));
- auto result = static_cast<RefCounted*>(malloc(allocSize));
+ auto result = static_cast<RefCounted*>(checkedMalloc(allocSize));
result->refCount_.store(1, std::memory_order_release);
*size = (allocSize - sizeof(RefCounted)) / sizeof(Char);
return result;
for (;;) {
// This looks quadratic but it really depends on realloc
auto const newSize = size + 128;
- buf = static_cast<char*>(realloc(buf, newSize));
+ buf = static_cast<char*>(checkedRealloc(buf, newSize));
is.getline(buf + size, newSize - size, delim);
if (is.bad() || is.eof() || !is.fail()) {
// done by either failure, end of file, or normal read
}
auto const nBytes = goodMallocSize(n * sizeof(T));
- b_ = static_cast<T*>(malloc(nBytes));
+ b_ = static_cast<T*>(checkedMalloc(nBytes));
fbvector_detail::uninitializedFillDefaultOrFree(b_, n);
e_ = b_ + n;
z_ = b_ + nBytes / sizeof(T);
}
auto const nBytes = goodMallocSize(n * sizeof(T));
- b_ = static_cast<T*>(malloc(nBytes));
+ b_ = static_cast<T*>(checkedMalloc(nBytes));
fbvector_detail::uninitializedFillOrFree(b_, n, value);
e_ = b_ + n;
z_ = b_ + nBytes / sizeof(T);
// Must reallocate - just do it on the side
auto const nBytes = goodMallocSize(newSize * sizeof(T));
- auto const b = static_cast<T*>(malloc(nBytes));
+ auto const b = static_cast<T*>(checkedMalloc(nBytes));
std::uninitialized_copy(first, last, b);
this->fbvector::~fbvector();
b_ = b;
assert(crtCapacity < n); // reserve_in_place should have taken
// care of this
auto const newCapacityBytes = goodMallocSize(n * sizeof(T));
- auto b = static_cast<T*>(malloc(newCapacityBytes));
+ auto b = static_cast<T*>(checkedMalloc(newCapacityBytes));
auto const oldSize = size();
memcpy(b, b_, oldSize * sizeof(T));
// Done with the old chunk. Free but don't call destructors!
// expanded in place, and this constant reflects that.
static const size_t jemallocMinInPlaceExpandable = 4096;
+/**
+ * Trivial wrappers around malloc, calloc, realloc that check for allocation
+ * failure and throw std::bad_alloc in that case.
+ *
+ * A zero-byte request may legally yield a null pointer from malloc, calloc,
+ * or realloc without being an allocation failure (C standard, 7.22.3), so we
+ * throw only when the requested size is non-zero; this avoids a spurious
+ * bad_alloc for empty allocations.
+ *
+ * NOTE(review): if checkedRealloc throws, the original block is still live
+ * (realloc does not free it on failure), but the common calling pattern
+ * "buf = checkedRealloc(buf, n)" loses the only pointer to it during stack
+ * unwinding. Callers that must not leak should realloc into a temporary.
+ */
+inline void* checkedMalloc(size_t size) {
+ void* p = malloc(size);
+ // malloc(0) may return nullptr on success; only non-zero requests can fail.
+ if (!p && size != 0) throw std::bad_alloc();
+ return p;
+}
+
+inline void* checkedCalloc(size_t n, size_t size) {
+ void* p = calloc(n, size);
+ // calloc with a zero element count or size may return nullptr on success.
+ if (!p && n != 0 && size != 0) throw std::bad_alloc();
+ return p;
+}
+
+inline void* checkedRealloc(void* ptr, size_t size) {
+ void* p = realloc(ptr, size);
+ // realloc(ptr, 0) may free ptr and return nullptr; that is not a failure.
+ if (!p && size != 0) throw std::bad_alloc();
+ return p;
+}
+
/**
* This function tries to reallocate a buffer of which only the first
* currentSize bytes are used. The problem with using realloc is that
return p;
}
// Cannot expand; must move
- auto const result = malloc(newCapacity);
+ auto const result = checkedMalloc(newCapacity);
std::memcpy(result, p, currentSize);
free(p);
return result;
auto const slack = currentCapacity - currentSize;
if (slack * 2 > currentSize) {
// Too much slack, malloc-copy-free cycle:
- auto const result = malloc(newCapacity);
+ auto const result = checkedMalloc(newCapacity);
std::memcpy(result, p, currentSize);
free(p);
return result;
}
// If there's not too much slack, we realloc in hope of coalescing
- return realloc(p, newCapacity);
+ return checkedRealloc(p, newCapacity);
}
#ifdef _LIBSTDCXX_FBSTRING
needBytes += kHeapifyCapacitySize;
}
auto const sizeBytes = goodMallocSize(needBytes);
- void* newh = std::malloc(sizeBytes);
- if (!newh) {
- throw std::bad_alloc();
- }
+ void* newh = checkedMalloc(sizeBytes);
// We expect newh to be at least 2-aligned, because we want to
// use its least significant bit as a flag.
assert(!detail::pointerFlagGet(newh));