Summary: Make `IndexedMemPool` call `Traits::onRecycle` on the element just before it is marked as deallocated. This mirrors the allocation behavior implemented in
D5177462 and simplifies preventing access to recycled elements (the client just needs to check `isAllocated` before accessing the element).
Reviewed By: nbronson
Differential Revision: D5275283
fbshipit-source-id: 58365b5b7b32b07fa56529c476078f241fc20811
/// Gives up ownership previously granted by alloc()
void recycleIndex(uint32_t idx) {
assert(isAllocated(idx));
/// Gives up ownership previously granted by alloc()
void recycleIndex(uint32_t idx) {
assert(isAllocated(idx));
- Traits::onRecycle(&slot(idx).elem);
localPush(localHead(), idx);
}
localPush(localHead(), idx);
}
Slot& s = slot(idx);
TaggedPtr h = head.load(std::memory_order_acquire);
while (true) {
Slot& s = slot(idx);
TaggedPtr h = head.load(std::memory_order_acquire);
while (true) {
- s.localNext.store(h.idx, std::memory_order_relaxed);
+ s.localNext.store(h.idx, std::memory_order_release);
+ Traits::onRecycle(&slot(idx).elem);
if (h.size() == LocalListLimit) {
// push will overflow local list, steal it instead
if (h.size() == LocalListLimit) {
// push will overflow local list, steal it instead
elem = nullptr;
EXPECT_CALL(traits, onRecycle(_)).WillOnce(Invoke([&](std::string* s) {
elem = nullptr;
EXPECT_CALL(traits, onRecycle(_)).WillOnce(Invoke([&](std::string* s) {
- EXPECT_TRUE(pool.isAllocated(pool.locateElem(s)));
+ EXPECT_FALSE(pool.isAllocated(pool.locateElem(s)));
elem = s;
}));
pool.recycleIndex(pool.locateElem(ptr));
elem = s;
}));
pool.recycleIndex(pool.locateElem(ptr));