struct rhash_head *he;
spinlock_t *lock;
unsigned int hash;
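+ /* true once @obj has been unlinked from a bucket chain */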
+ bool ret = false;
rcu_read_lock();
tbl = rht_dereference_rcu(ht->tbl, ht);
}
rcu_assign_pointer(*pprev, obj->next);
- atomic_dec(&ht->nelems);
-
- spin_unlock_bh(lock);
-
- rhashtable_wakeup_worker(ht);
-
- rcu_read_unlock();
- return true;
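+ /* Record the removal but keep scanning state; unlocking, accounting
+ * and worker wakeup are deferred to the common exit path so the
+ * lookup can be repeated on 'future_tbl' below.
+ */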
+ ret = true;
+ break;
}
+ /* The entry may be linked in either 'tbl', 'future_tbl', or both.
+ * 'future_tbl' only exists for a short period of time during a
+ * resize, so traversing both is fine and the extra cost is only
+ * incurred rarely.
+ */
if (tbl != rht_dereference_rcu(ht->future_tbl, ht)) {
spin_unlock_bh(lock);
tbl = rht_dereference_rcu(ht->future_tbl, ht);
hash = head_hashfn(ht, tbl, obj);
lock = bucket_lock(tbl, hash);
spin_lock_bh(lock);
goto restart;
}
spin_unlock_bh(lock);
+
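+ /* Only adjust nelems and kick the deferred resize worker if the
+ * object was actually found and unlinked, and only after the
+ * bucket lock has been dropped.
+ */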
+ if (ret) {
+ atomic_dec(&ht->nelems);
+ rhashtable_wakeup_worker(ht);
+ }
+
rcu_read_unlock();
- return false;
+ return ret;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);