* @size: Number of hash buckets
* @rehash: Current bucket being rehashed
* @hash_rnd: Random seed to fold into hash
- * @shift: Current size (1 << shift)
* @locks_mask: Mask to apply before accessing locks[]
* @locks: Array of spinlocks protecting individual buckets
* @walkers: List of active walkers
unsigned int size;
unsigned int rehash;
u32 hash_rnd;
- u32 shift;
unsigned int locks_mask;
spinlock_t *locks;
struct list_head walkers;
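
With @size authoritative, the removed @shift field carries no extra information: bucket counts are always powers of two, so the old value is recoverable as ilog2(size). A minimal sketch of that equivalence, assuming <linux/log2.h>; the helper is hypothetical and not part of this patch:

	/* Hypothetical helper: recompute the old shift from the bucket count.
	 * Valid because nbuckets is always a power of two. */
	static inline unsigned int bucket_table_shift(const struct bucket_table *tbl)
	{
		return ilog2(tbl->size);	/* e.g. size == 64 -> shift == 6 */
	}
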
* @key_len: Length of key
* @key_offset: Offset of key in struct to be hashed
* @head_offset: Offset of rhash_head in struct to be hashed
- * @max_shift: Maximum number of shifts while expanding
- * @min_shift: Minimum number of shifts while shrinking
+ * @max_size: Maximum size while expanding
+ * @min_size: Minimum size while shrinking
* @nulls_base: Base value to generate nulls marker
* @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
* @hashfn: Function to hash key
size_t key_len;
size_t key_offset;
size_t head_offset;
- size_t max_shift;
- size_t min_shift;
+ unsigned int max_size;
+ unsigned int min_size;
u32 nulls_base;
size_t locks_mul;
rht_hashfn_t hashfn;
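
Callers now express both limits as absolute bucket counts; the size corresponding to an old shift s is 1 << s, and the values should stay powers of two since the table only doubles and halves. A sketch of a converted initializer (struct my_obj and its fields are illustrative only):

	struct rhashtable_params params = {
		.head_offset = offsetof(struct my_obj, node),
		.key_offset  = offsetof(struct my_obj, key),
		.key_len     = sizeof(u32),
		.hashfn      = jhash,
		.max_size    = 1UL << 16,	/* was .max_shift = 16 */
		.min_size    = 1UL << 8,	/* was .min_shift = 8 */
	};
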
#include <linux/err.h>
#define HASH_DEFAULT_SIZE 64UL
-#define HASH_MIN_SIZE 4UL
+#define HASH_MIN_SIZE 4U
#define BUCKET_LOCKS_PER_CPU 128UL
/* Base bits plus 1 bit for nulls marker */
return NULL;
tbl->size = nbuckets;
- tbl->shift = ilog2(nbuckets);
if (alloc_bucket_locks(ht, tbl) < 0) {
bucket_table_free(tbl);
{
/* Expand table when exceeding 75% load */
return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
- (!ht->p.max_shift || tbl->shift < ht->p.max_shift);
+ (!ht->p.max_size || tbl->size < ht->p.max_size);
}
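
Worked numbers for the new bound: with tbl->size == 64, tbl->size / 4 * 3 is 48 under integer arithmetic, so the table grows once it holds 49 entries, and a max_size of 0 means "no upper bound". A stand-alone sketch of the same predicate, not the kernel function itself:

	static bool grow_needed(unsigned int nelems, unsigned int size,
				unsigned int max_size)
	{
		return nelems > size / 4 * 3 &&		/* above 75% load */
		       (!max_size || size < max_size);	/* below the cap, if any */
	}
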
/**
{
/* Shrink table beneath 30% load */
return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
- tbl->shift > ht->p.min_shift;
+ tbl->size > ht->p.min_size;
}
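
Similarly for shrinking: with tbl->size == 256, tbl->size * 3 / 10 is 76, so the table halves only once occupancy falls below 76 entries and the current size is still above the configured floor. A stand-alone sketch under the same assumptions:

	static bool shrink_needed(unsigned int nelems, unsigned int size,
				  unsigned int min_size)
	{
		return nelems < size * 3 / 10 &&	/* below 30% load */
		       size > min_size;			/* stay above the floor */
	}
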
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
- 1UL << params->min_shift);
+ (unsigned long)params->min_size);
}
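
The (unsigned long) cast keeps the kernel's type-checked max() happy, since roundup_pow_of_two() yields an unsigned long while min_size is now an unsigned int. A worked example with illustrative inputs:

	/* nelem_hint = 100 :  100 * 4 / 3 = 133 -> roundup_pow_of_two = 256
	 * min_size   = 4   :  max(256, 4)    = 256 buckets
	 * min_size   = 1024:  max(256, 1024) = 1024 buckets (floor wins) */
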
/**
if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
return -EINVAL;
- params->min_shift = max_t(size_t, params->min_shift,
- ilog2(HASH_MIN_SIZE));
+ params->min_size = max(params->min_size, HASH_MIN_SIZE);
if (params->nelem_hint)
size = rounded_hashtable_size(params);
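
This clamp is also why HASH_MIN_SIZE changes from 4UL to 4U above: max() insists on identical operand types, and min_size is an unsigned int. The effect on illustrative inputs:

	/* params->min_size = 0   -> max(0, 4)   = 4 (HASH_MIN_SIZE)
	 * params->min_size = 256 -> max(256, 4) = 256 (unchanged) */
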
.key_offset = offsetof(struct test_obj, value),
.key_len = sizeof(int),
.hashfn = jhash,
- .max_shift = 1, /* we expand/shrink manually here */
+ .max_size = 2, /* we expand/shrink manually here */
.nulls_base = (3U << RHT_BASE_SHIFT),
};
int err;
.key_offset = offsetof(struct netlink_sock, portid),
.key_len = sizeof(u32), /* portid */
.hashfn = jhash,
- .max_shift = 16, /* 64K */
+ .max_size = 65536,
};
if (err != 0)
.key_offset = offsetof(struct tipc_sock, portid),
.key_len = sizeof(u32), /* portid */
.hashfn = jhash,
- .max_shift = 20, /* 1M */
- .min_shift = 8, /* 256 */
+ .max_size = 1048576,
+ .min_size = 256,
};
return rhashtable_init(&tn->sk_rht, &rht_params);
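
Each call-site conversion above follows the same shift-to-size rule: the self-test's max_shift of 1 becomes max_size = 2, netlink's 16 becomes 65536 (the old "64K" comment), and tipc's 20 and 8 become 1048576 and 256. For out-of-tree users, a hypothetical helper (not part of this patch) makes the mapping explicit:

	/* Hypothetical conversion helper: size equivalent of an old shift. */
	#define RHT_SHIFT_TO_SIZE(shift)	(1UL << (shift))
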