
rhashtable: kill ht->shift atomic operations
Commit c0c09bf ("rhashtable: avoid unnecessary wakeup for worker
queue") changed ht->shift to be atomic, which is actually unnecessary.

Instead of leaving the current shift in the core rhashtable structure,
it can be cached inside the individual bucket tables.

There, it is initialized only once during a new table allocation in
the shrink/expansion slow path, and from then on it stays immutable
for the rest of the bucket table's lifetime.

That allows shift to be non-atomic. The patch also moves hash_rnd
management into the table setup. The rhashtable structure now consumes
3 instead of 4 cachelines.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Cc: Ying Xue <ying.xue@windriver.com>
Acked-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
Daniel Borkmann authored and David S. Miller committed Mar 13, 2015
1 parent 9497df8 commit a5b6846
Showing 2 changed files with 28 additions and 33 deletions.
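
For orientation before the per-file diff, here is a condensed sketch of the
resulting layout; the structs are abbreviated to the fields touched by this
patch and are not the full kernel definitions.

/* Abbreviated post-patch layout (sketch, not the complete structs). */
struct bucket_table {
        size_t                  size;           /* number of hash buckets */
        u32                     hash_rnd;       /* seeded when the table is allocated */
        u32                     shift;          /* ilog2(size); written once in
                                                 * bucket_table_alloc() before the table
                                                 * is published, immutable afterwards,
                                                 * so plain (non-atomic) reads suffice */
        /* locks_mask, locks, buckets[], ... */
};

struct rhashtable {
        struct bucket_table __rcu *tbl;
        atomic_t                nelems;
        bool                    being_destroyed;        /* moved up into the hole left
                                                         * by the removed atomic_t shift */
        /* p, run_work, mutex, walkers, ... */
};
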
6 changes: 3 additions & 3 deletions include/linux/rhashtable.h
@@ -50,13 +50,15 @@ struct rhash_head {
  * struct bucket_table - Table of hash buckets
  * @size: Number of hash buckets
  * @hash_rnd: Random seed to fold into hash
+ * @shift: Current size (1 << shift)
  * @locks_mask: Mask to apply before accessing locks[]
  * @locks: Array of spinlocks protecting individual buckets
  * @buckets: size * hash buckets
  */
 struct bucket_table {
         size_t                  size;
         u32                     hash_rnd;
+        u32                     shift;
         unsigned int            locks_mask;
         spinlock_t              *locks;
 
@@ -99,7 +101,6 @@ struct rhashtable_params {
  * @tbl: Bucket table
  * @future_tbl: Table under construction during expansion/shrinking
  * @nelems: Number of elements in table
- * @shift: Current size (1 << shift)
  * @p: Configuration parameters
  * @run_work: Deferred worker to expand/shrink asynchronously
  * @mutex: Mutex to protect current/future table swapping
@@ -110,12 +111,11 @@ struct rhashtable {
         struct bucket_table __rcu       *tbl;
         struct bucket_table __rcu       *future_tbl;
         atomic_t                        nelems;
-        atomic_t                        shift;
+        bool                            being_destroyed;
         struct rhashtable_params        p;
         struct work_struct              run_work;
         struct mutex                    mutex;
         struct list_head                walkers;
-        bool                            being_destroyed;
 };
 
 /**
55 changes: 25 additions & 30 deletions lib/rhashtable.c
@@ -147,7 +147,7 @@ static void bucket_table_free(const struct bucket_table *tbl)
 }
 
 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
-                                               size_t nbuckets)
+                                               size_t nbuckets, u32 hash_rnd)
 {
         struct bucket_table *tbl = NULL;
         size_t size;
@@ -162,6 +162,8 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
                 return NULL;
 
         tbl->size = nbuckets;
+        tbl->shift = ilog2(nbuckets);
+        tbl->hash_rnd = hash_rnd;
 
         if (alloc_bucket_locks(ht, tbl) < 0) {
                 bucket_table_free(tbl);
@@ -177,25 +179,27 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 /**
  * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
  * @ht: hash table
- * @new_size: new table size
+ * @tbl: current table
  */
-static bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
+static bool rht_grow_above_75(const struct rhashtable *ht,
+                              const struct bucket_table *tbl)
 {
         /* Expand table when exceeding 75% load */
-        return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
-               (!ht->p.max_shift || atomic_read(&ht->shift) < ht->p.max_shift);
+        return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
+               (!ht->p.max_shift || tbl->shift < ht->p.max_shift);
 }
 
 /**
  * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
  * @ht: hash table
- * @new_size: new table size
+ * @tbl: current table
  */
-static bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
+static bool rht_shrink_below_30(const struct rhashtable *ht,
+                                const struct bucket_table *tbl)
 {
         /* Shrink table beneath 30% load */
-        return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
-               (atomic_read(&ht->shift) > ht->p.min_shift);
+        return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
+               tbl->shift > ht->p.min_shift;
 }
 
 static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
@@ -310,16 +314,11 @@ int rhashtable_expand(struct rhashtable *ht)
 
         ASSERT_RHT_MUTEX(ht);
 
-        new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
+        new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, old_tbl->hash_rnd);
         if (new_tbl == NULL)
                 return -ENOMEM;
 
-        new_tbl->hash_rnd = old_tbl->hash_rnd;
-
-        atomic_inc(&ht->shift);
-
         rhashtable_rehash(ht, new_tbl);
-
         return 0;
 }
 EXPORT_SYMBOL_GPL(rhashtable_expand);
@@ -342,20 +341,15 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
  */
 int rhashtable_shrink(struct rhashtable *ht)
 {
-        struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
+        struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
 
         ASSERT_RHT_MUTEX(ht);
 
-        new_tbl = bucket_table_alloc(ht, tbl->size / 2);
+        new_tbl = bucket_table_alloc(ht, old_tbl->size / 2, old_tbl->hash_rnd);
         if (new_tbl == NULL)
                 return -ENOMEM;
 
-        new_tbl->hash_rnd = tbl->hash_rnd;
-
-        atomic_dec(&ht->shift);
-
         rhashtable_rehash(ht, new_tbl);
-
         return 0;
 }
 EXPORT_SYMBOL_GPL(rhashtable_shrink);
@@ -376,9 +370,9 @@ static void rht_deferred_worker(struct work_struct *work)
         list_for_each_entry(walker, &ht->walkers, list)
                 walker->resize = true;
 
-        if (rht_grow_above_75(ht, tbl->size))
+        if (rht_grow_above_75(ht, tbl))
                 rhashtable_expand(ht);
-        else if (rht_shrink_below_30(ht, tbl->size))
+        else if (rht_shrink_below_30(ht, tbl))
                 rhashtable_shrink(ht);
 unlock:
         mutex_unlock(&ht->mutex);
@@ -431,7 +425,7 @@ static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
         rcu_assign_pointer(tbl->buckets[hash], obj);
 
         atomic_inc(&ht->nelems);
-        if (no_resize_running && rht_grow_above_75(ht, tbl->size))
+        if (no_resize_running && rht_grow_above_75(ht, tbl))
                 schedule_work(&ht->run_work);
 
 exit:
@@ -539,7 +533,7 @@ bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
                 bool no_resize_running = tbl == old_tbl;
 
                 atomic_dec(&ht->nelems);
-                if (no_resize_running && rht_shrink_below_30(ht, tbl->size))
+                if (no_resize_running && rht_shrink_below_30(ht, tbl))
                         schedule_work(&ht->run_work);
         }
 
@@ -913,6 +907,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
 {
         struct bucket_table *tbl;
         size_t size;
+        u32 hash_rnd;
 
         size = HASH_DEFAULT_SIZE;
 
@@ -939,14 +934,14 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
         else
                 ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;
 
-        tbl = bucket_table_alloc(ht, size);
+        get_random_bytes(&hash_rnd, sizeof(hash_rnd));
+
+        tbl = bucket_table_alloc(ht, size, hash_rnd);
         if (tbl == NULL)
                 return -ENOMEM;
 
-        get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
-
         atomic_set(&ht->nelems, 0);
-        atomic_set(&ht->shift, ilog2(tbl->size));
+
         RCU_INIT_POINTER(ht->tbl, tbl);
         RCU_INIT_POINTER(ht->future_tbl, tbl);
 
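
As a quick sanity check of the 75%/30% thresholds used above, the standalone
snippet below mirrors the grow/shrink tests with plain (non-atomic) reads of
the cached shift. The struct definitions, field types and the example values
(256 buckets, min_shift 4) are simplified stand-ins for illustration, not the
kernel's.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structs, just enough for the math. */
struct bucket_table { size_t size; unsigned int shift; };
struct rhashtable { size_t nelems; unsigned int min_shift, max_shift; };

/* Mirrors rht_grow_above_75(): expand when load exceeds 75%. */
static bool grow_above_75(const struct rhashtable *ht, const struct bucket_table *tbl)
{
        return ht->nelems > (tbl->size / 4 * 3) &&
               (!ht->max_shift || tbl->shift < ht->max_shift);
}

/* Mirrors rht_shrink_below_30(): shrink when load drops below 30%. */
static bool shrink_below_30(const struct rhashtable *ht, const struct bucket_table *tbl)
{
        return ht->nelems < (tbl->size * 3 / 10) &&
               tbl->shift > ht->min_shift;
}

int main(void)
{
        struct bucket_table tbl = { .size = 256, .shift = 8 };  /* 1 << 8 == 256 */
        struct rhashtable ht = { .min_shift = 4, .max_shift = 0 };

        ht.nelems = 193;        /* 256 / 4 * 3 == 192, so 193 crosses the grow threshold */
        printf("grow?   %d\n", grow_above_75(&ht, &tbl));      /* prints 1 */

        ht.nelems = 75;         /* 256 * 3 / 10 == 76, so 75 crosses the shrink threshold */
        printf("shrink? %d\n", shrink_below_30(&ht, &tbl));    /* prints 1 */

        return 0;
}
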
