rhashtable: replace rht_ptr_locked() with rht_assign_locked()
The only place rht_ptr_locked() is used is to store a new value in a
bucket head, and that is the only use that makes sense for it. So
replace it with a function that does the whole task: set the lock bit
and assign to the bucket head.

Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
NeilBrown authored and David S. Miller committed Apr 13, 2019
1 parent adc6a3a commit f4712b4
Showing 2 changed files with 9 additions and 6 deletions.
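
At each call site in lib/rhashtable.c, the change boils down to the pattern below. This is a minimal before/after sketch distilled from the hunks that follow, not compilable on its own; bkt and obj stand in for the local variables used there.

	/* Before: tag the new head with the lock bit by hand, then publish it. */
	rcu_assign_pointer(*bkt, rht_ptr_locked(obj));

	/* After: one helper sets the lock bit and assigns the bucket head. */
	rht_assign_locked(bkt, obj);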
include/linux/rhashtable.h: 6 additions & 3 deletions

@@ -316,6 +316,7 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
  * local_bh. For that we have rht_assign_unlock(). As rcu_assign_pointer()
  * provides the same release semantics that bit_spin_unlock() provides,
  * this is safe.
+ * When we write to a bucket without unlocking, we use rht_assign_locked().
  */
 
 static inline void rht_lock(struct bucket_table *tbl,
@@ -369,10 +370,12 @@ static inline struct rhash_head *rht_ptr_exclusive(
 	return (void *)(((unsigned long)p) & ~BIT(1));
 }
 
-static inline struct rhash_lock_head __rcu *rht_ptr_locked(const
-							    struct rhash_head *p)
+static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
+				     struct rhash_head *obj)
 {
-	return (void *)(((unsigned long)p) | BIT(1));
+	struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
+
+	rcu_assign_pointer(*p, (void *)((unsigned long)obj | BIT(1)));
 }
 
 static inline void rht_assign_unlock(struct bucket_table *tbl,
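
The helpers above keep the per-bucket lock in bit 1 of the bucket-head pointer itself. The standalone sketch below illustrates only that tagging scheme in userspace C; it leaves out the RCU publication and bit-spinlock semantics, and the struct, macro, and variable names are invented for the example (it relies on struct node being at least 4-byte aligned so bit 1 is free).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct node {
	struct node *next;
	int value;
};

#define LOCK_BIT (1UL << 1)	/* mirrors BIT(1) used by rht_assign_locked() */

/* Tag a pointer with the lock bit, as rht_assign_locked() does before
 * publishing the new bucket head (minus the rcu_assign_pointer() barrier).
 */
static uintptr_t tag_locked(const struct node *p)
{
	return (uintptr_t)p | LOCK_BIT;
}

/* Strip the lock bit to recover the real pointer, as rht_ptr_exclusive()
 * does with ~BIT(1).
 */
static struct node *untag(uintptr_t v)
{
	return (struct node *)(v & ~LOCK_BIT);
}

int main(void)
{
	struct node n = { .next = NULL, .value = 42 };
	uintptr_t bucket_head = tag_locked(&n);

	assert(bucket_head & LOCK_BIT);		/* head appears "locked" */
	assert(untag(bucket_head) == &n);	/* real pointer is recoverable */
	printf("value via tagged head: %d\n", untag(bucket_head)->value);
	return 0;
}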
lib/rhashtable.c: 3 additions & 3 deletions

@@ -259,7 +259,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht,
 		rcu_assign_pointer(*pprev, next);
 	else
 		/* Need to preserved the bit lock. */
-		rcu_assign_pointer(*bkt, rht_ptr_locked(next));
+		rht_assign_locked(bkt, next);
 
 out:
 	return err;
@@ -517,7 +517,7 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 		rcu_assign_pointer(*pprev, obj);
 	else
 		/* Need to preserve the bit lock */
-		rcu_assign_pointer(*bkt, rht_ptr_locked(obj));
+		rht_assign_locked(bkt, obj);
 
 	return NULL;
 }
@@ -570,7 +570,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
 	/* bkt is always the head of the list, so it holds
 	 * the lock, which we need to preserve
 	 */
-	rcu_assign_pointer(*bkt, rht_ptr_locked(obj));
+	rht_assign_locked(bkt, obj);
 
 	atomic_inc(&ht->nelems);
 	if (rht_grow_above_75(ht, tbl))
