Merge branch 'rhashtable-New-features-in-walk-and-bucket'
Tom Herbert says:

====================
rhashtable: New features in walk and bucket

This patch set contains some changes related to rhashtable:

- Allow rhashtable_walk_start to return void
- Add a function to peek at the next entry during a walk
- Abstract out a function to compute a hash for a table
- Add a library function to allocate a bucket array of spinlocks
- Use the above function for rhashtable bucket lock allocation

Tested: Exercised using various operations on an ILA xlat
table.

v2:
 - Apply feedback from Herbert. Don't change semantics of resize
   event reporting and -EAGAIN, just simplify API for callers that
   ignore those.
 - Add end_of_table in iter to reliably tell when the iterator has
   reached the end.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed Dec 11, 2017
2 parents a0b586f + 64e0cd0 commit 9944a0f
Showing 19 changed files with 224 additions and 160 deletions.
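For orientation, here is a minimal sketch of the walk pattern the converted callers below end up with. rhashtable_walk_start() no longer needs its return value checked; resize events are still reported as ERR_PTR(-EAGAIN) by rhashtable_walk_next(), and the loop simply retries on them. The element type struct my_obj and the handler my_obj_handle() are hypothetical names, not part of this commit.

/* Hypothetical caller of the simplified walk API; my_obj and
 * my_obj_handle() are illustrative only. The loop mirrors the
 * converted callers in the hunks below.
 */
static void my_table_walk(struct rhashtable *ht)
{
        struct rhashtable_iter iter;
        struct my_obj *obj;

        rhashtable_walk_enter(ht, &iter);
        do {
                rhashtable_walk_start(&iter);   /* returns void after this series */

                while ((obj = rhashtable_walk_next(&iter)) && !IS_ERR(obj))
                        my_obj_handle(obj);

                rhashtable_walk_stop(&iter);
        } while (obj == ERR_PTR(-EAGAIN));      /* table resized: walk again */

        rhashtable_walk_exit(&iter);
}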
6 changes: 1 addition & 5 deletions drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -1412,11 +1412,7 @@ bnxt_tc_flow_stats_batch_prep(struct bnxt *bp,
void *flow_node;
int rc, i;

- rc = rhashtable_walk_start(iter);
- if (rc && rc != -EAGAIN) {
- i = 0;
- goto done;
- }
+ rhashtable_walk_start(iter);

rc = 0;
for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) {
7 changes: 3 additions & 4 deletions drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -763,9 +763,7 @@ static void ch_flower_stats_handler(struct work_struct *work)

rhashtable_walk_enter(&adap->flower_tbl, &iter);
do {
- flower_entry = ERR_PTR(rhashtable_walk_start(&iter));
- if (IS_ERR(flower_entry))
- goto walk_stop;
+ rhashtable_walk_start(&iter);

while ((flower_entry = rhashtable_walk_next(&iter)) &&
!IS_ERR(flower_entry)) {
@@ -784,8 +782,9 @@ static void ch_flower_stats_handler(struct work_struct *work)
spin_unlock(&flower_entry->lock);
}
}
- walk_stop:
+
rhashtable_walk_stop(&iter);
+
} while (flower_entry == ERR_PTR(-EAGAIN));
rhashtable_walk_exit(&iter);
mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
7 changes: 2 additions & 5 deletions fs/gfs2/glock.c
@@ -1549,16 +1549,13 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
rhashtable_walk_enter(&gl_hash_table, &iter);

do {
- gl = ERR_PTR(rhashtable_walk_start(&iter));
- if (IS_ERR(gl))
- goto walk_stop;
+ rhashtable_walk_start(&iter);

while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
if (gl->gl_name.ln_sbd == sdp &&
lockref_get_not_dead(&gl->gl_lockref))
examiner(gl);

- walk_stop:
rhashtable_walk_stop(&iter);
} while (cond_resched(), gl == ERR_PTR(-EAGAIN));

@@ -1947,7 +1944,7 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
loff_t n = *pos;

rhashtable_walk_enter(&gl_hash_table, &gi->hti);
- if (rhashtable_walk_start(&gi->hti) != 0)
+ if (rhashtable_walk_start_check(&gi->hti) != 0)
return NULL;

do {
38 changes: 27 additions & 11 deletions include/linux/rhashtable.h
@@ -207,6 +207,7 @@ struct rhashtable_iter {
struct rhashtable_walker walker;
unsigned int slot;
unsigned int skip;
+ bool end_of_table;
};

static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
@@ -239,34 +240,42 @@ static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
}

- static inline unsigned int rht_key_hashfn(
- struct rhashtable *ht, const struct bucket_table *tbl,
- const void *key, const struct rhashtable_params params)
+ static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
+ const void *key, const struct rhashtable_params params,
+ unsigned int hash_rnd)
{
unsigned int hash;

/* params must be equal to ht->p if it isn't constant. */
if (!__builtin_constant_p(params.key_len))
- hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
+ hash = ht->p.hashfn(key, ht->key_len, hash_rnd);
else if (params.key_len) {
unsigned int key_len = params.key_len;

if (params.hashfn)
- hash = params.hashfn(key, key_len, tbl->hash_rnd);
+ hash = params.hashfn(key, key_len, hash_rnd);
else if (key_len & (sizeof(u32) - 1))
- hash = jhash(key, key_len, tbl->hash_rnd);
+ hash = jhash(key, key_len, hash_rnd);
else
- hash = jhash2(key, key_len / sizeof(u32),
- tbl->hash_rnd);
+ hash = jhash2(key, key_len / sizeof(u32), hash_rnd);
} else {
unsigned int key_len = ht->p.key_len;

if (params.hashfn)
- hash = params.hashfn(key, key_len, tbl->hash_rnd);
+ hash = params.hashfn(key, key_len, hash_rnd);
else
- hash = jhash(key, key_len, tbl->hash_rnd);
+ hash = jhash(key, key_len, hash_rnd);
}

+ return hash;
+ }
+
+ static inline unsigned int rht_key_hashfn(
+ struct rhashtable *ht, const struct bucket_table *tbl,
+ const void *key, const struct rhashtable_params params)
+ {
+ unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd);
+
return rht_bucket_index(tbl, hash);
}

@@ -378,8 +387,15 @@ void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
void rhashtable_walk_enter(struct rhashtable *ht,
struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
- int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
+ int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);
+
+ static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
+ {
+ (void)rhashtable_walk_start_check(iter);
+ }
+
void *rhashtable_walk_next(struct rhashtable_iter *iter);
+ void *rhashtable_walk_peek(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);

void rhashtable_free_and_destroy(struct rhashtable *ht,
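The new rhashtable_walk_peek() declared above, per the cover letter, peeks at the next entry during a walk without advancing the iterator. Below is a minimal sketch of how a batched dump might use it so that an entry that does not fit in the current batch is seen again on the next call; my_obj, my_obj_emit() and the budget handling are illustrative assumptions, not code from this commit.

/* Hypothetical batched dump: peek first, only advance once the entry
 * has actually been emitted. my_obj and my_obj_emit() are made up.
 */
static int my_table_dump(struct rhashtable_iter *iter, int budget)
{
        struct my_obj *obj;
        int count = 0;

        rhashtable_walk_start(iter);

        while (count < budget) {
                obj = rhashtable_walk_peek(iter);       /* look, don't advance */
                if (!obj || IS_ERR(obj))
                        break;                  /* end of table, or -EAGAIN: caller retries */
                if (my_obj_emit(obj) < 0)
                        break;                  /* no room: entry stays current for next call */

                rhashtable_walk_next(iter);     /* emitted: now advance past it */
                count++;
        }

        rhashtable_walk_stop(iter);
        return count;
}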
6 changes: 6 additions & 0 deletions include/linux/spinlock.h
@@ -414,4 +414,10 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

+ int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
+ size_t max_size, unsigned int cpu_mult,
+ gfp_t gfp);
+
+ void free_bucket_spinlocks(spinlock_t *locks);
+
#endif /* __LINUX_SPINLOCK_H */
2 changes: 1 addition & 1 deletion include/net/sctp/sctp.h
@@ -116,7 +116,7 @@ extern struct percpu_counter sctp_sockets_allocated;
int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);

- int sctp_transport_walk_start(struct rhashtable_iter *iter);
+ void sctp_transport_walk_start(struct rhashtable_iter *iter);
void sctp_transport_walk_stop(struct rhashtable_iter *iter);
struct sctp_transport *sctp_transport_get_next(struct net *net,
struct rhashtable_iter *iter);
2 changes: 1 addition & 1 deletion lib/Makefile
@@ -39,7 +39,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
bsearch.o find_bit.o llist.o memweight.o kfifo.o \
percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
- once.o refcount.o usercopy.o errseq.o
+ once.o refcount.o usercopy.o errseq.o bucket_locks.o
obj-$(CONFIG_STRING_SELFTEST) += test_string.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
54 changes: 54 additions & 0 deletions lib/bucket_locks.c
@@ -0,0 +1,54 @@
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Allocate an array of spinlocks to be accessed by a hash. Two arguments
* indicate the number of elements to allocate in the array. max_size
* gives the maximum number of elements to allocate. cpu_mult gives
* the number of locks per CPU to allocate. The size is rounded up
* to a power of 2 to be suitable as a hash table.
*/

int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
size_t max_size, unsigned int cpu_mult, gfp_t gfp)
{
spinlock_t *tlocks = NULL;
unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
unsigned int nr_pcpus = 2;
#else
unsigned int nr_pcpus = num_possible_cpus();
#endif

if (cpu_mult) {
nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
size = min_t(unsigned int, nr_pcpus * cpu_mult, max_size);
} else {
size = max_size;
}

if (sizeof(spinlock_t) != 0) {
if (gfpflags_allow_blocking(gfp))
tlocks = kvmalloc(size * sizeof(spinlock_t), gfp);
else
tlocks = kmalloc_array(size, sizeof(spinlock_t), gfp);
if (!tlocks)
return -ENOMEM;
for (i = 0; i < size; i++)
spin_lock_init(&tlocks[i]);
}

*locks = tlocks;
*locks_mask = size - 1;

return 0;
}
EXPORT_SYMBOL(alloc_bucket_spinlocks);

void free_bucket_spinlocks(spinlock_t *locks)
{
kvfree(locks);
}
EXPORT_SYMBOL(free_bucket_spinlocks);
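A minimal usage sketch for the two helpers above, assuming a caller-defined table with a fixed array of 1024 bucket locks (cpu_mult of 0 makes the helper use max_size directly); struct my_table and the numbers are illustrative, not part of this commit.

/* Hypothetical user of alloc_bucket_spinlocks()/free_bucket_spinlocks(). */
struct my_table {
        spinlock_t *locks;
        unsigned int locks_mask;
};

static int my_table_init(struct my_table *t, gfp_t gfp)
{
        /* max_size = 1024 locks, cpu_mult = 0: size is exactly max_size */
        return alloc_bucket_spinlocks(&t->locks, &t->locks_mask, 1024, 0, gfp);
}

static void my_table_lock_hash(struct my_table *t, unsigned int hash)
{
        /* locks_mask is size - 1, so this indexes within the array */
        spin_lock(&t->locks[hash & t->locks_mask]);
}

static void my_table_unlock_hash(struct my_table *t, unsigned int hash)
{
        spin_unlock(&t->locks[hash & t->locks_mask]);
}

static void my_table_destroy(struct my_table *t)
{
        free_bucket_spinlocks(t->locks);
}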