rhashtable: Revert nested table changes.
This reverts commits:

6a25478
9dbbfb0
4013790

It's too risky to put in this late in the release
cycle.  We'll put these changes into the next merge
window instead.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed Feb 16, 2017
1 parent 75224c9 commit bf3f14d
Showing 5 changed files with 94 additions and 316 deletions.
28 changes: 11 additions & 17 deletions fs/gfs2/glock.c
@@ -1420,32 +1420,26 @@ static struct shrinker glock_shrinker = {
  * @sdp: the filesystem
  * @bucket: the bucket
  *
- * Note that the function can be called multiple times on the same
- * object. So the user must ensure that the function can cope with
- * that.
  */
 
 static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
 {
         struct gfs2_glock *gl;
-        struct rhashtable_iter iter;
-
-        rhashtable_walk_enter(&gl_hash_table, &iter);
+        struct rhash_head *pos;
+        const struct bucket_table *tbl;
+        int i;
 
-        do {
-                gl = ERR_PTR(rhashtable_walk_start(&iter));
-                if (gl)
-                        continue;
-
-                while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
+        rcu_read_lock();
+        tbl = rht_dereference_rcu(gl_hash_table.tbl, &gl_hash_table);
+        for (i = 0; i < tbl->size; i++) {
+                rht_for_each_entry_rcu(gl, pos, tbl, i, gl_node) {
                         if ((gl->gl_name.ln_sbd == sdp) &&
                             lockref_get_not_dead(&gl->gl_lockref))
                                 examiner(gl);
-
-                rhashtable_walk_stop(&iter);
-        } while (cond_resched(), gl == ERR_PTR(-EAGAIN));
-
-        rhashtable_walk_exit(&iter);
+                }
+        }
+        rcu_read_unlock();
+        cond_resched();
 }
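The restored walker iterates the flat bucket array directly under rcu_read_lock(), whereas the reverted iterator version could restart with -EAGAIN and visit an object twice, which is why the "called multiple times on the same object" note is removed along with it. Below is a minimal userspace model of the restored shape; the names (walk_node, walk_table, examiner_fn, hash_walk) are hypothetical, and RCU and the lockref check are deliberately left out:

#include <stdio.h>

/* Hypothetical userspace model of the flat bucket-array walk that the
 * revert restores; the kernel version holds rcu_read_lock() across the
 * whole traversal and takes a lockref on each object first. */
struct walk_node {
        int key;
        struct walk_node *next;         /* hash chain, like struct rhash_head */
};

struct walk_table {
        unsigned int size;
        struct walk_node **buckets;     /* "size" chain heads */
};

typedef void (*examiner_fn)(struct walk_node *n);

static void hash_walk(struct walk_table *tbl, examiner_fn examiner)
{
        /* Visit every chain of every bucket, mirroring the restored
         * for-loop plus rht_for_each_entry_rcu() in glock_hash_walk(). */
        for (unsigned int i = 0; i < tbl->size; i++)
                for (struct walk_node *n = tbl->buckets[i]; n; n = n->next)
                        examiner(n);
}

static void print_node(struct walk_node *n)
{
        printf("key=%d\n", n->key);
}

int main(void)
{
        struct walk_node a = { .key = 1, .next = NULL };
        struct walk_node b = { .key = 2, .next = &a };
        struct walk_node *heads[2] = { &b, NULL };
        struct walk_table tbl = { .size = 2, .buckets = heads };

        hash_walk(&tbl, print_node);    /* prints key=2 then key=1 */
        return 0;
}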
78 changes: 22 additions & 56 deletions include/linux/rhashtable.h
@@ -61,20 +61,17 @@ struct rhlist_head {
 /**
  * struct bucket_table - Table of hash buckets
  * @size: Number of hash buckets
- * @nest: Number of bits of first-level nested table.
  * @rehash: Current bucket being rehashed
  * @hash_rnd: Random seed to fold into hash
  * @locks_mask: Mask to apply before accessing locks[]
  * @locks: Array of spinlocks protecting individual buckets
  * @walkers: List of active walkers
  * @rcu: RCU structure for freeing the table
  * @future_tbl: Table under construction during rehashing
- * @ntbl: Nested table used when out of memory.
  * @buckets: size * hash buckets
  */
 struct bucket_table {
         unsigned int            size;
-        unsigned int            nest;
         unsigned int            rehash;
         u32                     hash_rnd;
         unsigned int            locks_mask;
@@ -84,7 +81,7 @@ struct bucket_table {
 
         struct bucket_table __rcu *future_tbl;
 
-        struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
+        struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
 };
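The @nest and @ntbl fields being deleted existed so that a table whose flat bucket array could not be allocated in one piece could fall back to a two-level layout of small chunks. A rough userspace sketch of that allocation idea, with entirely hypothetical names (model_table, CHUNK_SHIFT) and none of the kernel's GFP or RCU machinery:

#include <stdio.h>
#include <stdlib.h>

#define CHUNK_SHIFT 9                   /* 512 slots per second-level chunk */
#define CHUNK_SIZE  (1u << CHUNK_SHIFT)

struct model_table {
        unsigned int size;
        unsigned int nested;            /* 0: flat[] holds all buckets */
        void **flat;
        void ***chunks;                 /* first level of the nested layout */
};

static int model_table_alloc(struct model_table *t, unsigned int size)
{
        t->size = size;
        t->flat = calloc(size, sizeof(*t->flat));
        if (t->flat) {
                t->nested = 0;
                return 0;
        }
        /* Flat allocation failed: keep only a small first-level array and
         * fill in CHUNK_SIZE-slot chunks on demand at insert time. */
        t->nested = 1;
        t->chunks = calloc((size + CHUNK_SIZE - 1) / CHUNK_SIZE,
                           sizeof(*t->chunks));
        return t->chunks ? 0 : -1;
}

int main(void)
{
        struct model_table t;

        if (model_table_alloc(&t, 1u << 20) == 0)
                printf("allocated, nested=%u\n", t.nested);
        return 0;
}

As the removed kernel-doc hints, the point of the nested form was to survive large-table allocation failure (for example in atomic context, where a vmalloc-style fallback is unavailable); the revert defers that mechanism to the next merge window rather than judging it wrong.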
@@ -377,12 +374,6 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
                                          void *arg);
 void rhashtable_destroy(struct rhashtable *ht);
 
-struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
-                                            unsigned int hash);
-struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
-                                                   struct bucket_table *tbl,
-                                                   unsigned int hash);
-
 #define rht_dereference(p, ht) \
         rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
@@ -398,27 +389,6 @@ struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
 #define rht_entry(tpos, pos, member) \
         ({ tpos = container_of(pos, typeof(*tpos), member); 1; })
 
-static inline struct rhash_head __rcu *const *rht_bucket(
-        const struct bucket_table *tbl, unsigned int hash)
-{
-        return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
-                                     &tbl->buckets[hash];
-}
-
-static inline struct rhash_head __rcu **rht_bucket_var(
-        struct bucket_table *tbl, unsigned int hash)
-{
-        return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
-                                     &tbl->buckets[hash];
-}
-
-static inline struct rhash_head __rcu **rht_bucket_insert(
-        struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
-{
-        return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
-                                     &tbl->buckets[hash];
-}
-
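After the revert, every reader and writer indexes buckets[] directly again; the deleted helpers had funneled each access through a branch on tbl->nest. Continuing the hypothetical two-level model from the sketch above (repeated here so the example stands alone, same made-up names), the dispatch they performed looks roughly like this:

#include <stdio.h>

#define CHUNK_SHIFT 9
#define CHUNK_SIZE  (1u << CHUNK_SHIFT)

/* Same hypothetical two-level model as in the earlier sketch. */
struct model_table {
        unsigned int size;
        unsigned int nested;
        void **flat;
        void ***chunks;
};

static void **model_bucket(struct model_table *t, unsigned int hash)
{
        if (!t->nested)                 /* common case: one flat array */
                return &t->flat[hash];

        /* Nested case: high bits pick a chunk, low bits pick the slot.
         * (The real rht_bucket_nested_insert() also allocated missing
         * chunks; this sketch assumes the chunk exists.) */
        return &t->chunks[hash >> CHUNK_SHIFT][hash & (CHUNK_SIZE - 1)];
}

int main(void)
{
        void *slots[4] = { 0 };
        struct model_table t = { .size = 4, .nested = 0, .flat = slots };

        *model_bucket(&t, 2) = "entry";
        printf("%s\n", (char *)*model_bucket(&t, 2));
        return 0;
}

Removing the branch is what lets the macros below go back to plain (tbl)->buckets[hash].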
@@ -438,7 +408,7 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * @hash: the hash value / bucket index
  */
 #define rht_for_each(pos, tbl, hash) \
-        rht_for_each_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
+        rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)
 
 /**
  * rht_for_each_entry_continue - continue iterating over hash chain
@@ -463,7 +433,7 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * @member: name of the &struct rhash_head within the hashable struct.
  */
 #define rht_for_each_entry(tpos, pos, tbl, hash, member)                \
-        rht_for_each_entry_continue(tpos, pos, *rht_bucket(tbl, hash),  \
+        rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash],    \
                                     tbl, hash, member)
 
 /**
@@ -478,13 +448,13 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * This hash chain list-traversal primitive allows for the looped code to
  * remove the loop cursor from the list.
  */
-#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)          \
-        for (pos = rht_dereference_bucket(*rht_bucket(tbl, hash), tbl, hash), \
-             next = !rht_is_a_nulls(pos) ?                                   \
-                       rht_dereference_bucket(pos->next, tbl, hash) : NULL;  \
-             (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);         \
-             pos = next,                                                     \
-             next = !rht_is_a_nulls(pos) ?                                   \
+#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)        \
+        for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
+             next = !rht_is_a_nulls(pos) ?                                 \
+                       rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
+             (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);       \
+             pos = next,                                                   \
+             next = !rht_is_a_nulls(pos) ?                                 \
                        rht_dereference_bucket(pos->next, tbl, hash) : NULL)
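Independent of the bucket-lookup change, this hunk shows what the _safe variant is for: the successor pointer is latched before the loop body runs, so the body may free the cursor. A standalone C demonstration of the same discipline on an ordinary singly linked list, with hypothetical names (item, remove_odd):

#include <stdio.h>
#include <stdlib.h>

struct item {
        int val;
        struct item *next;
};

/* Why rht_for_each_entry_safe() caches "next": if the loop body frees
 * the cursor, pos->next is gone, so the successor must be saved first. */
static struct item *remove_odd(struct item *head)
{
        struct item **pprev = &head;
        struct item *pos, *next;

        for (pos = head; pos; pos = next) {
                next = pos->next;       /* save before a possible free */
                if (pos->val & 1) {
                        *pprev = next;  /* unlink */
                        free(pos);
                } else {
                        pprev = &pos->next;
                }
        }
        return head;
}

int main(void)
{
        struct item *head = NULL;

        for (int i = 5; i >= 1; i--) {
                struct item *n = malloc(sizeof(*n));
                n->val = i;
                n->next = head;
                head = n;
        }
        head = remove_odd(head);
        for (struct item *p = head; p; p = p->next)
                printf("%d\n", p->val); /* prints 2 then 4 */
        return 0;
}

The pprev pointer-to-pointer cursor used here is the same unlinking idiom the remove and replace hunks below restore.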
@@ -515,7 +485,7 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * traversal is guarded by rcu_read_lock().
  */
 #define rht_for_each_rcu(pos, tbl, hash) \
-        rht_for_each_rcu_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
+        rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)
 
 /**
  * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
@@ -548,8 +518,8 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
  * the _rcu mutation primitives such as rhashtable_insert() as long as the
  * traversal is guarded by rcu_read_lock().
  */
-#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)              \
-        rht_for_each_entry_rcu_continue(tpos, pos, *rht_bucket(tbl, hash), \
+#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)            \
+        rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
                                         tbl, hash, member)
 
 /**
@@ -595,7 +565,7 @@ static inline struct rhash_head *__rhashtable_lookup(
                 .ht = ht,
                 .key = key,
         };
-        struct bucket_table *tbl;
+        const struct bucket_table *tbl;
         struct rhash_head *he;
         unsigned int hash;
 
@@ -727,12 +697,8 @@ static inline void *__rhashtable_insert_fast(
         }
 
         elasticity = ht->elasticity;
-        pprev = rht_bucket_insert(ht, tbl, hash);
-        data = ERR_PTR(-ENOMEM);
-        if (!pprev)
-                goto out;
-
-        rht_for_each_continue(head, *pprev, tbl, hash) {
+        pprev = &tbl->buckets[hash];
+        rht_for_each(head, tbl, hash) {
                 struct rhlist_head *plist;
                 struct rhlist_head *list;
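With rht_bucket_insert() gone, the insert path can no longer fail with -ENOMEM while locating the bucket; it takes &tbl->buckets[hash] directly, scans the chain, then links the new entry in at the head. A loose userspace model of that shape (hypothetical names, no locks, no rhlist or growth handling):

#include <stdio.h>

struct entry {
        int key;
        struct entry *next;
};

/* Scan the chain for a duplicate key, then link the new entry in at the
 * bucket head, mirroring the rht_for_each() + head-insert shape of
 * __rhashtable_insert_fast() after the revert. */
static int bucket_insert(struct entry **bucket_head, struct entry *obj)
{
        for (struct entry *he = *bucket_head; he; he = he->next)
                if (he->key == obj->key)
                        return -1;      /* duplicate, like -EEXIST */

        obj->next = *bucket_head;  /* like RCU_INIT_POINTER(obj->next, head) */
        *bucket_head = obj;        /* kernel: rcu_assign_pointer(tbl->buckets[hash], obj) */
        return 0;
}

int main(void)
{
        struct entry a = { .key = 1 }, b = { .key = 2 }, dup = { .key = 1 };
        struct entry *head = NULL;

        bucket_insert(&head, &a);
        bucket_insert(&head, &b);
        printf("dup insert: %d\n", bucket_insert(&head, &dup)); /* -1 */
        for (struct entry *p = head; p; p = p->next)
                printf("%d\n", p->key); /* 2 then 1 */
        return 0;
}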
@@ -770,7 +736,7 @@ static inline void *__rhashtable_insert_fast(
         if (unlikely(rht_grow_above_100(ht, tbl)))
                 goto slow_path;
 
-        head = rht_dereference_bucket(*pprev, tbl, hash);
+        head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
 
         RCU_INIT_POINTER(obj->next, head);
         if (rhlist) {
@@ -780,7 +746,7 @@ static inline void *__rhashtable_insert_fast(
                 RCU_INIT_POINTER(list->next, NULL);
         }
 
-        rcu_assign_pointer(*pprev, obj);
+        rcu_assign_pointer(tbl->buckets[hash], obj);
 
         atomic_inc(&ht->nelems);
         if (rht_grow_above_75(ht, tbl))
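The two small hunks above restore direct publication into buckets[hash]. As a loose analogy only (this is not an RCU implementation), C11 release/acquire atomics illustrate why rcu_assign_pointer() is used for the final store: the node must be fully initialized before it becomes reachable. All names here (cell, publish, reader) are made up:

#include <stdatomic.h>
#include <stdio.h>

struct cell {
        int payload;
        struct cell *next;
};

static _Atomic(struct cell *) bucket_head;

static void publish(struct cell *c, int payload)
{
        c->payload = payload;   /* initialize everything first */
        c->next = atomic_load_explicit(&bucket_head, memory_order_relaxed);
        /* the release store plays the role of rcu_assign_pointer() */
        atomic_store_explicit(&bucket_head, c, memory_order_release);
}

static void reader(void)
{
        /* rough stand-in for rcu_dereference(): reads through the
         * pointer are ordered after this load */
        struct cell *c = atomic_load_explicit(&bucket_head,
                                              memory_order_acquire);
        for (; c; c = c->next)
                printf("%d\n", c->payload);
}

int main(void)
{
        static struct cell a, b;

        publish(&a, 1);
        publish(&b, 2);
        reader();               /* prints 2 then 1 */
        return 0;
}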
@@ -989,8 +955,8 @@ static inline int __rhashtable_remove_fast_one(
 
         spin_lock_bh(lock);
 
-        pprev = rht_bucket_var(tbl, hash);
-        rht_for_each_continue(he, *pprev, tbl, hash) {
+        pprev = &tbl->buckets[hash];
+        rht_for_each(he, tbl, hash) {
                 struct rhlist_head *list;
 
                 list = container_of(he, struct rhlist_head, rhead);
@@ -1141,8 +1107,8 @@ static inline int __rhashtable_replace_fast(
 
         spin_lock_bh(lock);
 
-        pprev = rht_bucket_var(tbl, hash);
-        rht_for_each_continue(he, *pprev, tbl, hash) {
+        pprev = &tbl->buckets[hash];
+        rht_for_each(he, tbl, hash) {
                 if (he != obj_old) {
                         pprev = &he->next;
                         continue;
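Both of these last hunks restore the same cursor idiom: pprev starts at &tbl->buckets[hash] and advances to &he->next until the target entry is found, so unlink or replace is a single pointer store. A minimal userspace model of the replace case (hypothetical names, no bucket lock, plain stores where the kernel uses rcu_assign_pointer()):

#include <stdio.h>

struct entry {
        int key;
        struct entry *next;
};

/* Model of the __rhashtable_replace_fast() walk restored above: advance
 * a pointer-to-pointer cursor until it addresses the link that points at
 * obj_old, then swing that one link over to obj_new. */
static int chain_replace(struct entry **bucket_head,
                         struct entry *obj_old, struct entry *obj_new)
{
        struct entry **pprev = bucket_head;

        for (struct entry *he = *bucket_head; he; he = he->next) {
                if (he != obj_old) {
                        pprev = &he->next;
                        continue;
                }
                obj_new->next = obj_old->next;
                *pprev = obj_new;       /* kernel: rcu_assign_pointer() */
                return 0;
        }
        return -1;                      /* not found, like -ENOENT */
}

int main(void)
{
        struct entry c = { .key = 3, .next = NULL };
        struct entry b = { .key = 2, .next = &c };
        struct entry a = { .key = 1, .next = &b };
        struct entry *head = &a;
        struct entry b2 = { .key = 20 };

        chain_replace(&head, &b, &b2);
        for (struct entry *p = head; p; p = p->next)
                printf("%d\n", p->key); /* 1, 20, 3 */
        return 0;
}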
