Skip to content

Commit

Permalink
mac80211: mesh: move some code to make it static
Browse files Browse the repository at this point in the history
There's no need to have table functions in one
file and all users in another; move the functions
to the right file and make them static. Also move
a static variable to the beginning of the file to
make it easier to find.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
  • Loading branch information
Johannes Berg authored and John W. Linville committed May 12, 2011
1 parent 85a9994 commit 6b86bd6
Show file tree
Hide file tree
Showing 3 changed files with 53 additions and 54 deletions.
43 changes: 0 additions & 43 deletions net/mac80211/mesh.c
Original file line number Diff line number Diff line change
Expand Up @@ -287,49 +287,6 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
}
}

u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, struct mesh_table *tbl)
{
	/*
	 * Mix the last four bytes of the hardware address with the
	 * interface index, then mask down to the table's bucket range.
	 * NOTE(review): the u32 load from addr+2 is unaligned and
	 * type-punned — fine under the kernel's -fno-strict-aliasing,
	 * on architectures that tolerate unaligned loads.
	 */
	u32 addr_part = *(u32 *)(addr + 2);
	u32 hash = jhash_2words(addr_part, sdata->dev->ifindex, tbl->hash_rnd);

	return hash & tbl->hash_mask;
}

/**
 * mesh_table_alloc - allocate a mesh path table
 * @size_order: log2 of the number of hash buckets
 *
 * Allocates the table structure, its bucket array and the per-bucket
 * spinlock array, and seeds the hash randomizer.
 *
 * Returns the new table or %NULL on allocation failure.  The caller
 * owns the table and must release it with mesh_table_free().
 */
struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(*newtbl), GFP_KERNEL);
	if (!newtbl)
		return NULL;

	/* kcalloc checks the count * size multiplication for overflow */
	newtbl->hash_buckets = kcalloc(1 << size_order,
				       sizeof(struct hlist_head), GFP_KERNEL);
	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kcalloc(1 << size_order,
				    sizeof(spinlock_t), GFP_KERNEL);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries, 0);
	get_random_bytes(&newtbl->hash_rnd,
			sizeof(newtbl->hash_rnd));

	/* hash_mask == nbuckets - 1, so <= visits every bucket */
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);

	return newtbl;
}


static void ieee80211_mesh_path_timer(unsigned long data)
{
Expand Down
4 changes: 0 additions & 4 deletions net/mac80211/mesh.h
Original file line number Diff line number Diff line change
Expand Up @@ -240,12 +240,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,

/* Private interfaces */
/* Mesh tables */
struct mesh_table *mesh_table_alloc(int size_order);
void mesh_table_free(struct mesh_table *tbl, bool free_leafs);
void mesh_mpath_table_grow(void);
void mesh_mpp_table_grow(void);
u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
struct mesh_table *tbl);
/* Mesh paths */
int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, __le16 target_rcode,
const u8 *ra, struct ieee80211_sub_if_data *sdata);
Expand Down
60 changes: 53 additions & 7 deletions net/mac80211/mesh_pathtbl.c
Original file line number Diff line number Diff line change
Expand Up @@ -40,14 +40,58 @@ static struct mesh_table *mesh_paths;
static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

/* This lock will have the grow table function as writer and add / delete nodes
* as readers. When reading the table (i.e. doing lookups) we are well protected
* by RCU
*/
static DEFINE_RWLOCK(pathtbl_resize_lock);


/**
 * mesh_table_alloc - allocate a mesh path table
 * @size_order: log2 of the number of hash buckets
 *
 * Allocates the table structure, its bucket array and the per-bucket
 * spinlock array, and seeds the hash randomizer.
 *
 * Returns the new table or %NULL on allocation failure.  The caller
 * owns the table and must release it with mesh_table_free().
 */
static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(*newtbl), GFP_KERNEL);
	if (!newtbl)
		return NULL;

	/* kcalloc checks the count * size multiplication for overflow */
	newtbl->hash_buckets = kcalloc(1 << size_order,
				       sizeof(struct hlist_head), GFP_KERNEL);
	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kcalloc(1 << size_order,
				    sizeof(spinlock_t), GFP_KERNEL);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries, 0);
	get_random_bytes(&newtbl->hash_rnd,
			sizeof(newtbl->hash_rnd));

	/* hash_mask == nbuckets - 1, so <= visits every bucket */
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);

	return newtbl;
}

/* Free the table's arrays and the table itself; leaf entries are the
 * caller's responsibility (see mesh_table_free). */
static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hashwlock);
	kfree(tbl->hash_buckets);
	kfree(tbl);
}

void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
struct hlist_head *mesh_hash;
struct hlist_node *p, *q;
Expand All @@ -66,7 +110,7 @@ void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
}

static int mesh_table_grow(struct mesh_table *oldtbl,
struct mesh_table *newtbl)
struct mesh_table *newtbl)
{
struct hlist_head *oldhash;
struct hlist_node *p, *q;
Expand Down Expand Up @@ -97,12 +141,14 @@ static int mesh_table_grow(struct mesh_table *oldtbl,
return -ENOMEM;
}

static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/*
	 * Mix the last four bytes of the hardware address with the
	 * interface index, then mask down to the table's bucket range.
	 * NOTE(review): the u32 load from addr+2 is unaligned and
	 * type-punned — fine under the kernel's -fno-strict-aliasing,
	 * on architectures that tolerate unaligned loads.
	 */
	u32 addr_part = *(u32 *)(addr + 2);
	u32 hash = jhash_2words(addr_part, sdata->dev->ifindex, tbl->hash_rnd);

	return hash & tbl->hash_mask;
}

/* This lock will have the grow table function as writer and add / delete nodes
* as readers. When reading the table (i.e. doing lookups) we are well protected
* by RCU
*/
static DEFINE_RWLOCK(pathtbl_resize_lock);

/**
*
Expand Down

0 comments on commit 6b86bd6

Please sign in to comment.