Skip to content

Commit

Permalink
Merge branch 'tipc_net-next' of git://git.kernel.org/pub/scm/linux/ke…
Browse files Browse the repository at this point in the history
…rnel/git/paulg/linux
  • Loading branch information
David S. Miller committed Apr 21, 2012
2 parents 2528a5d + 9d52ce4 commit 167de77
Show file tree
Hide file tree
Showing 11 changed files with 172 additions and 107 deletions.
20 changes: 19 additions & 1 deletion net/tipc/addr.h
Original file line number Diff line number Diff line change
Expand Up @@ -50,11 +50,29 @@ static inline u32 tipc_cluster_mask(u32 addr)
return addr & TIPC_CLUSTER_MASK;
}

static inline int in_own_cluster(u32 addr)
static inline int in_own_cluster_exact(u32 addr)
{
	/* Two addresses are in the same cluster when every bit above the
	 * low 12 bits (presumably the node part of the TIPC address --
	 * consistent with TIPC_CLUSTER_MASK usage elsewhere) is equal.
	 * Unlike in_own_cluster() below, the wildcard <0.0.0> does NOT
	 * match here.
	 */
	return !((addr ^ tipc_own_addr) >> 12);
}

/**
* in_own_node - test for node inclusion; <0.0.0> always matches
*/

static inline int in_own_node(u32 addr)
{
	if (!addr)
		return 1;	/* wildcard <0.0.0> matches any node */
	return addr == tipc_own_addr;
}

/**
* in_own_cluster - test for cluster inclusion; <0.0.0> always matches
*/

static inline int in_own_cluster(u32 addr)
{
	if (!addr)
		return 1;	/* wildcard <0.0.0> matches any cluster */
	return in_own_cluster_exact(addr);
}

/**
* addr_domain - convert 2-bit scope value to equivalent message lookup domain
*
Expand Down
2 changes: 1 addition & 1 deletion net/tipc/bearer.c
Original file line number Diff line number Diff line change
Expand Up @@ -449,7 +449,7 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
if (tipc_in_scope(disc_domain, tipc_own_addr)) {
disc_domain = tipc_own_addr & TIPC_CLUSTER_MASK;
res = 0; /* accept any node in own cluster */
} else if (in_own_cluster(disc_domain))
} else if (in_own_cluster_exact(disc_domain))
res = 0; /* accept specified node in own cluster */
}
if (res) {
Expand Down
2 changes: 1 addition & 1 deletion net/tipc/config.c
Original file line number Diff line number Diff line change
Expand Up @@ -290,7 +290,7 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area

/* Check command authorization */

if (likely(orig_node == tipc_own_addr)) {
if (likely(in_own_node(orig_node))) {
/* command is permitted */
} else if (cmd >= 0x8000) {
rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
Expand Down
119 changes: 80 additions & 39 deletions net/tipc/name_distr.c
Original file line number Diff line number Diff line change
Expand Up @@ -68,12 +68,37 @@ struct distr_item {
};

/**
* List of externally visible publications by this node --
* that is, all publications having scope > TIPC_NODE_SCOPE.
* struct publ_list - list of publications made by this node
* @list: circular list of publications
 * @size: number of entries in list
*/
struct publ_list {
	struct list_head list;	/* circular list of publications */
	u32 size;		/* number of entries in list */
};

/* Publications with zone scope */
static struct publ_list publ_zone = {
	.list = LIST_HEAD_INIT(publ_zone.list),
	.size = 0,
};

/* Publications with cluster scope */
static struct publ_list publ_cluster = {
	.list = LIST_HEAD_INIT(publ_cluster.list),
	.size = 0,
};

/* Publications with node scope (never distributed to other nodes,
 * see tipc_named_publish())
 */
static struct publ_list publ_node = {
	.list = LIST_HEAD_INIT(publ_node.list),
	.size = 0,
};

/* Per-scope lookup table; entry 0 is unused, since the lists are
 * indexed directly by TIPC scope value (TIPC_ZONE_SCOPE == 1)
 */
static struct publ_list *publ_lists[] = {
	NULL,
	&publ_zone,	/* publ_lists[TIPC_ZONE_SCOPE] */
	&publ_cluster,	/* publ_lists[TIPC_CLUSTER_SCOPE] */
	&publ_node	/* publ_lists[TIPC_NODE_SCOPE] */
};

static LIST_HEAD(publ_root);
static u32 publ_cnt;

/**
* publ_to_item - add publication info to a publication message
Expand Down Expand Up @@ -132,8 +157,11 @@ void tipc_named_publish(struct publication *publ)
struct sk_buff *buf;
struct distr_item *item;

list_add_tail(&publ->local_list, &publ_root);
publ_cnt++;
list_add_tail(&publ->local_list, &publ_lists[publ->scope]->list);
publ_lists[publ->scope]->size++;

if (publ->scope == TIPC_NODE_SCOPE)
return;

buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
if (!buf) {
Expand All @@ -156,7 +184,10 @@ void tipc_named_withdraw(struct publication *publ)
struct distr_item *item;

list_del(&publ->local_list);
publ_cnt--;
publ_lists[publ->scope]->size--;

if (publ->scope == TIPC_NODE_SCOPE)
return;

buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
if (!buf) {
Expand All @@ -169,6 +200,39 @@ void tipc_named_withdraw(struct publication *publ)
named_cluster_distribute(buf);
}

/*
* named_distribute - prepare name info for bulk distribution to another node
*/
static void named_distribute(struct list_head *message_list, u32 node,
			     struct publ_list *pls, u32 max_item_buf)
{
	struct publication *publ;
	struct sk_buff *buf = NULL;
	struct distr_item *item = NULL;
	u32 left = 0;				/* bytes still free in current buffer */
	u32 rest = pls->size * ITEM_SIZE;	/* bytes not yet assigned to a buffer */

	list_for_each_entry(publ, &pls->list, local_list) {
		if (!buf) {
			/* Start a new message sized for the remaining items,
			 * capped at max_item_buf; because 'left' is always an
			 * exact multiple of ITEM_SIZE, the final buffer is
			 * filled completely and queued before the loop ends.
			 */
			left = (rest <= max_item_buf) ? rest : max_item_buf;
			rest -= left;
			buf = named_prepare_buf(PUBLICATION, left, node);
			if (!buf) {
				/* Give up on the whole bulk update; the peer
				 * will simply not learn these publications.
				 */
				warn("Bulk publication failure\n");
				return;
			}
			item = (struct distr_item *)msg_data(buf_msg(buf));
		}
		publ_to_item(item, publ);
		item++;
		left -= ITEM_SIZE;
		if (!left) {
			/* Buffer full: queue it for later transmission.
			 * NOTE(review): the sk_buff is chained by treating its
			 * leading members as a list_head -- assumes sk_buff
			 * layout permits this; confirm against
			 * tipc_link_send_names(), which must unlink the same way.
			 */
			list_add_tail((struct list_head *)buf, message_list);
			buf = NULL;
		}
	}
}

/**
* tipc_named_node_up - tell specified node about all publications by this node
*/
Expand All @@ -177,13 +241,8 @@ void tipc_named_node_up(unsigned long nodearg)
{
struct tipc_node *n_ptr;
struct tipc_link *l_ptr;
struct publication *publ;
struct distr_item *item = NULL;
struct sk_buff *buf = NULL;
struct list_head message_list;
u32 node = (u32)nodearg;
u32 left = 0;
u32 rest;
u32 max_item_buf = 0;

/* compute maximum amount of publication data to send per message */
Expand All @@ -207,28 +266,8 @@ void tipc_named_node_up(unsigned long nodearg)
INIT_LIST_HEAD(&message_list);

read_lock_bh(&tipc_nametbl_lock);
rest = publ_cnt * ITEM_SIZE;

list_for_each_entry(publ, &publ_root, local_list) {
if (!buf) {
left = (rest <= max_item_buf) ? rest : max_item_buf;
rest -= left;
buf = named_prepare_buf(PUBLICATION, left, node);
if (!buf) {
warn("Bulk publication distribution failure\n");
goto exit;
}
item = (struct distr_item *)msg_data(buf_msg(buf));
}
publ_to_item(item, publ);
item++;
left -= ITEM_SIZE;
if (!left) {
list_add_tail((struct list_head *)buf, &message_list);
buf = NULL;
}
}
exit:
named_distribute(&message_list, node, &publ_cluster, max_item_buf);
named_distribute(&message_list, node, &publ_zone, max_item_buf);
read_unlock_bh(&tipc_nametbl_lock);

tipc_link_send_names(&message_list, (u32)node);
Expand Down Expand Up @@ -316,21 +355,23 @@ void tipc_named_recv(struct sk_buff *buf)
}

/**
* tipc_named_reinit - re-initialize local publication list
* tipc_named_reinit - re-initialize local publications
*
* This routine is called whenever TIPC networking is enabled.
* All existing publications by this node that have "cluster" or "zone" scope
* are updated to reflect the node's new network address.
* All name table entries published by this node are updated to reflect
* the node's new network address.
*/

void tipc_named_reinit(void)
{
struct publication *publ;
int scope;

write_lock_bh(&tipc_nametbl_lock);

list_for_each_entry(publ, &publ_root, local_list)
publ->node = tipc_own_addr;
for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_NODE_SCOPE; scope++)
list_for_each_entry(publ, &publ_lists[scope]->list, local_list)
publ->node = tipc_own_addr;

write_unlock_bh(&tipc_nametbl_lock);
}
14 changes: 7 additions & 7 deletions net/tipc/name_table.c
Original file line number Diff line number Diff line change
Expand Up @@ -347,7 +347,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
info->cluster_list_size++;
}

if (node == tipc_own_addr) {
if (in_own_node(node)) {
list_add(&publ->node_list, &info->node_list);
info->node_list_size++;
}
Expand Down Expand Up @@ -418,7 +418,7 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i

/* Remove publication from node scope list, if present */

if (node == tipc_own_addr) {
if (in_own_node(node)) {
list_del(&publ->node_list);
info->node_list_size--;
}
Expand Down Expand Up @@ -604,7 +604,7 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
publ = list_first_entry(&info->node_list, struct publication,
node_list);
list_move_tail(&publ->node_list, &info->node_list);
} else if (in_own_cluster(*destnode)) {
} else if (in_own_cluster_exact(*destnode)) {
if (list_empty(&info->cluster_list))
goto no_match;
publ = list_first_entry(&info->cluster_list, struct publication,
Expand Down Expand Up @@ -695,11 +695,12 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
}

write_lock_bh(&tipc_nametbl_lock);
table.local_publ_count++;
publ = tipc_nametbl_insert_publ(type, lower, upper, scope,
tipc_own_addr, port_ref, key);
if (publ && (scope != TIPC_NODE_SCOPE))
if (likely(publ)) {
table.local_publ_count++;
tipc_named_publish(publ);
}
write_unlock_bh(&tipc_nametbl_lock);
return publ;
}
Expand All @@ -716,8 +717,7 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
if (likely(publ)) {
table.local_publ_count--;
if (publ->scope != TIPC_NODE_SCOPE)
tipc_named_withdraw(publ);
tipc_named_withdraw(publ);
write_unlock_bh(&tipc_nametbl_lock);
list_del_init(&publ->pport_list);
kfree(publ);
Expand Down
3 changes: 2 additions & 1 deletion net/tipc/net.c
Original file line number Diff line number Diff line change
Expand Up @@ -178,11 +178,12 @@ int tipc_net_start(u32 addr)
tipc_subscr_stop();
tipc_cfg_stop();

write_lock_bh(&tipc_net_lock);
tipc_own_addr = addr;
tipc_named_reinit();
tipc_port_reinit();

tipc_bclink_init();
write_unlock_bh(&tipc_net_lock);

tipc_k_signal((Handler)tipc_subscr_start, 0);
tipc_k_signal((Handler)tipc_cfg_init, 0);
Expand Down
2 changes: 1 addition & 1 deletion net/tipc/node.c
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ struct tipc_node *tipc_node_find(u32 addr)
struct tipc_node *node;
struct hlist_node *pos;

if (unlikely(!in_own_cluster(addr)))
if (unlikely(!in_own_cluster_exact(addr)))
return NULL;

hlist_for_each_entry(node, pos, &node_htable[tipc_hashfn(addr)], hash) {
Expand Down
2 changes: 1 addition & 1 deletion net/tipc/node_subscr.c
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@
void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
void *usr_handle, net_ev_handler handle_down)
{
if (addr == tipc_own_addr) {
if (in_own_node(addr)) {
node_sub->node = NULL;
return;
}
Expand Down
Loading

0 comments on commit 167de77

Please sign in to comment.