Skip to content

Commit

Permalink
tipc: convert node lock to rwlock
Browse files Browse the repository at this point in the history
According to the node FSM a node in state SELF_UP_PEER_UP cannot
change state inside a lock context, except when a TUNNEL_PROTOCOL
(SYNCH or FAILOVER) packet arrives. However, the node's individual
links may still change state.

Since each link now is protected by its own spinlock, we finally have
the conditions in place to convert the node spinlock to an rwlock_t.
If the node state and arriving packet type are right, we can let the
link directly receive the packet under protection of its own spinlock
and the node lock in read mode. In all other cases we use the node
lock in write mode. This enables full concurrent execution between
parallel links during steady-state traffic situations, i.e., 99+ %
of the time.

This commit implements this change.

Reviewed-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
  • Loading branch information
Jon Paul Maloy authored and David S. Miller committed Nov 20, 2015
1 parent 2312bf6 commit 5405ff6
Show file tree
Hide file tree
Showing 3 changed files with 136 additions and 133 deletions.
32 changes: 16 additions & 16 deletions net/tipc/link.c
Original file line number Diff line number Diff line change
Expand Up @@ -1547,7 +1547,7 @@ static struct tipc_node *tipc_link_find_owner(struct net *net,
*bearer_id = 0;
rcu_read_lock();
list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
tipc_node_lock(n_ptr);
tipc_node_read_lock(n_ptr);
for (i = 0; i < MAX_BEARERS; i++) {
l_ptr = n_ptr->links[i].link;
if (l_ptr && !strcmp(l_ptr->name, link_name)) {
Expand All @@ -1556,7 +1556,7 @@ static struct tipc_node *tipc_link_find_owner(struct net *net,
break;
}
}
tipc_node_unlock(n_ptr);
tipc_node_read_unlock(n_ptr);
if (found_node)
break;
}
Expand Down Expand Up @@ -1658,7 +1658,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
if (!node)
return -EINVAL;

tipc_node_lock(node);
tipc_node_read_lock(node);

link = node->links[bearer_id].link;
if (!link) {
Expand Down Expand Up @@ -1699,7 +1699,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
}

out:
tipc_node_unlock(node);
tipc_node_read_unlock(node);

return res;
}
Expand Down Expand Up @@ -1898,10 +1898,10 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)

list_for_each_entry_continue_rcu(node, &tn->node_list,
list) {
tipc_node_lock(node);
tipc_node_read_lock(node);
err = __tipc_nl_add_node_links(net, &msg, node,
&prev_link);
tipc_node_unlock(node);
tipc_node_read_unlock(node);
if (err)
goto out;

Expand All @@ -1913,10 +1913,10 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
goto out;

list_for_each_entry_rcu(node, &tn->node_list, list) {
tipc_node_lock(node);
tipc_node_read_lock(node);
err = __tipc_nl_add_node_links(net, &msg, node,
&prev_link);
tipc_node_unlock(node);
tipc_node_read_unlock(node);
if (err)
goto out;

Expand Down Expand Up @@ -1967,16 +1967,16 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
if (!node)
return -EINVAL;

tipc_node_lock(node);
tipc_node_read_lock(node);
link = node->links[bearer_id].link;
if (!link) {
tipc_node_unlock(node);
tipc_node_read_unlock(node);
nlmsg_free(msg.skb);
return -EINVAL;
}

err = __tipc_nl_add_link(net, &msg, link, 0);
tipc_node_unlock(node);
tipc_node_read_unlock(node);
if (err) {
nlmsg_free(msg.skb);
return err;
Expand Down Expand Up @@ -2021,18 +2021,18 @@ int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
node = tipc_link_find_owner(net, link_name, &bearer_id);
if (!node)
return -EINVAL;

le = &node->links[bearer_id];
tipc_node_lock(node);
tipc_node_read_lock(node);
spin_lock_bh(&le->lock);
link = le->link;
if (!link) {
tipc_node_unlock(node);
spin_unlock_bh(&le->lock);
tipc_node_read_unlock(node);
return -EINVAL;
}

link_reset_statistics(link);
spin_unlock_bh(&le->lock);
tipc_node_unlock(node);

tipc_node_read_unlock(node);
return 0;
}
Loading

0 comments on commit 5405ff6

Please sign in to comment.