Merge branch 'team' ("add support for peer notifications and igmp rejoins for team")

Jiri Pirko says:

====================
The middle patch adjusts core infrastructure so the bonding code can be
generalized and reused by team.

v1->v2: using msecs_to_jiffies() as suggested by Eric

Jiri Pirko (3):
  team: add peer notification
  net: convert resend IGMP to notifier event
  team: add support for sending multicast rejoins
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed Jul 23, 2013
2 parents ab2cfbb + 492b200 commit 45c9149
Showing 8 changed files with 245 additions and 43 deletions.
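Before the per-file diffs, a note on the mechanism: the series replaces bonding's hand-rolled IGMP rejoin walk with a new netdevice notifier event, NETDEV_RESEND_IGMP, which any master device (bond or team) can raise. A minimal sketch of the emitter side, for illustration only — this is not code from the commit, and example_request_igmp_resend is a hypothetical name:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Hypothetical helper (illustration only): the emitter side of the
 * new NETDEV_RESEND_IGMP event. It must run under RTNL; the layers
 * that care (IGMP itself, VLANs, bridges) react in their own
 * notifier handlers, as the hunks below show. */
static void example_request_igmp_resend(struct net_device *master_dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, master_dev);
	rtnl_unlock();
}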
44 changes: 9 additions & 35 deletions drivers/net/bonding/bond_main.c
@@ -715,49 +715,19 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
return err;
}

static void __bond_resend_igmp_join_requests(struct net_device *dev)
{
struct in_device *in_dev;

in_dev = __in_dev_get_rcu(dev);
if (in_dev)
ip_mc_rejoin_groups(in_dev);
}

/*
* Retrieve the list of registered multicast addresses for the bonding
* device and retransmit an IGMP JOIN request to the current active
* slave.
*/
static void bond_resend_igmp_join_requests(struct bonding *bond)
{
struct net_device *bond_dev, *vlan_dev, *upper_dev;
struct vlan_entry *vlan;

read_lock(&bond->lock);
rcu_read_lock();

bond_dev = bond->dev;

/* rejoin all groups on bond device */
__bond_resend_igmp_join_requests(bond_dev);

/*
* if bond is enslaved to a bridge,
* then rejoin all groups on its master
*/
upper_dev = netdev_master_upper_dev_get_rcu(bond_dev);
if (upper_dev && upper_dev->priv_flags & IFF_EBRIDGE)
__bond_resend_igmp_join_requests(upper_dev);

/* rejoin all groups on vlan devices */
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
vlan_dev = __vlan_find_dev_deep(bond_dev, htons(ETH_P_8021Q),
vlan->vlan_id);
if (vlan_dev)
__bond_resend_igmp_join_requests(vlan_dev);
if (!rtnl_trylock()) {
queue_delayed_work(bond->wq, &bond->mcast_work, 0);
return;
}
rcu_read_unlock();
call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);
rtnl_unlock();

/* We use curr_slave_lock to protect against concurrent access to
* igmp_retrans from multiple running instances of this function and
@@ -3234,6 +3204,10 @@ static int bond_slave_netdev_event(unsigned long event,
case NETDEV_FEAT_CHANGE:
bond_compute_features(bond);
break;
case NETDEV_RESEND_IGMP:
/* Propagate to master device */
call_netdevice_notifiers(event, slave->bond->dev);
break;
default:
break;
}
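Note the net effect of this hunk: the rejoin fan-out to the bridge master and the VLAN devices does not disappear, it moves. Bonding now raises NETDEV_RESEND_IGMP only for its own device; the 8021q and bridge handlers further down re-emit the event for their stacked devices, and the IGMP code performs the actual rejoin. The rtnl_trylock()/queue_delayed_work() dance preserves the old guarantee that the work item never blocks on RTNL: if the lock is contended, the work requeues itself and retries.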
176 changes: 176 additions & 0 deletions drivers/net/team/team.c
@@ -622,6 +622,86 @@ static int team_change_mode(struct team *team, const char *kind)
}


/*********************
* Peers notification
*********************/

static void team_notify_peers_work(struct work_struct *work)
{
struct team *team;

team = container_of(work, struct team, notify_peers.dw.work);

if (!rtnl_trylock()) {
schedule_delayed_work(&team->notify_peers.dw, 0);
return;
}
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
rtnl_unlock();
if (!atomic_dec_and_test(&team->notify_peers.count_pending))
schedule_delayed_work(&team->notify_peers.dw,
msecs_to_jiffies(team->notify_peers.interval));
}

static void team_notify_peers(struct team *team)
{
if (!team->notify_peers.count || !netif_running(team->dev))
return;
atomic_set(&team->notify_peers.count_pending, team->notify_peers.count);
schedule_delayed_work(&team->notify_peers.dw, 0);
}

static void team_notify_peers_init(struct team *team)
{
INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
}

static void team_notify_peers_fini(struct team *team)
{
cancel_delayed_work_sync(&team->notify_peers.dw);
}


/*******************************
* Send multicast group rejoins
*******************************/

static void team_mcast_rejoin_work(struct work_struct *work)
{
struct team *team;

team = container_of(work, struct team, mcast_rejoin.dw.work);

if (!rtnl_trylock()) {
schedule_delayed_work(&team->mcast_rejoin.dw, 0);
return;
}
call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
rtnl_unlock();
if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
schedule_delayed_work(&team->mcast_rejoin.dw,
msecs_to_jiffies(team->mcast_rejoin.interval));
}

static void team_mcast_rejoin(struct team *team)
{
if (!team->mcast_rejoin.count || !netif_running(team->dev))
return;
atomic_set(&team->mcast_rejoin.count_pending, team->mcast_rejoin.count);
schedule_delayed_work(&team->mcast_rejoin.dw, 0);
}

static void team_mcast_rejoin_init(struct team *team)
{
INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
}

static void team_mcast_rejoin_fini(struct team *team)
{
cancel_delayed_work_sync(&team->mcast_rejoin.dw);
}


/************************
* Rx path frame handler
************************/
@@ -846,6 +926,8 @@ static void team_port_enable(struct team *team,
team_queue_override_port_add(team, port);
if (team->ops.port_enabled)
team->ops.port_enabled(team, port);
team_notify_peers(team);
team_mcast_rejoin(team);
}

static void __reconstruct_port_hlist(struct team *team, int rm_index)
@@ -875,6 +957,8 @@ static void team_port_disable(struct team *team,
team->en_port_count--;
team_queue_override_port_del(team, port);
team_adjust_ops(team);
team_notify_peers(team);
team_mcast_rejoin(team);
}

#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
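Taken together with the worker functions above: enabling or disabling a port arms a burst of notifications. With notify_peers_count = 3 and notify_peers_interval = 100, for example, a port change emits NETDEV_NOTIFY_PEERS once immediately and twice more at roughly 100 ms spacing (msecs_to_jiffies(100), per the v2 note in the cover letter); count_pending is the atomic countdown, and a contended rtnl_trylock() merely requeues the work without consuming a count. The mcast_rejoin pair behaves identically with NETDEV_RESEND_IGMP.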
@@ -1205,6 +1289,62 @@ static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
return team_change_mode(team, ctx->data.str_val);
}

static int team_notify_peers_count_get(struct team *team,
struct team_gsetter_ctx *ctx)
{
ctx->data.u32_val = team->notify_peers.count;
return 0;
}

static int team_notify_peers_count_set(struct team *team,
struct team_gsetter_ctx *ctx)
{
team->notify_peers.count = ctx->data.u32_val;
return 0;
}

static int team_notify_peers_interval_get(struct team *team,
struct team_gsetter_ctx *ctx)
{
ctx->data.u32_val = team->notify_peers.interval;
return 0;
}

static int team_notify_peers_interval_set(struct team *team,
struct team_gsetter_ctx *ctx)
{
team->notify_peers.interval = ctx->data.u32_val;
return 0;
}

static int team_mcast_rejoin_count_get(struct team *team,
struct team_gsetter_ctx *ctx)
{
ctx->data.u32_val = team->mcast_rejoin.count;
return 0;
}

static int team_mcast_rejoin_count_set(struct team *team,
struct team_gsetter_ctx *ctx)
{
team->mcast_rejoin.count = ctx->data.u32_val;
return 0;
}

static int team_mcast_rejoin_interval_get(struct team *team,
struct team_gsetter_ctx *ctx)
{
ctx->data.u32_val = team->mcast_rejoin.interval;
return 0;
}

static int team_mcast_rejoin_interval_set(struct team *team,
struct team_gsetter_ctx *ctx)
{
team->mcast_rejoin.interval = ctx->data.u32_val;
return 0;
}

static int team_port_en_option_get(struct team *team,
struct team_gsetter_ctx *ctx)
{
@@ -1316,6 +1456,30 @@ static const struct team_option team_options[] = {
.getter = team_mode_option_get,
.setter = team_mode_option_set,
},
{
.name = "notify_peers_count",
.type = TEAM_OPTION_TYPE_U32,
.getter = team_notify_peers_count_get,
.setter = team_notify_peers_count_set,
},
{
.name = "notify_peers_interval",
.type = TEAM_OPTION_TYPE_U32,
.getter = team_notify_peers_interval_get,
.setter = team_notify_peers_interval_set,
},
{
.name = "mcast_rejoin_count",
.type = TEAM_OPTION_TYPE_U32,
.getter = team_mcast_rejoin_count_get,
.setter = team_mcast_rejoin_count_set,
},
{
.name = "mcast_rejoin_interval",
.type = TEAM_OPTION_TYPE_U32,
.getter = team_mcast_rejoin_interval_get,
.setter = team_mcast_rejoin_interval_set,
},
{
.name = "enabled",
.type = TEAM_OPTION_TYPE_BOOL,
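Since these are plain u32 team options, they ride the existing team netlink options interface and need no new uapi; userspace can get and set them like any other option. (In teamd releases that picked this up, they surface as the notify_peers.count/interval and mcast_rejoin.count/interval config keys — stated with hedging, as teamd is outside this commit.) A count of 0, the default for a freshly allocated team, leaves the feature off, per the early return in team_notify_peers()/team_mcast_rejoin().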
@@ -1396,6 +1560,10 @@ static int team_init(struct net_device *dev)

INIT_LIST_HEAD(&team->option_list);
INIT_LIST_HEAD(&team->option_inst_list);

team_notify_peers_init(team);
team_mcast_rejoin_init(team);

err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
if (err)
goto err_options_register;
@@ -1406,6 +1574,8 @@ static int team_init(struct net_device *dev)
return 0;

err_options_register:
team_mcast_rejoin_fini(team);
team_notify_peers_fini(team);
team_queue_override_fini(team);
err_team_queue_override_init:
free_percpu(team->pcpu_stats);
@@ -1425,6 +1595,8 @@ static void team_uninit(struct net_device *dev)

__team_change_mode(team, NULL); /* cleanup */
__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
team_mcast_rejoin_fini(team);
team_notify_peers_fini(team);
team_queue_override_fini(team);
mutex_unlock(&team->lock);
}
@@ -2698,6 +2870,10 @@ static int team_device_event(struct notifier_block *unused,
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid to change type of underlaying device */
return NOTIFY_BAD;
case NETDEV_RESEND_IGMP:
/* Propagate to master device */
call_netdevice_notifiers(event, port->team->dev);
break;
}
return NOTIFY_DONE;
}
14 changes: 13 additions & 1 deletion include/linux/if_team.h
@@ -10,9 +10,9 @@
#ifndef _LINUX_IF_TEAM_H_
#define _LINUX_IF_TEAM_H_


#include <linux/netpoll.h>
#include <net/sch_generic.h>
#include <linux/types.h>
#include <uapi/linux/if_team.h>

struct team_pcpu_stats {
@@ -194,6 +194,18 @@ struct team {
bool user_carrier_enabled;
bool queue_override_enabled;
struct list_head *qom_lists; /* array of queue override mapping lists */
struct {
unsigned int count;
unsigned int interval; /* in ms */
atomic_t count_pending;
struct delayed_work dw;
} notify_peers;
struct {
unsigned int count;
unsigned int interval; /* in ms */
atomic_t count_pending;
struct delayed_work dw;
} mcast_rejoin;
long mode_priv[TEAM_MODE_PRIV_LONGS];
};
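The two anonymous structs are deliberately identical in shape — a configured count, an interval in milliseconds, an atomic countdown, and a delayed work item — which is what lets the notify-peers and mcast-rejoin paths in team.c mirror each other line for line.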

1 change: 0 additions & 1 deletion include/linux/igmp.h
@@ -129,6 +129,5 @@ extern void ip_mc_unmap(struct in_device *);
extern void ip_mc_remap(struct in_device *);
extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
extern void ip_mc_rejoin_groups(struct in_device *in_dev);

#endif
1 change: 1 addition & 0 deletions include/linux/netdevice.h
@@ -1633,6 +1633,7 @@ struct packet_offload {
#define NETDEV_NOTIFY_PEERS 0x0013
#define NETDEV_JOIN 0x0014
#define NETDEV_CHANGEUPPER 0x0015
#define NETDEV_RESEND_IGMP 0x0016

extern int register_netdevice_notifier(struct notifier_block *nb);
extern int unregister_netdevice_notifier(struct notifier_block *nb);
1 change: 1 addition & 0 deletions net/8021q/vlan.c
@@ -459,6 +459,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,

case NETDEV_NOTIFY_PEERS:
case NETDEV_BONDING_FAILOVER:
case NETDEV_RESEND_IGMP:
/* Propagate to vlan devices */
vlan_group_for_each_dev(grp, i, vlandev)
call_netdevice_notifiers(event, vlandev);
5 changes: 5 additions & 0 deletions net/bridge/br_notify.c
@@ -102,6 +102,11 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid underlaying device to change its type. */
return NOTIFY_BAD;

case NETDEV_RESEND_IGMP:
/* Propagate to master device */
call_netdevice_notifiers(event, br->dev);
break;
}

/* Events that may cause spanning tree to refresh */
46 changes: 40 additions & 6 deletions — eighth file, presumably net/ipv4/igmp.c (this diff did not render; the counts are inferred from the 245/43 totals above)
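Since the igmp.h hunk above drops the extern declaration of ip_mc_rejoin_groups(), the unrendered file is almost certainly net/ipv4/igmp.c, where the function would become static and IGMP would register its own netdevice notifier as the final consumer of NETDEV_RESEND_IGMP. A sketch of what that plausibly looks like — a reconstruction, not text copied from the commit:

/* Reconstruction (hedged): the consumer side in net/ipv4/igmp.c.
 * NETDEV_RESEND_IGMP is emitted under RTNL, so the in_device can be
 * fetched with the RTNL accessor rather than under RCU. */
static int ip_mc_netdev_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct in_device *in_dev;

	switch (event) {
	case NETDEV_RESEND_IGMP:
		in_dev = __in_dev_get_rtnl(dev);
		if (in_dev)
			ip_mc_rejoin_groups(in_dev);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block igmp_notifier = {
	.notifier_call = ip_mc_netdev_event,
};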
