Skip to content

Commit

Permalink
net: dsa: allow masters to join a LAG
Browse files Browse the repository at this point in the history
There are 2 ways in which a DSA user port may become handled by 2 CPU
ports in a LAG:

(1) its current DSA master joins a LAG

 ip link del bond0 && ip link add bond0 type bond mode 802.3ad
 ip link set eno2 master bond0

When this happens, all user ports with "eno2" as DSA master get
automatically migrated to "bond0" as DSA master.

(2) it is explicitly configured as such by the user

 # Before, the DSA master was eno3
 ip link set swp0 type dsa master bond0

The design of this configuration is that the LAG device dynamically
becomes a DSA master through dsa_master_setup() when the first physical
DSA master becomes a LAG slave, and stops being so through
dsa_master_teardown() when the last physical DSA master leaves.

A LAG interface is considered as a valid DSA master only if it contains
existing DSA masters, and no other lower interfaces. Therefore, we
mainly rely on method (1) to enter this configuration.

Each physical DSA master (LAG slave) retains its dev->dsa_ptr for when
it becomes a standalone DSA master again. But the LAG master also has a
dev->dsa_ptr, and this is actually duplicated from one of the physical
LAG slaves, and therefore needs to be balanced when LAG slaves come and
go.

To the switch driver, putting DSA masters in a LAG is seen as putting
their associated CPU ports in a LAG.

We need to prepare cross-chip host FDB notifiers for CPU ports in a LAG,
by calling the driver's ->lag_fdb_add method rather than ->port_fdb_add.

Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
  • Loading branch information
Vladimir Oltean authored and Paolo Abeni committed Sep 20, 2022
1 parent 2e359b0 commit acc43b7
Show file tree
Hide file tree
Showing 6 changed files with 310 additions and 10 deletions.
12 changes: 12 additions & 0 deletions include/net/dsa.h
Original file line number Diff line number Diff line change
Expand Up @@ -300,6 +300,9 @@ struct dsa_port {
u8 master_admin_up:1;
u8 master_oper_up:1;

/* Valid only on user ports */
u8 cpu_port_in_lag:1;

u8 setup:1;

struct device_node *dn;
Expand Down Expand Up @@ -724,6 +727,9 @@ static inline bool dsa_port_offloads_lag(struct dsa_port *dp,

/* Resolve the net_device currently acting as DSA master for user port @dp:
 * the LAG device when the CPU port serving @dp is bundled in a LAG,
 * otherwise the plain master of the CPU port.
 */
static inline struct net_device *dsa_port_to_master(const struct dsa_port *dp)
{
	return dp->cpu_port_in_lag ? dsa_port_lag_dev_get(dp->cpu_dp)
				   : dp->cpu_dp->master;
}

Expand Down Expand Up @@ -811,6 +817,12 @@ dsa_tree_offloads_bridge_dev(struct dsa_switch_tree *dst,
return false;
}

/* True when ports @a and @b belong to the same DSA switch tree. */
static inline bool dsa_port_tree_same(const struct dsa_port *a,
				      const struct dsa_port *b)
{
	const struct dsa_switch_tree *tree_a = a->ds->dst;
	const struct dsa_switch_tree *tree_b = b->ds->dst;

	return tree_a == tree_b;
}

typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid,
bool is_static, void *data);
struct dsa_switch_ops {
Expand Down
5 changes: 5 additions & 0 deletions net/dsa/dsa_priv.h
Original file line number Diff line number Diff line change
Expand Up @@ -185,6 +185,11 @@ static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
/* master.c */
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp);
void dsa_master_teardown(struct net_device *dev);
int dsa_master_lag_setup(struct net_device *lag_dev, struct dsa_port *cpu_dp,
struct netdev_lag_upper_info *uinfo,
struct netlink_ext_ack *extack);
void dsa_master_lag_teardown(struct net_device *lag_dev,
struct dsa_port *cpu_dp);

static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
int device, int port)
Expand Down
49 changes: 49 additions & 0 deletions net/dsa/master.c
Original file line number Diff line number Diff line change
Expand Up @@ -428,3 +428,52 @@ void dsa_master_teardown(struct net_device *dev)
*/
wmb();
}

/* Promote @lag_dev to a DSA master (if it isn't one already) and join
 * the CPU port @cpu_dp to the hardware LAG. On failure, undo the master
 * setup only if this call performed it.
 *
 * Returns 0 on success or a negative errno, with @extack populated.
 */
int dsa_master_lag_setup(struct net_device *lag_dev, struct dsa_port *cpu_dp,
			 struct netdev_lag_upper_info *uinfo,
			 struct netlink_ext_ack *extack)
{
	bool did_master_setup = false;
	int err;

	if (!netdev_uses_dsa(lag_dev)) {
		err = dsa_master_setup(lag_dev, cpu_dp);
		if (err)
			return err;
		did_master_setup = true;
	}

	err = dsa_port_lag_join(cpu_dp, lag_dev, uinfo, extack);
	if (!err)
		return 0;

	/* Provide a generic message only if the driver didn't set one */
	if (extack && !extack->_msg)
		NL_SET_ERR_MSG_MOD(extack,
				   "CPU port failed to join LAG");

	/* Balance the dsa_master_setup() performed above, if any */
	if (did_master_setup)
		dsa_master_teardown(lag_dev);

	return err;
}

/* Tear down a master if there isn't any other user port on it,
 * optionally also destroying LAG information.
 */
void dsa_master_lag_teardown(struct net_device *lag_dev,
			     struct dsa_port *cpu_dp)
{
	struct net_device *upper;
	struct list_head *iter;

	/* Always remove the CPU port from the hardware LAG */
	dsa_port_lag_leave(cpu_dp, lag_dev);

	/* Keep @lag_dev set up as a DSA master while any DSA user port
	 * still has it as its master (user ports are uppers of the LAG).
	 * NOTE(review): _rcu iteration — presumably safe under rtnl here
	 * like other changeupper-time walks; confirm locking context.
	 */
	netdev_for_each_upper_dev_rcu(lag_dev, upper, iter)
		if (dsa_slave_dev_check(upper))
			return;

	dsa_master_teardown(lag_dev);
}
1 change: 1 addition & 0 deletions net/dsa/port.c
Original file line number Diff line number Diff line change
Expand Up @@ -1393,6 +1393,7 @@ static int dsa_port_assign_master(struct dsa_port *dp,
return err;

dp->cpu_dp = master->dsa_ptr;
dp->cpu_port_in_lag = netif_is_lag_master(master);

return 0;
}
Expand Down
231 changes: 225 additions & 6 deletions net/dsa/slave.c
Original file line number Diff line number Diff line change
Expand Up @@ -2818,11 +2818,45 @@ dsa_slave_prechangeupper_sanity_check(struct net_device *dev,
return NOTIFY_DONE;
}

/* To be eligible as a DSA master, a LAG must have all lower interfaces be
* eligible DSA masters. Additionally, all LAG slaves must be DSA masters of
* switches in the same switch tree.
*/
static int dsa_lag_master_validate(struct net_device *lag_dev,
struct netlink_ext_ack *extack)
{
struct net_device *lower1, *lower2;
struct list_head *iter1, *iter2;

netdev_for_each_lower_dev(lag_dev, lower1, iter1) {
netdev_for_each_lower_dev(lag_dev, lower2, iter2) {
if (!netdev_uses_dsa(lower1) ||
!netdev_uses_dsa(lower2)) {
NL_SET_ERR_MSG_MOD(extack,
"All LAG ports must be eligible as DSA masters");
return notifier_from_errno(-EINVAL);
}

if (lower1 == lower2)
continue;

if (!dsa_port_tree_same(lower1->dsa_ptr,
lower2->dsa_ptr)) {
NL_SET_ERR_MSG_MOD(extack,
"LAG contains DSA masters of disjoint switch trees");
return notifier_from_errno(-EINVAL);
}
}
}

return NOTIFY_DONE;
}

static int
dsa_master_prechangeupper_sanity_check(struct net_device *master,
struct netdev_notifier_changeupper_info *info)
{
struct netlink_ext_ack *extack;
struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);

if (!netdev_uses_dsa(master))
return NOTIFY_DONE;
Expand All @@ -2840,13 +2874,51 @@ dsa_master_prechangeupper_sanity_check(struct net_device *master,
if (netif_is_bridge_master(info->upper_dev))
return NOTIFY_DONE;

extack = netdev_notifier_info_to_extack(&info->info);
/* Allow LAG uppers, subject to further restrictions in
* dsa_lag_master_prechangelower_sanity_check()
*/
if (netif_is_lag_master(info->upper_dev))
return dsa_lag_master_validate(info->upper_dev, extack);

NL_SET_ERR_MSG_MOD(extack,
"DSA master cannot join unknown upper interfaces");
return notifier_from_errno(-EBUSY);
}

/* Vet an interface @dev that is about to become a lower of a LAG which is
 * already a DSA master: only DSA masters from the same switch tree as the
 * LAG's existing lowers may join.
 */
static int
dsa_lag_master_prechangelower_sanity_check(struct net_device *dev,
					   struct netdev_notifier_changeupper_info *info)
{
	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);
	struct net_device *lag_dev = info->upper_dev;
	struct net_device *lower;
	struct list_head *iter;

	/* Only police LAG devices that are currently DSA masters */
	if (!netdev_uses_dsa(lag_dev) || !netif_is_lag_master(lag_dev))
		return NOTIFY_DONE;

	/* Unlinking needs no validation */
	if (!info->linking)
		return NOTIFY_DONE;

	if (!netdev_uses_dsa(dev)) {
		NL_SET_ERR_MSG(extack,
			       "Only DSA masters can join a LAG DSA master");
		return notifier_from_errno(-EINVAL);
	}

	/* Comparing against a single existing lower is sufficient: all
	 * current lowers already share one switch tree, so the loop body
	 * deliberately runs at most once (note the unconditional break).
	 */
	netdev_for_each_lower_dev(lag_dev, lower, iter) {
		if (!dsa_port_tree_same(dev->dsa_ptr, lower->dsa_ptr)) {
			NL_SET_ERR_MSG(extack,
				       "Interface is DSA master for a different switch tree than this LAG");
			return notifier_from_errno(-EINVAL);
		}

		break;
	}

	return NOTIFY_DONE;
}

/* Don't allow bridging of DSA masters, since the bridge layer rx_handler
* prevents the DSA fake ethertype handler to be invoked, so we don't get the
* chance to strip off and parse the DSA switch tag protocol header (the bridge
Expand Down Expand Up @@ -2887,6 +2959,136 @@ dsa_bridge_prechangelower_sanity_check(struct net_device *new_lower,
return NOTIFY_DONE;
}

/* Best-effort migration of every user port still mastered by @lag_dev back
 * to the first physical CPU port of tree @dst; failures are only logged.
 */
static void dsa_tree_migrate_ports_from_lag_master(struct dsa_switch_tree *dst,
						   struct net_device *lag_dev)
{
	struct net_device *fallback_master = dsa_tree_find_first_master(dst);
	struct dsa_port *dp;
	int ret;

	dsa_tree_for_each_user_port(dp, dst) {
		if (dsa_port_to_master(dp) == lag_dev) {
			ret = dsa_slave_change_master(dp->slave,
						      fallback_master, NULL);
			if (ret)
				netdev_err(dp->slave,
					   "failed to restore master to %s: %pe\n",
					   fallback_master->name, ERR_PTR(ret));
		}
	}
}

/* Handle the physical DSA master @master becoming a slave of @lag_dev:
 * set the LAG up as a DSA master and migrate to it all user ports served
 * by @master. On failure, migrate the already-moved ports back and balance
 * the LAG master setup performed by dsa_master_lag_setup().
 *
 * Returns 0 on success or the negative errno of the failed migration.
 */
static int dsa_master_lag_join(struct net_device *master,
			       struct net_device *lag_dev,
			       struct netdev_lag_upper_info *uinfo,
			       struct netlink_ext_ack *extack)
{
	struct dsa_port *cpu_dp = master->dsa_ptr;
	struct dsa_switch_tree *dst = cpu_dp->dst;
	struct dsa_port *dp;
	int err, ret;

	err = dsa_master_lag_setup(lag_dev, cpu_dp, uinfo, extack);
	if (err)
		return err;

	dsa_tree_for_each_user_port(dp, dst) {
		if (dsa_port_to_master(dp) != master)
			continue;

		err = dsa_slave_change_master(dp->slave, lag_dev, extack);
		if (err)
			goto restore;
	}

	return 0;

restore:
	/* Roll back, in reverse, the ports migrated before the failure.
	 * Use a separate variable for the rollback result so that a
	 * successful rollback does not clobber @err and make the whole
	 * function wrongly report success to the caller.
	 */
	dsa_tree_for_each_user_port_continue_reverse(dp, dst) {
		if (dsa_port_to_master(dp) != lag_dev)
			continue;

		ret = dsa_slave_change_master(dp->slave, master, NULL);
		if (ret) {
			netdev_err(dp->slave,
				   "failed to restore master to %s: %pe\n",
				   master->name, ERR_PTR(ret));
		}
	}

	dsa_master_lag_teardown(lag_dev, master->dsa_ptr);

	return err;
}

/* Handle the physical DSA master @master leaving @lag_dev: rebalance the
 * LAG's dev->dsa_ptr onto a remaining physical master if one exists,
 * otherwise migrate all user ports off the LAG; finally remove the CPU
 * port from the hardware LAG.
 */
static void dsa_master_lag_leave(struct net_device *master,
				 struct net_device *lag_dev)
{
	struct dsa_port *dp, *cpu_dp = lag_dev->dsa_ptr;
	struct dsa_switch_tree *dst = cpu_dp->dst;
	struct dsa_port *new_cpu_dp = NULL;
	struct net_device *lower;
	struct list_head *iter;

	/* Find any remaining physical DSA master still in the LAG */
	netdev_for_each_lower_dev(lag_dev, lower, iter) {
		if (netdev_uses_dsa(lower)) {
			new_cpu_dp = lower->dsa_ptr;
			break;
		}
	}

	if (new_cpu_dp) {
		/* Update the CPU port of the user ports still under the LAG
		 * so that dsa_port_to_master() continues to work properly
		 */
		dsa_tree_for_each_user_port(dp, dst)
			if (dsa_port_to_master(dp) == lag_dev)
				dp->cpu_dp = new_cpu_dp;

		/* Update the index of the virtual CPU port to match the lowest
		 * physical CPU port
		 */
		lag_dev->dsa_ptr = new_cpu_dp;
		/* NOTE(review): presumably orders the dsa_ptr publication
		 * against readers on the RX path — confirm the pairing
		 * barrier, as in dsa_master_teardown()
		 */
		wmb();
	} else {
		/* If the LAG DSA master has no ports left, migrate back all
		 * user ports to the first physical CPU port
		 */
		dsa_tree_migrate_ports_from_lag_master(dst, lag_dev);
	}

	/* This DSA master has left its LAG in any case, so let
	 * the CPU port leave the hardware LAG as well
	 */
	dsa_master_lag_teardown(lag_dev, master->dsa_ptr);
}

/* NETDEV_CHANGEUPPER handler for DSA masters: react to a master joining
 * or leaving a LAG upper. Returns NOTIFY_DONE for uninteresting events,
 * otherwise the notifier-encoded result of the join/leave.
 */
static int dsa_master_changeupper(struct net_device *dev,
				  struct netdev_notifier_changeupper_info *info)
{
	struct netlink_ext_ack *extack;
	int err;

	if (!netdev_uses_dsa(dev))
		return NOTIFY_DONE;

	/* Only LAG uppers are handled here */
	if (!netif_is_lag_master(info->upper_dev))
		return NOTIFY_DONE;

	if (!info->linking) {
		dsa_master_lag_leave(dev, info->upper_dev);
		return NOTIFY_OK;
	}

	extack = netdev_notifier_info_to_extack(&info->info);
	err = dsa_master_lag_join(dev, info->upper_dev, info->upper_info,
				  extack);
	return notifier_from_errno(err);
}

static int dsa_slave_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
Expand All @@ -2905,6 +3107,10 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
if (notifier_to_errno(err))
return err;

err = dsa_lag_master_prechangelower_sanity_check(dev, info);
if (notifier_to_errno(err))
return err;

err = dsa_bridge_prechangelower_sanity_check(dev, info);
if (notifier_to_errno(err))
return err;
Expand All @@ -2930,19 +3136,32 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
if (notifier_to_errno(err))
return err;

err = dsa_master_changeupper(dev, ptr);
if (notifier_to_errno(err))
return err;

break;
}
case NETDEV_CHANGELOWERSTATE: {
struct netdev_notifier_changelowerstate_info *info = ptr;
struct dsa_port *dp;
int err;

if (!dsa_slave_dev_check(dev))
break;
if (dsa_slave_dev_check(dev)) {
dp = dsa_slave_to_port(dev);

err = dsa_port_lag_change(dp, info->lower_state_info);
}

dp = dsa_slave_to_port(dev);
/* Mirror LAG port events on DSA masters that are in
* a LAG towards their respective switch CPU ports
*/
if (netdev_uses_dsa(dev)) {
dp = dev->dsa_ptr;

err = dsa_port_lag_change(dp, info->lower_state_info);
}

err = dsa_port_lag_change(dp, info->lower_state_info);
return notifier_from_errno(err);
}
case NETDEV_CHANGE:
Expand Down
Loading

0 comments on commit acc43b7

Please sign in to comment.