[PATCH v2 net-next 08/10] net: dsa: allow masters to join a LAG

From: Vladimir Oltean
Date: Sat Sep 10 2022 - 21:09:45 EST


There are two ways in which a DSA user port can end up being handled by
two CPU ports in a LAG:

(1) its current DSA master joins a LAG

ip link del bond0 && ip link add bond0 type bond mode 802.3ad
ip link set eno2 master bond0

When this happens, all user ports with "eno2" as DSA master get
automatically migrated to "bond0" as DSA master.

(2) it is explicitly configured as such by the user

# Before, the DSA master was eno3
ip link set swp0 type dsa master bond0

The design of this configuration is that the LAG device dynamically
becomes a DSA master through dsa_master_setup() when the first physical
DSA master becomes a LAG slave, and stops being so through
dsa_master_teardown() when the last physical DSA master leaves.

A LAG interface is considered a valid DSA master only if it contains
existing DSA masters, and no other lower interfaces. Therefore, we
mainly rely on method (1) to enter this configuration.

Each physical DSA master (LAG slave) retains its dev->dsa_ptr for when
it becomes a standalone DSA master again. But the LAG master also has a
dev->dsa_ptr, which is duplicated from one of the physical LAG slaves
and therefore needs to be balanced as LAG slaves come and go.
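
To make the dev->dsa_ptr bookkeeping easier to follow, here is a
condensed sketch of the rebalancing done when a physical DSA master
leaves the LAG. It is boiled down from the dsa_master_lag_leave() hunk
in net/dsa/slave.c below (the function name is made up, and the user
port migration plus the teardown of the LAG DSA master are left out),
so treat it as an illustration rather than the exact code:

/* Pick any remaining physical DSA master under the LAG and point the
 * LAG's duplicated dev->dsa_ptr at its CPU port. When none is left,
 * the real code tears down the LAG DSA master instead.
 */
static void lag_dsa_ptr_rebalance_sketch(struct net_device *lag_dev)
{
	struct dsa_port *new_cpu_dp = NULL;
	struct net_device *lower;
	struct list_head *iter;

	netdev_for_each_lower_dev(lag_dev, lower, iter) {
		if (netdev_uses_dsa(lower)) {
			new_cpu_dp = lower->dsa_ptr;
			break;
		}
	}

	if (new_cpu_dp) {
		lag_dev->dsa_ptr = new_cpu_dp;
		/* Publish the new dsa_ptr, mirroring the wmb() in the
		 * real code.
		 */
		wmb();
	}
}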

To the switch driver, putting DSA masters in a LAG is seen as putting
their associated CPU ports in a LAG.

We need to prepare cross-chip host FDB notifiers for CPU ports in a LAG
by calling the driver's ->lag_fdb_add method rather than ->port_fdb_add.
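
For reference, the driver-facing hooks involved here are the
pre-existing ->lag_fdb_add()/->lag_fdb_del() operations of struct
dsa_switch_ops, whose prototypes look roughly as follows (see
include/net/dsa.h for the authoritative definitions):

	int	(*lag_fdb_add)(struct dsa_switch *ds, struct dsa_lag lag,
			       const unsigned char *addr, u16 vid,
			       struct dsa_db db);
	int	(*lag_fdb_del)(struct dsa_switch *ds, struct dsa_lag lag,
			       const unsigned char *addr, u16 vid,
			       struct dsa_db db);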

Signed-off-by: Vladimir Oltean <vladimir.oltean@xxxxxxx>
---
v1->v2:
- add restriction that LAG slaves must be DSA masters of the same switch
tree
- move DSA master eligibility restriction earlier, from
dsa_master_lag_setup() to dsa_master_prechangeupper_sanity_check()
- set master_setup = true in dsa_master_lag_setup(), so that
dsa_master_teardown() would actually get called
- don't overwrite extack in dsa_master_lag_setup() if
dsa_port_lag_join() provided a more specific one
- add restriction that interfaces which aren't DSA masters cannot join a
LAG DSA master

include/net/dsa.h | 12 +++
net/dsa/dsa_priv.h | 5 +
net/dsa/master.c | 49 ++++++++++
net/dsa/port.c | 1 +
net/dsa/slave.c | 231 +++++++++++++++++++++++++++++++++++++++++++--
net/dsa/switch.c | 22 ++++-
6 files changed, 310 insertions(+), 10 deletions(-)

diff --git a/include/net/dsa.h b/include/net/dsa.h
index 1c80e75b3cad..d777eac5694f 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -300,6 +300,9 @@ struct dsa_port {
u8 master_admin_up:1;
u8 master_oper_up:1;

+ /* Valid only on user ports */
+ u8 cpu_port_in_lag:1;
+
u8 setup:1;

struct device_node *dn;
@@ -724,6 +727,9 @@ static inline bool dsa_port_offloads_lag(struct dsa_port *dp,

static inline struct net_device *dsa_port_to_master(const struct dsa_port *dp)
{
+ if (dp->cpu_port_in_lag)
+ return dsa_port_lag_dev_get(dp->cpu_dp);
+
return dp->cpu_dp->master;
}

@@ -811,6 +817,12 @@ dsa_tree_offloads_bridge_dev(struct dsa_switch_tree *dst,
return false;
}

+static inline bool dsa_port_tree_same(const struct dsa_port *a,
+ const struct dsa_port *b)
+{
+ return a->ds->dst == b->ds->dst;
+}
+
typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid,
bool is_static, void *data);
struct dsa_switch_ops {
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index f0ae54d0435e..129e4a649c7e 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -185,6 +185,11 @@ static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
/* master.c */
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp);
void dsa_master_teardown(struct net_device *dev);
+int dsa_master_lag_setup(struct net_device *lag_dev, struct dsa_port *cpu_dp,
+ struct netdev_lag_upper_info *uinfo,
+ struct netlink_ext_ack *extack);
+void dsa_master_lag_teardown(struct net_device *lag_dev,
+ struct dsa_port *cpu_dp);

static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
int device, int port)
diff --git a/net/dsa/master.c b/net/dsa/master.c
index 2176c14b97a8..40367ab41cf8 100644
--- a/net/dsa/master.c
+++ b/net/dsa/master.c
@@ -428,3 +428,52 @@ void dsa_master_teardown(struct net_device *dev)
*/
wmb();
}
+
+int dsa_master_lag_setup(struct net_device *lag_dev, struct dsa_port *cpu_dp,
+ struct netdev_lag_upper_info *uinfo,
+ struct netlink_ext_ack *extack)
+{
+ bool master_setup = false;
+ int err;
+
+ if (!netdev_uses_dsa(lag_dev)) {
+ err = dsa_master_setup(lag_dev, cpu_dp);
+ if (err)
+ return err;
+
+ master_setup = true;
+ }
+
+ err = dsa_port_lag_join(cpu_dp, lag_dev, uinfo, extack);
+ if (err) {
+ if (extack && !extack->_msg)
+ NL_SET_ERR_MSG_MOD(extack,
+ "CPU port failed to join LAG");
+ goto out_master_teardown;
+ }
+
+ return 0;
+
+out_master_teardown:
+ if (master_setup)
+ dsa_master_teardown(lag_dev);
+ return err;
+}
+
+/* Tear down a master if there isn't any other user port on it,
+ * optionally also destroying LAG information.
+ */
+void dsa_master_lag_teardown(struct net_device *lag_dev,
+ struct dsa_port *cpu_dp)
+{
+ struct net_device *upper;
+ struct list_head *iter;
+
+ dsa_port_lag_leave(cpu_dp, lag_dev);
+
+ netdev_for_each_upper_dev_rcu(lag_dev, upper, iter)
+ if (dsa_slave_dev_check(upper))
+ return;
+
+ dsa_master_teardown(lag_dev);
+}
diff --git a/net/dsa/port.c b/net/dsa/port.c
index 98f7fa0cdd5c..e6289a1db0a0 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -1393,6 +1393,7 @@ static int dsa_port_assign_master(struct dsa_port *dp,
return err;

dp->cpu_dp = master->dsa_ptr;
+ dp->cpu_port_in_lag = netif_is_lag_master(master);

return 0;
}
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 00df6cf07866..aa47ddc19fdf 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -2818,11 +2818,45 @@ dsa_slave_prechangeupper_sanity_check(struct net_device *dev,
return NOTIFY_DONE;
}

+/* To be eligible as a DSA master, a LAG must have all lower interfaces be
+ * eligible DSA masters. Additionally, all LAG slaves must be DSA masters of
+ * switches in the same switch tree.
+ */
+static int dsa_lag_master_validate(struct net_device *lag_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *lower1, *lower2;
+ struct list_head *iter1, *iter2;
+
+ netdev_for_each_lower_dev(lag_dev, lower1, iter1) {
+ netdev_for_each_lower_dev(lag_dev, lower2, iter2) {
+ if (!netdev_uses_dsa(lower1) ||
+ !netdev_uses_dsa(lower2)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "All LAG ports must be eligible as DSA masters");
+ return notifier_from_errno(-EINVAL);
+ }
+
+ if (lower1 == lower2)
+ continue;
+
+ if (!dsa_port_tree_same(lower1->dsa_ptr,
+ lower2->dsa_ptr)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "LAG contains DSA masters of disjoint switch trees");
+ return notifier_from_errno(-EINVAL);
+ }
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+
static int
dsa_master_prechangeupper_sanity_check(struct net_device *master,
struct netdev_notifier_changeupper_info *info)
{
- struct netlink_ext_ack *extack;
+ struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);

if (!netdev_uses_dsa(master))
return NOTIFY_DONE;
@@ -2840,13 +2874,51 @@ dsa_master_prechangeupper_sanity_check(struct net_device *master,
if (netif_is_bridge_master(info->upper_dev))
return NOTIFY_DONE;

- extack = netdev_notifier_info_to_extack(&info->info);
+ /* Allow LAG uppers, subject to further restrictions in
+ * dsa_lag_master_prechangelower_sanity_check()
+ */
+ if (netif_is_lag_master(info->upper_dev))
+ return dsa_lag_master_validate(info->upper_dev, extack);

NL_SET_ERR_MSG_MOD(extack,
"DSA master cannot join unknown upper interfaces");
return notifier_from_errno(-EBUSY);
}

+static int
+dsa_lag_master_prechangelower_sanity_check(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info);
+ struct net_device *lag_dev = info->upper_dev;
+ struct net_device *lower;
+ struct list_head *iter;
+
+ if (!netdev_uses_dsa(lag_dev) || !netif_is_lag_master(lag_dev))
+ return NOTIFY_DONE;
+
+ if (!info->linking)
+ return NOTIFY_DONE;
+
+ if (!netdev_uses_dsa(dev)) {
+ NL_SET_ERR_MSG(extack,
+ "Only DSA masters can join a LAG DSA master");
+ return notifier_from_errno(-EINVAL);
+ }
+
+ netdev_for_each_lower_dev(lag_dev, lower, iter) {
+ if (!dsa_port_tree_same(dev->dsa_ptr, lower->dsa_ptr)) {
+ NL_SET_ERR_MSG(extack,
+ "Interface is DSA master for a different switch tree than this LAG");
+ return notifier_from_errno(-EINVAL);
+ }
+
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
/* Don't allow bridging of DSA masters, since the bridge layer rx_handler
* prevents the DSA fake ethertype handler to be invoked, so we don't get the
* chance to strip off and parse the DSA switch tag protocol header (the bridge
@@ -2887,6 +2959,136 @@ dsa_bridge_prechangelower_sanity_check(struct net_device *new_lower,
return NOTIFY_DONE;
}

+static void dsa_tree_migrate_ports_from_lag_master(struct dsa_switch_tree *dst,
+ struct net_device *lag_dev)
+{
+ struct net_device *new_master = dsa_tree_find_first_master(dst);
+ struct dsa_port *dp;
+ int err;
+
+ dsa_tree_for_each_user_port(dp, dst) {
+ if (dsa_port_to_master(dp) != lag_dev)
+ continue;
+
+ err = dsa_slave_change_master(dp->slave, new_master, NULL);
+ if (err) {
+ netdev_err(dp->slave,
+ "failed to restore master to %s: %pe\n",
+ new_master->name, ERR_PTR(err));
+ }
+ }
+}
+
+static int dsa_master_lag_join(struct net_device *master,
+ struct net_device *lag_dev,
+ struct netdev_lag_upper_info *uinfo,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_port *cpu_dp = master->dsa_ptr;
+ struct dsa_switch_tree *dst = cpu_dp->dst;
+ struct dsa_port *dp;
+ int err;
+
+ err = dsa_master_lag_setup(lag_dev, cpu_dp, uinfo, extack);
+ if (err)
+ return err;
+
+ dsa_tree_for_each_user_port(dp, dst) {
+ if (dsa_port_to_master(dp) != master)
+ continue;
+
+ err = dsa_slave_change_master(dp->slave, lag_dev, extack);
+ if (err)
+ goto restore;
+ }
+
+ return 0;
+
+restore:
+ dsa_tree_for_each_user_port_continue_reverse(dp, dst) {
+ if (dsa_port_to_master(dp) != lag_dev)
+ continue;
+
+ err = dsa_slave_change_master(dp->slave, master, NULL);
+ if (err) {
+ netdev_err(dp->slave,
+ "failed to restore master to %s: %pe\n",
+ master->name, ERR_PTR(err));
+ }
+ }
+
+ dsa_master_lag_teardown(lag_dev, master->dsa_ptr);
+
+ return err;
+}
+
+static void dsa_master_lag_leave(struct net_device *master,
+ struct net_device *lag_dev)
+{
+ struct dsa_port *dp, *cpu_dp = lag_dev->dsa_ptr;
+ struct dsa_switch_tree *dst = cpu_dp->dst;
+ struct dsa_port *new_cpu_dp = NULL;
+ struct net_device *lower;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(lag_dev, lower, iter) {
+ if (netdev_uses_dsa(lower)) {
+ new_cpu_dp = lower->dsa_ptr;
+ break;
+ }
+ }
+
+ if (new_cpu_dp) {
+ /* Update the CPU port of the user ports still under the LAG
+ * so that dsa_port_to_master() continues to work properly
+ */
+ dsa_tree_for_each_user_port(dp, dst)
+ if (dsa_port_to_master(dp) == lag_dev)
+ dp->cpu_dp = new_cpu_dp;
+
+ /* Update the index of the virtual CPU port to match the lowest
+ * physical CPU port
+ */
+ lag_dev->dsa_ptr = new_cpu_dp;
+ wmb();
+ } else {
+ /* If the LAG DSA master has no ports left, migrate back all
+ * user ports to the first physical CPU port
+ */
+ dsa_tree_migrate_ports_from_lag_master(dst, lag_dev);
+ }
+
+ /* This DSA master has left its LAG in any case, so let
+ * the CPU port leave the hardware LAG as well
+ */
+ dsa_master_lag_teardown(lag_dev, master->dsa_ptr);
+}
+
+static int dsa_master_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct netlink_ext_ack *extack;
+ int err = NOTIFY_DONE;
+
+ if (!netdev_uses_dsa(dev))
+ return err;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ if (netif_is_lag_master(info->upper_dev)) {
+ if (info->linking) {
+ err = dsa_master_lag_join(dev, info->upper_dev,
+ info->upper_info, extack);
+ err = notifier_from_errno(err);
+ } else {
+ dsa_master_lag_leave(dev, info->upper_dev);
+ err = NOTIFY_OK;
+ }
+ }
+
+ return err;
+}
+
static int dsa_slave_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
@@ -2905,6 +3107,10 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
if (notifier_to_errno(err))
return err;

+ err = dsa_lag_master_prechangelower_sanity_check(dev, info);
+ if (notifier_to_errno(err))
+ return err;
+
err = dsa_bridge_prechangelower_sanity_check(dev, info);
if (notifier_to_errno(err))
return err;
@@ -2930,6 +3136,10 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
if (notifier_to_errno(err))
return err;

+ err = dsa_master_changeupper(dev, ptr);
+ if (notifier_to_errno(err))
+ return err;
+
break;
}
case NETDEV_CHANGELOWERSTATE: {
@@ -2937,12 +3147,21 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb,
struct dsa_port *dp;
- int err;
+ int err = 0;

- if (!dsa_slave_dev_check(dev))
- break;
+ if (dsa_slave_dev_check(dev)) {
+ dp = dsa_slave_to_port(dev);
+
+ err = dsa_port_lag_change(dp, info->lower_state_info);
+ }

- dp = dsa_slave_to_port(dev);
+ /* Mirror LAG port events on DSA masters that are in
+ * a LAG towards their respective switch CPU ports
+ */
+ if (netdev_uses_dsa(dev)) {
+ dp = dev->dsa_ptr;
+
+ err = dsa_port_lag_change(dp, info->lower_state_info);
+ }

- err = dsa_port_lag_change(dp, info->lower_state_info);
return notifier_from_errno(err);
}
case NETDEV_CHANGE:
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index c2cb15e21324..ce56acdba203 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -398,8 +398,15 @@ static int dsa_switch_host_fdb_add(struct dsa_switch *ds,

dsa_switch_for_each_port(dp, ds) {
if (dsa_port_host_address_match(dp, info->dp)) {
- err = dsa_port_do_fdb_add(dp, info->addr, info->vid,
- info->db);
+ if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
+ err = dsa_switch_do_lag_fdb_add(ds, dp->lag,
+ info->addr,
+ info->vid,
+ info->db);
+ } else {
+ err = dsa_port_do_fdb_add(dp, info->addr,
+ info->vid, info->db);
+ }
if (err)
break;
}
@@ -419,8 +426,15 @@ static int dsa_switch_host_fdb_del(struct dsa_switch *ds,

dsa_switch_for_each_port(dp, ds) {
if (dsa_port_host_address_match(dp, info->dp)) {
- err = dsa_port_do_fdb_del(dp, info->addr, info->vid,
- info->db);
+ if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
+ err = dsa_switch_do_lag_fdb_del(ds, dp->lag,
+ info->addr,
+ info->vid,
+ info->db);
+ } else {
+ err = dsa_port_do_fdb_del(dp, info->addr,
+ info->vid, info->db);
+ }
if (err)
break;
}
--
2.34.1