net: dsa: flush switchdev workqueue before tearing down CPU/DSA ports
author    Vladimir Oltean <vladimir.oltean@nxp.com>
          Tue, 14 Sep 2021 13:47:26 +0000 (16:47 +0300)
committer Jakub Kicinski <kuba@kernel.org>
          Wed, 15 Sep 2021 22:09:46 +0000 (15:09 -0700)
Sometimes when unbinding the mv88e6xxx driver on Turris MOX, these error
messages appear:

mv88e6085 d0032004.mdio-mii:12: port 1 failed to delete be:79:b4:9e:9e:96 vid 1 from fdb: -2
mv88e6085 d0032004.mdio-mii:12: port 1 failed to delete be:79:b4:9e:9e:96 vid 0 from fdb: -2
mv88e6085 d0032004.mdio-mii:12: port 1 failed to delete d8:58:d7:00:ca:6d vid 100 from fdb: -2
mv88e6085 d0032004.mdio-mii:12: port 1 failed to delete d8:58:d7:00:ca:6d vid 1 from fdb: -2
mv88e6085 d0032004.mdio-mii:12: port 1 failed to delete d8:58:d7:00:ca:6d vid 0 from fdb: -2

(and similarly for other ports)

What happens is that DSA has a policy "even if there are bugs, let's at
least not leak memory" and dsa_port_teardown() clears the dp->fdbs and
dp->mdbs lists, which are supposed to be empty.

But if that cleanup code is deleted, the warnings go away.

=> The FDB and MDB lists (used for refcounting on shared ports, i.e. the
CPU and DSA ports) will eventually be empty, but are not yet empty by
the time we tear down those ports. In other words, we are deleting them
too soon.
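
For reference, this is roughly what the refcounted deletion on a shared
port runs into once its lists have been freed: the lookup finds nothing
and fails with -ENOENT (-2). This is only a simplified sketch in the
style of the cross-chip notifier code; dsa_shared_port_fdb_del() and
dsa_mac_addr_find() are illustrative names and the details may differ
from the upstream helpers:

static int dsa_shared_port_fdb_del(struct dsa_port *dp,
                                   const unsigned char *addr, u16 vid)
{
        struct dsa_mac_addr *a;
        int err;

        /* dp->fdbs was already emptied by dsa_port_teardown(),
         * so nothing is found here.
         */
        a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
        if (!a)
                return -ENOENT; /* the -2 seen in the log above */

        if (!refcount_dec_and_test(&a->refcount))
                return 0;

        err = dp->ds->ops->port_fdb_del(dp->ds, dp->index, addr, vid);
        if (err)
                return err;

        list_del(&a->list);
        kfree(a);

        return 0;
}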

The addresses that DSA complains about are host-trapped addresses: the
local addresses of the ports, and the MAC address of the bridge device.

The problem is that offloading those entries happens from a deferred
work item scheduled by the SWITCHDEV_FDB_DEL_TO_DEVICE handler, and this
races with the teardown of the CPU and DSA ports where the refcounting
is kept.
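
To make the race concrete: the notifier handler runs in atomic context
and only queues a work item on the DSA-owned workqueue; the refcounted
deletion on the shared port happens when that work item runs, possibly
after teardown has started. Below is a condensed, illustrative sketch of
that deferral pattern. SWITCHDEV_FDB_DEL_TO_DEVICE and
dsa_schedule_work() appear in this commit, dsa_port_fdb_del() is the
existing DSA helper for FDB deletion, and the remaining names are made
up for the example:

struct dsa_fdb_del_work {               /* illustrative container */
        struct work_struct work;
        struct dsa_port *dp;
        unsigned char addr[ETH_ALEN];
        u16 vid;
};

static void dsa_fdb_del_work_fn(struct work_struct *work)
{
        struct dsa_fdb_del_work *w =
                container_of(work, struct dsa_fdb_del_work, work);

        /* Runs asynchronously; by now dsa_port_teardown() may already
         * have freed the refcounting lists of the shared port backing
         * this host address, so the deletion fails with -ENOENT.
         */
        if (dsa_port_fdb_del(w->dp, w->addr, w->vid))
                dev_err(w->dp->ds->dev,
                        "port %d failed to delete fdb entry\n", w->dp->index);
        kfree(w);
}

static int dsa_switchdev_event(struct notifier_block *nb,
                               unsigned long event, void *ptr)
{
        struct dsa_fdb_del_work *w;

        switch (event) {
        case SWITCHDEV_FDB_DEL_TO_DEVICE:
                w = kzalloc(sizeof(*w), GFP_ATOMIC);
                if (!w)
                        return NOTIFY_BAD;
                /* ... fill in w->dp (the port backing the host address),
                 * w->addr and w->vid from the switchdev notifier info ...
                 */
                INIT_WORK(&w->work, dsa_fdb_del_work_fn);
                dsa_schedule_work(&w->work);    /* deferred, not synchronous */
                break;
        }
        return NOTIFY_OK;
}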

In fact, not only does it race, but more fundamentally, if we iterate
through the port list linearly, we might end up tearing down the shared
ports even before we delete a DSA user port which has a bridge upper.

So, as it turns out, we need to first tear down the user ports (and the
unused ones, for lack of a better place to do that), then the shared
ports (the CPU and DSA ports). In between, we need to ensure that all
work items scheduled by our switchdev handlers (which only run for user
ports, hence the reason we tear those down first) have finished.

Fixes: 3f80696f38ab ("net: dsa: reference count the MDB entries at the cross-chip notifier level")
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Link: https://lore.kernel.org/r/20210914134726.2305133-1-vladimir.oltean@nxp.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
include/net/dsa.h
net/dsa/dsa.c
net/dsa/dsa2.c
net/dsa/dsa_priv.h

diff --git a/include/net/dsa.h b/include/net/dsa.h
index f9a17145255a3606d434db0e0b2ef6ec0bf76e51..258867eff2309204d6ba409f6f84f7164ef11c94 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -447,6 +447,11 @@ static inline bool dsa_port_is_user(struct dsa_port *dp)
        return dp->type == DSA_PORT_TYPE_USER;
 }
 
+static inline bool dsa_port_is_unused(struct dsa_port *dp)
+{
+       return dp->type == DSA_PORT_TYPE_UNUSED;
+}
+
 static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
 {
        return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED;
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 1dc45e40f961c319bf3954642b997b61a22c1265..41f36ad8b0ec674f39ce4904763dc618ebe233d1 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -345,6 +345,11 @@ bool dsa_schedule_work(struct work_struct *work)
        return queue_work(dsa_owq, work);
 }
 
+void dsa_flush_workqueue(void)
+{
+       flush_workqueue(dsa_owq);
+}
+
 int dsa_devlink_param_get(struct devlink *dl, u32 id,
                          struct devlink_param_gset_ctx *ctx)
 {
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 1b2b25d7bd025c13102c6629dbfe3c131ba50db8..eef13cd20f19f40a5ec0eb37a01ce3cc0c7bfaec 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -897,6 +897,33 @@ static void dsa_switch_teardown(struct dsa_switch *ds)
        ds->setup = false;
 }
 
+/* First tear down the non-shared, then the shared ports. This ensures that
+ * all work items scheduled by our switchdev handlers for user ports have
+ * completed before we destroy the refcounting kept on the shared ports.
+ */
+static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
+{
+       struct dsa_port *dp;
+
+       list_for_each_entry(dp, &dst->ports, list)
+               if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
+                       dsa_port_teardown(dp);
+
+       dsa_flush_workqueue();
+
+       list_for_each_entry(dp, &dst->ports, list)
+               if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
+                       dsa_port_teardown(dp);
+}
+
+static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
+{
+       struct dsa_port *dp;
+
+       list_for_each_entry(dp, &dst->ports, list)
+               dsa_switch_teardown(dp->ds);
+}
+
 static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
 {
        struct dsa_port *dp;
@@ -923,26 +950,13 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
        return 0;
 
 teardown:
-       list_for_each_entry(dp, &dst->ports, list)
-               dsa_port_teardown(dp);
+       dsa_tree_teardown_ports(dst);
 
-       list_for_each_entry(dp, &dst->ports, list)
-               dsa_switch_teardown(dp->ds);
+       dsa_tree_teardown_switches(dst);
 
        return err;
 }
 
-static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
-{
-       struct dsa_port *dp;
-
-       list_for_each_entry(dp, &dst->ports, list)
-               dsa_port_teardown(dp);
-
-       list_for_each_entry(dp, &dst->ports, list)
-               dsa_switch_teardown(dp->ds);
-}
-
 static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
 {
        struct dsa_port *dp;
@@ -1052,6 +1066,8 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst)
 
        dsa_tree_teardown_master(dst);
 
+       dsa_tree_teardown_ports(dst);
+
        dsa_tree_teardown_switches(dst);
 
        dsa_tree_teardown_cpu_ports(dst);
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 33ab7d7af9eb4dc4e18ab99988459da1bae36ec7..a5c9bc7b66c6eb37c7552b55a9d56302a71266d2 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -170,6 +170,7 @@ void dsa_tag_driver_put(const struct dsa_device_ops *ops);
 const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);
 
 bool dsa_schedule_work(struct work_struct *work);
+void dsa_flush_workqueue(void);
 const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
 
 static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)