#include "sja1105_sgmii.h"
#include "sja1105_tas.h"
+#define SJA1105_UNKNOWN_MULTICAST 0x010000000000ull
+
static const struct dsa_switch_ops sja1105_switch_ops;
static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd,
int from, int to, bool allow)
{
- if (allow) {
- l2_fwd[from].bc_domain |= BIT(to);
+ if (allow)
l2_fwd[from].reach_port |= BIT(to);
- l2_fwd[from].fl_domain |= BIT(to);
- } else {
- l2_fwd[from].bc_domain &= ~BIT(to);
+ else
l2_fwd[from].reach_port &= ~BIT(to);
- l2_fwd[from].fl_domain &= ~BIT(to);
- }
}
/* Structure used to temporarily transport device tree
static int sja1105_init_static_fdb(struct sja1105_private *priv)
{
+ struct sja1105_l2_lookup_entry *l2_lookup;
struct sja1105_table *table;
+ int port;
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
- /* We only populate the FDB table through dynamic
- * L2 Address Lookup entries
+ /* We only populate the FDB table through dynamic L2 Address Lookup
+ * entries, except for a special entry at the end which is a catch-all
+ * for unknown multicast and will be used to control its flooding domain.
*/
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
}
+
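+ /* Chips that cannot limit multicast flooding have no use for the
+ * catch-all entry below; on those, unknown multicast simply follows
+ * the unknown unicast flooding domain.
+ */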
+ if (!priv->info->can_limit_mcast_flood)
+ return 0;
+
+ table->entries = kcalloc(1, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = 1;
+ l2_lookup = table->entries;
+
+ /* All L2 multicast addresses have an odd first octet */
+ l2_lookup[0].macaddr = SJA1105_UNKNOWN_MULTICAST;
+ l2_lookup[0].mask_macaddr = SJA1105_UNKNOWN_MULTICAST;
+ l2_lookup[0].lockeds = true;
+ l2_lookup[0].index = SJA1105_MAX_L2_LOOKUP_COUNT - 1;
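+ /* Keeping the catch-all at the very last index is meant to leave the
+ * lower indices free for regular FDB entries.
+ */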
+
+ /* Flood multicast to every port by default */
+ for (port = 0; port < priv->ds->num_ports; port++)
+ if (!dsa_is_unused_port(priv->ds, port))
+ l2_lookup[0].destports |= BIT(port);
+
return 0;
}
sja1105_port_allow_traffic(l2fwd, i, upstream, true);
sja1105_port_allow_traffic(l2fwd, upstream, i, true);
+
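+ /* In standalone mode, flood broadcast and unknown traffic only
+ * towards the upstream (CPU) port; the flood domains are widened
+ * per port once bridge port flags are offloaded.
+ */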
+ l2fwd[i].bc_domain = BIT(upstream);
+ l2fwd[i].fl_domain = BIT(upstream);
+
+ l2fwd[upstream].bc_domain |= BIT(i);
+ l2fwd[upstream].fl_domain |= BIT(i);
}
/* Next 8 entries define VLAN PCP mapping from ingress to egress.
* Create a one-to-one mapping.
*/
if (!(l2_lookup.destports & BIT(port)))
continue;
+
+ /* We need to hide the FDB entry for unknown multicast */
+ if (l2_lookup.macaddr == SJA1105_UNKNOWN_MULTICAST &&
+ l2_lookup.mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
+ continue;
+
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
/* We need to hide the dsa_8021q VLANs from the user. */
case BR_STATE_LEARNING:
mac[port].ingress = true;
mac[port].egress = false;
- mac[port].dyn_learn = true;
+ mac[port].dyn_learn = !!(priv->learn_ena & BIT(port));
break;
case BR_STATE_FORWARDING:
mac[port].ingress = true;
mac[port].egress = true;
- mac[port].dyn_learn = true;
+ mac[port].dyn_learn = !!(priv->learn_ena & BIT(port));
break;
default:
dev_err(ds->dev, "invalid STP state: %d\n", state);
sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
}
+static int sja1105_port_set_learning(struct sja1105_private *priv, int port,
+ bool enabled)
+{
+ struct sja1105_mac_config_entry *mac;
+ int rc;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ mac[port].dyn_learn = enabled;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
+ &mac[port], true);
+ if (rc)
+ return rc;
+
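+ /* Remember the requested state: sja1105_bridge_stp_state_set() looks
+ * at this bitmask when re-enabling learning for a port.
+ */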
+ if (enabled)
+ priv->learn_ena |= BIT(port);
+ else
+ priv->learn_ena &= ~BIT(port);
+
+ return 0;
+}
+
+/* Common function for unicast and broadcast flood configuration.
+ * Flooding is configured between each {ingress, egress} port pair, and since
+ * the bridge's semantics are those of "egress flooding", it means we must
+ * enable flooding towards this port from all ingress ports that are in the
+ * same bridge. In practice, we just enable flooding from all possible ingress
+ * ports regardless of whether they're in the same bridge or not, since the
+ * reach_port configuration will not allow flooded frames to leak across
+ * bridging domains anyway.
+ */
+static int sja1105_port_ucast_bcast_flood(struct sja1105_private *priv, int to,
+ struct switchdev_brport_flags flags)
+{
+ struct sja1105_l2_forwarding_entry *l2_fwd;
+ int from, rc;
+
+ l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
+
+ for (from = 0; from < priv->ds->num_ports; from++) {
+ if (dsa_is_unused_port(priv->ds, from))
+ continue;
+ if (from == to)
+ continue;
+
+ /* Unicast */
+ if (flags.mask & BR_FLOOD) {
+ if (flags.val & BR_FLOOD)
+ l2_fwd[from].fl_domain |= BIT(to);
+ else
+ l2_fwd[from].fl_domain &= ~BIT(to);
+ }
+ /* Broadcast */
+ if (flags.mask & BR_BCAST_FLOOD) {
+ if (flags.val & BR_BCAST_FLOOD)
+ l2_fwd[from].bc_domain |= BIT(to);
+ else
+ l2_fwd[from].bc_domain &= ~BIT(to);
+ }
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
+ from, &l2_fwd[from], true);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_l2_lookup_entry *l2_lookup;
+ struct sja1105_table *table;
+ int match;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
+ l2_lookup = table->entries;
+
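+ /* Find the catch-all entry installed by sja1105_init_static_fdb() */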
+ for (match = 0; match < table->entry_count; match++)
+ if (l2_lookup[match].macaddr == SJA1105_UNKNOWN_MULTICAST &&
+ l2_lookup[match].mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
+ break;
+
+ if (match == table->entry_count) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Could not find FDB entry for unknown multicast");
+ return -ENOSPC;
+ }
+
+ if (flags.val & BR_MCAST_FLOOD)
+ l2_lookup[match].destports |= BIT(to);
+ else
+ l2_lookup[match].destports &= ~BIT(to);
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ l2_lookup[match].index,
+ &l2_lookup[match],
+ true);
+}
+
+static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
+ BR_BCAST_FLOOD))
+ return -EINVAL;
+
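+ /* Without the catch-all FDB entry, unknown multicast follows the same
+ * flooding domain as unknown unicast, so the two settings cannot
+ * diverge on these chips.
+ */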
+ if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD) &&
+ !priv->info->can_limit_mcast_flood) {
+ bool multicast = !!(flags.val & BR_MCAST_FLOOD);
+ bool unicast = !!(flags.val & BR_FLOOD);
+
+ if (unicast != multicast) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "This chip cannot configure multicast flooding independently of unicast");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int sja1105_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ if (flags.mask & BR_LEARNING) {
+ bool learn_ena = !!(flags.val & BR_LEARNING);
+
+ rc = sja1105_port_set_learning(priv, port, learn_ena);
+ if (rc)
+ return rc;
+ }
+
+ if (flags.mask & (BR_FLOOD | BR_BCAST_FLOOD)) {
+ rc = sja1105_port_ucast_bcast_flood(priv, port, flags);
+ if (rc)
+ return rc;
+ }
+
+ /* For chips that can't offload BR_MCAST_FLOOD independently, there
+ * is nothing to do here, we ensured the configuration is in sync by
+ * offloading BR_FLOOD.
+ */
+ if (flags.mask & BR_MCAST_FLOOD && priv->info->can_limit_mcast_flood) {
+ rc = sja1105_port_mcast_flood(priv, port, flags, extack);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
static const struct dsa_switch_ops sja1105_switch_ops = {
.get_tag_protocol = sja1105_get_tag_protocol,
.setup = sja1105_setup,
.port_fdb_del = sja1105_fdb_del,
.port_bridge_join = sja1105_bridge_join,
.port_bridge_leave = sja1105_bridge_leave,
+ .port_pre_bridge_flags = sja1105_port_pre_bridge_flags,
+ .port_bridge_flags = sja1105_port_bridge_flags,
.port_stp_state_set = sja1105_bridge_stp_state_set,
.port_vlan_filtering = sja1105_vlan_filtering,
.port_vlan_add = sja1105_vlan_add,
.static_ops = sja1105e_table_ops,
.dyn_ops = sja1105et_dyn_ops,
.qinq_tpid = ETH_P_8021Q,
+ .can_limit_mcast_flood = false,
.ptp_ts_bits = 24,
.ptpegr_ts_bytes = 4,
.num_cbs_shapers = SJA1105ET_MAX_CBS_COUNT,
.static_ops = sja1105t_table_ops,
.dyn_ops = sja1105et_dyn_ops,
.qinq_tpid = ETH_P_8021Q,
+ .can_limit_mcast_flood = false,
.ptp_ts_bits = 24,
.ptpegr_ts_bytes = 4,
.num_cbs_shapers = SJA1105ET_MAX_CBS_COUNT,
.static_ops = sja1105p_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.qinq_tpid = ETH_P_8021AD,
+ .can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
.static_ops = sja1105q_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.qinq_tpid = ETH_P_8021AD,
+ .can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
.static_ops = sja1105r_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.qinq_tpid = ETH_P_8021AD,
+ .can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
.dyn_ops = sja1105pqrs_dyn_ops,
.regs = &sja1105pqrs_regs,
.qinq_tpid = ETH_P_8021AD,
+ .can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,