};
}
+static int
+mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct netlink_ext_ack *extack)
+{
+ union mlxsw_sp_l3addr old_saddr, new_saddr;
+ union mlxsw_sp_l3addr old_daddr, new_daddr;
+ struct ip_tunnel_parm new_parms;
+ bool update_tunnel = false;
+ bool update_decap = false;
+ bool update_nhs = false;
+ int err = 0;
+
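+ /* Snapshot the netdevice's current parameters and diff them against
+ * the ones cached in the IPIP entry to pick the narrowest update
+ * that covers the change.
+ */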
+ new_parms = mlxsw_sp_ipip_netdev_parms(ipip_entry->ol_dev);
+
+ new_saddr = mlxsw_sp_ipip_parms_saddr(MLXSW_SP_L3_PROTO_IPV4,
+ new_parms);
+ old_saddr = mlxsw_sp_ipip_parms_saddr(MLXSW_SP_L3_PROTO_IPV4,
+ ipip_entry->parms);
+ new_daddr = mlxsw_sp_ipip_parms_daddr(MLXSW_SP_L3_PROTO_IPV4,
+ new_parms);
+ old_daddr = mlxsw_sp_ipip_parms_daddr(MLXSW_SP_L3_PROTO_IPV4,
+ ipip_entry->parms);
+
+ if (!mlxsw_sp_l3addr_eq(&new_saddr, &old_saddr)) {
+ u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
+
+ /* Since the local address has changed, if there is another
+ * tunnel with a matching saddr, both need to be demoted.
+ */
+ if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp,
+ MLXSW_SP_L3_PROTO_IPV4,
+ new_saddr, ul_tb_id,
+ ipip_entry)) {
+ mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
+ return 0;
+ }
+
+ update_tunnel = true;
+ } else if (mlxsw_sp_ipip_parms_okey(ipip_entry->parms) !=
+ mlxsw_sp_ipip_parms_okey(new_parms)) {
+ update_tunnel = true;
+ } else if (!mlxsw_sp_l3addr_eq(&new_daddr, &old_daddr)) {
+ update_nhs = true;
+ } else if (mlxsw_sp_ipip_parms_ikey(ipip_entry->parms) !=
+ mlxsw_sp_ipip_parms_ikey(new_parms)) {
+ update_decap = true;
+ }
+
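+ /* A changed saddr or okey invalidates the encapsulation config as a
+ * whole; a changed daddr only requires the nexthops to be updated;
+ * a changed ikey only affects the decap entry.
+ */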
+ if (update_tunnel)
+ err = __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
+ true, true, true,
+ extack);
+ else if (update_nhs)
+ err = __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
+ false, false, true,
+ extack);
+ else if (update_decap)
+ err = __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
+ false, false, false,
+ extack);
+
+ ipip_entry->parms = new_parms;
+ return err;
+}
+
static const struct mlxsw_sp_ipip_ops mlxsw_sp_ipip_gre4_ops = {
.dev_type = ARPHRD_IPGRE,
.ul_proto = MLXSW_SP_L3_PROTO_IPV4,
.fib_entry_op = mlxsw_sp_ipip_fib_entry_op_gre4,
.can_offload = mlxsw_sp_ipip_can_offload_gre4,
.ol_loopback_config = mlxsw_sp_ipip_ol_loopback_config_gre4,
+ .ol_netdev_change = mlxsw_sp_ipip_ol_netdev_change_gre4,
};
const struct mlxsw_sp_ipip_ops *mlxsw_sp_ipip_ops_arr[] = {
struct mlxsw_sp_rif_ipip_lb *ol_lb;
struct mlxsw_sp_fib_entry *decap_fib_entry;
struct list_head ipip_list_node;
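+ /* Tunnel parameters as last seen by the driver, diffed against on
+ * NETDEV_CHANGE to find out what changed.
+ */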
+ struct ip_tunnel_parm parms;
};
struct mlxsw_sp_ipip_ops {
struct mlxsw_sp_ipip_entry *ipip_entry,
enum mlxsw_reg_ralue_op op,
u32 tunnel_index);
+
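+ /* Called when the parameters of an offloaded tunnel netdevice change,
+ * to resolve the change in the offloaded state.
+ */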
+ int (*ol_netdev_change)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct netlink_ext_ack *extack);
};
extern const struct mlxsw_sp_ipip_ops *mlxsw_sp_ipip_ops_arr[];
return __dev_get_by_index(net, tun->parms.link);
}
-static u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
+u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
ipip_entry->ipipt = ipipt;
ipip_entry->ol_dev = ol_dev;
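+ /* Cache the current tunnel parameters for later change detection. */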
+ ipip_entry->parms = mlxsw_sp_ipip_netdev_parms(ol_dev);
return ipip_entry;
kfree(ipip_entry);
}
-static bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1,
- const union mlxsw_sp_l3addr *addr2)
-{
- return !memcmp(addr1, addr2, sizeof(*addr1));
-}
-
static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
const enum mlxsw_sp_l3proto ul_proto,
true, true, false, extack);
}
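+/* Handle NETDEV_CHANGE on the overlay device of an offloaded tunnel. */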
+static int
+mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ol_dev,
+ struct netlink_ext_ack *extack)
+{
+ const struct mlxsw_sp_ipip_ops *ipip_ops;
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+ if (!ipip_entry)
+ /* A change might make a tunnel eligible for offloading, but
+ * that is currently not implemented. What falls to the slow path
+ * stays there.
+ */
+ return 0;
+
+ /* A change might make a tunnel no longer eligible for offloading. */
+ if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
+ ipip_entry->ipipt)) {
+ mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
+ return 0;
+ }
+
+ ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
+ return ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
+}
+
void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry)
{
ol_dev,
extack);
return 0;
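+ /* Among other things, NETDEV_CHANGE is emitted when tunnel
+ * parameters change, e.g. through `ip tunnel change`.
+ */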
+ case NETDEV_CHANGE:
+ extack = info->extack;
+ return mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
+ ol_dev, extack);
}
return 0;
}
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif);
+u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev);
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif);
void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh);
+static inline bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1,
+ const union mlxsw_sp_l3addr *addr2)
+{
+ return !memcmp(addr1, addr2, sizeof(*addr1));
+}
+
#endif /* _MLXSW_ROUTER_H_*/