.required_clks = MT7621_CLKS_BITMAP,
.required_pctl = false,
.offload_version = 2,
+ .hash_offset = 2,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),

.required_clks = MT7622_CLKS_BITMAP,
.required_pctl = false,
.offload_version = 2,
+ .hash_offset = 2,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),

.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
.offload_version = 2,
+ .hash_offset = 2,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),

.caps = MT7986_CAPS,
.required_clks = MT7986_CLKS_BITMAP,
.required_pctl = false,
+ .hash_offset = 4,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma_v2),
.rxd_size = sizeof(struct mtk_rx_dma_v2),
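
Each per-SoC data entry gains a .hash_offset describing how many consecutive PPE hardware slots share one software flow bucket: 2 on the first-generation PPE (MT7621/MT7622/MT7623) and 4 on the v2 PPE of MT7986. An abridged sketch of where the field sits, assuming the mtk_soc_data layout in mtk_eth_soc.h (only members visible in the hunks above are shown; types approximated):

/* Abridged sketch; the real struct mtk_soc_data in mtk_eth_soc.h
 * carries many more members than shown here.
 */
struct mtk_soc_data {
	u32	required_clks;		/* clock bitmap, e.g. MT7621_CLKS_BITMAP */
	bool	required_pctl;
	u8	offload_version;	/* PPE generation */
	u8	hash_offset;		/* hw slots per foe_flow bucket: 2 or 4 */
};
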
enable * MTK_PPE_CACHE_CTL_EN);
}

-static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
+static u32 mtk_ppe_hash_entry(struct mtk_eth *eth, struct mtk_foe_entry *e)
{
u32 hv1, hv2, hv3;
u32 hash;
hash = (hash >> 24) | ((hash & 0xffffff) << 8);
hash ^= hv1 ^ hv2 ^ hv3;
hash ^= hash >> 16;
- hash <<= 1;
+ hash <<= (ffs(eth->soc->hash_offset) - 1);
hash &= MTK_PPE_ENTRIES - 1;
return hash;
}
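
The shift that spreads hash values across the table is now derived from hash_offset via ffs(). Since ffs() returns the 1-based index of the lowest set bit, hash_offset == 2 reproduces the old hard-coded "hash <<= 1", while hash_offset == 4 widens the spacing for MT7986. A standalone userspace check of that arithmetic (illustrative only, not driver code):

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int offsets[] = { 2, 4 };

	for (int i = 0; i < 2; i++)
		printf("hash_offset=%u -> hash <<= %d\n",
		       offsets[i], ffs(offsets[i]) - 1);
	/* prints:
	 *   hash_offset=2 -> hash <<= 1
	 *   hash_offset=4 -> hash <<= 2
	 */
	return 0;
}
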
int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
+ const struct mtk_soc_data *soc = ppe->eth->soc;
u32 hash;
if (type == MTK_PPE_PKT_TYPE_BRIDGE)
return mtk_foe_entry_commit_l2(ppe, entry);
- hash = mtk_ppe_hash_entry(&entry->data);
+ hash = mtk_ppe_hash_entry(ppe->eth, &entry->data);
entry->hash = 0xffff;
spin_lock_bh(&ppe_lock);
- hlist_add_head(&entry->list, &ppe->foe_flow[hash / 2]);
+ hlist_add_head(&entry->list, &ppe->foe_flow[hash / soc->hash_offset]);
spin_unlock_bh(&ppe_lock);
return 0;
}
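
Because each foe_flow hlist head now covers hash_offset consecutive hardware slots, hardware slot "hash" maps to software bucket hash / hash_offset. The indexing expression repeated throughout the patch could be read as the following hypothetical helper (not part of the patch; mtk_foe_flow_bucket is an invented name):

/* Hypothetical helper, for illustration only. */
static inline struct hlist_head *
mtk_foe_flow_bucket(struct mtk_ppe *ppe, u16 hash)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;

	return &ppe->foe_flow[hash / soc->hash_offset];
}
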
mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
u16 hash)
{
+ const struct mtk_soc_data *soc = ppe->eth->soc;
struct mtk_flow_entry *flow_info;
struct mtk_foe_entry foe, *hwe;
struct mtk_foe_mac_info *l2;
flow_info->l2_data.base_flow = entry;
flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
flow_info->hash = hash;
- hlist_add_head(&flow_info->list, &ppe->foe_flow[hash / 2]);
+ hlist_add_head(&flow_info->list,
+ &ppe->foe_flow[hash / soc->hash_offset]);
hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
hwe = &ppe->foe_table[hash];

void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
{
- struct hlist_head *head = &ppe->foe_flow[hash / 2];
+ const struct mtk_soc_data *soc = ppe->eth->soc;
+ struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset];
struct mtk_foe_entry *hwe = &ppe->foe_table[hash];
struct mtk_flow_entry *entry;
struct mtk_foe_bridge key = {};
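
Note, as the two hunks above show, that the hardware FOE table is still indexed by the raw hash (ppe->foe_table[hash]); only the software foe_flow lists are coarser, with one hlist head per hash_offset hardware slots (ppe->foe_flow[hash / soc->hash_offset]).
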
struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
int version)
{
+ const struct mtk_soc_data *soc = eth->soc;
struct device *dev = eth->dev;
struct mtk_foe_entry *foe;
struct mtk_ppe *ppe;
+ u32 foe_flow_size;
ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
if (!ppe)
ppe->foe_table = foe;
+ foe_flow_size = (MTK_PPE_ENTRIES / soc->hash_offset) *
+ sizeof(*ppe->foe_flow);
+ ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);
+ if (!ppe->foe_flow)
+ return NULL;
+
mtk_ppe_debugfs_init(ppe);
return ppe;
}
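
Finally, the foe_flow table is sized at runtime from the same field, replacing the previous hard-coded MTK_PPE_ENTRIES / 2 sizing (devm_kzalloc also zero-fills the hlist heads, so no explicit INIT_HLIST_HEAD pass is needed). A quick userspace sketch of the arithmetic; the MTK_PPE_ENTRIES value below is assumed for illustration, the real macro lives in mtk_ppe.h:

#include <stdio.h>

/* Illustrative value only; the real macro is defined in mtk_ppe.h. */
#define MTK_PPE_ENTRIES	8192

int main(void)
{
	unsigned int offsets[] = { 2, 4 };

	for (int i = 0; i < 2; i++)
		printf("hash_offset=%u -> %u foe_flow buckets\n",
		       offsets[i], MTK_PPE_ENTRIES / offsets[i]);
	return 0;
}
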