u32 headroom;
u32 chunk_size;
u32 chunks;
+ u32 npgs;
struct user_struct *user;
refcount_t users;
- struct page **pgs;
- u32 npgs;
u8 flags;
- int id;
bool zc;
+ struct page **pgs;
+ int id;
struct list_head xsk_dma_list;
};
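
The xdp_umem hunk packs the u32 members together (npgs joins the other u32s, and pgs/id drop below the sub-word flags and zc) so the compiler leaves fewer alignment holes. A minimal userspace sketch of the effect, with illustrative stand-in names rather than the kernel struct itself:

    /* A u32 followed by a pointer leaves a 4-byte hole on 64-bit
     * targets; pairing the u32s fills it and drops the tail padding.
     */
    #include <stdio.h>

    struct holey {               /* mirrors the old ordering */
            unsigned int chunks; /* offset 0 */
            void *user;          /* offset 8, hole at bytes 4..7 */
            unsigned int npgs;   /* offset 16, tail-padded to 24 */
    };

    struct packed_u32s {         /* mirrors the new ordering */
            unsigned int chunks; /* offset 0 */
            unsigned int npgs;   /* offset 4, fills the hole */
            void *user;          /* offset 8 */
    };

    int main(void)
    {
            printf("old: %zu bytes, new: %zu bytes\n",
                   sizeof(struct holey), sizeof(struct packed_u32s));
            return 0; /* prints "old: 24 bytes, new: 16 bytes" on LP64 */
    }

The actual layouts can be confirmed on a built kernel with pahole -C xdp_umem vmlinux, which prints per-member offsets and any holes.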
struct xdp_sock {
/* struct sock must be the first member of struct xdp_sock */
struct sock sk;
- struct xsk_queue *rx;
+ struct xsk_queue *rx ____cacheline_aligned_in_smp;
struct net_device *dev;
struct xdp_umem *umem;
struct list_head flush_node;
enum {
XSK_READY = 0,
XSK_BOUND,
XSK_UNBOUND,
} state;
- /* Protects multiple processes in the control path */
- struct mutex mutex;
+
struct xsk_queue *tx ____cacheline_aligned_in_smp;
struct list_head tx_list;
/* Mutual exclusion of NAPI TX thread and sendmsg error paths
 * in the SKB destructor callback.
 */
spinlock_t tx_completion_lock;
struct list_head map_list;
/* Protects map_list */
spinlock_t map_list_lock;
+ /* Protects multiple processes in the control path */
+ struct mutex mutex;
struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
};
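In the xdp_sock hunk, rx gains ____cacheline_aligned_in_smp so the RX-side hot members start on their own cache line, matching the already-aligned tx group, while the control-path mutex moves down next to the other cold members. A userspace analogue of the annotation, using made-up fake_xdp_sock names and a hard-coded 64-byte line (the kernel macro aligns to SMP_CACHE_BYTES on SMP builds):

    /* Give the rx and tx hot groups their own cache lines so a writer
     * on one path does not bounce the line the other path is reading
     * (false sharing).
     */
    #include <stdio.h>
    #include <stddef.h>

    #define cacheline_aligned __attribute__((aligned(64)))

    struct fake_xdp_sock {
            char sk[88];                /* stand-in for struct sock */
            void *rx cacheline_aligned; /* RX hot group starts here */
            void *dev;
            void *tx cacheline_aligned; /* TX hot group starts here */
            void *tx_list;
    };

    int main(void)
    {
            printf("rx @ %zu, tx @ %zu\n",
                   offsetof(struct fake_xdp_sock, rx),
                   offsetof(struct fake_xdp_sock, tx));
            return 0; /* rx @ 128, tx @ 192: disjoint 64-byte lines */
    }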
struct xsk_buff_pool {
- struct xsk_queue *fq;
- struct xsk_queue *cq;
+ /* Members only used in the control path first. */
+ struct device *dev;
+ struct net_device *netdev;
+ struct list_head xsk_tx_list;
+ /* Protects modifications to the xsk_tx_list */
+ spinlock_t xsk_tx_list_lock;
+ refcount_t users;
+ struct xdp_umem *umem;
+ struct work_struct work;
struct list_head free_list;
+ u32 heads_cnt;
+ u16 queue_id;
+
+ /* Data path members as close to free_heads at the end as possible. */
+ struct xsk_queue *fq ____cacheline_aligned_in_smp;
+ struct xsk_queue *cq;
/* For performance reasons, each buff pool has its own array of dma_pages
 * even when they are identical.
 */
dma_addr_t *dma_pages;
u64 addrs_cnt;
u32 free_list_cnt;
u32 dma_pages_cnt;
- u32 heads_cnt;
u32 free_heads_cnt;
u32 headroom;
u32 chunk_size;
u32 frame_len;
- u16 queue_id;
u8 cached_need_wakeup;
bool uses_need_wakeup;
bool dma_need_sync;
bool unaligned;
- struct xdp_umem *umem;
void *addrs;
- struct device *dev;
- struct net_device *netdev;
- struct list_head xsk_tx_list;
- /* Protects modifications to the xsk_tx_list */
- spinlock_t xsk_tx_list_lock;
- refcount_t users;
- struct work_struct work;
struct xdp_buff_xsk *free_heads[];
};
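
The xsk_buff_pool hunk makes the split explicit: control-path members come first, and the data-path members are pushed toward the flexible free_heads[] array at the end, so the fields the fast path touches sit near the array entries it walks. The trailing flexible array also lets a single allocation cover struct and array together; the kernel's pool creation does the equivalent with kvzalloc() and struct_size(). A userspace sketch with illustrative fake_pool/pool_create names:

    /* One calloc() covers the header and free_heads[] in a single
     * contiguous block; the flexible array member must come last.
     */
    #include <stdlib.h>

    struct fake_pool {
            unsigned int heads_cnt;      /* control path */
            unsigned int free_heads_cnt; /* data path, next to array */
            void *free_heads[];          /* flexible array member */
    };

    static struct fake_pool *pool_create(unsigned int entries)
    {
            struct fake_pool *p =
                    calloc(1, sizeof(*p) +
                              entries * sizeof(p->free_heads[0]));
            if (p)
                    p->heads_cnt = entries;
            return p;
    }

    int main(void)
    {
            struct fake_pool *p = pool_create(64);
            free(p);
            return 0;
    }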