refcount_t users;
struct page **pgs;
u32 npgs;
- u16 queue_id;
- u8 need_wakeup;
u8 flags;
int id;
- struct net_device *dev;
bool zc;
spinlock_t xsk_tx_list_lock;
struct list_head xsk_tx_list;
u32 headroom;
u32 chunk_size;
u32 frame_len;
+ u16 queue_id;
+ u8 cached_need_wakeup;
+ bool uses_need_wakeup;
bool dma_need_sync;
bool unaligned;
struct xdp_umem *umem;
void *addrs;
struct device *dev;
+ struct net_device *netdev;
refcount_t users;
struct work_struct work;
struct xdp_buff_xsk *free_heads[];
}
}
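/*
 * Editor's summary of the header hunks above (descriptive, not part of the
 * patch itself): the queue_id, need_wakeup (now cached_need_wakeup) and
 * net_device fields move from struct xdp_umem into struct xsk_buff_pool,
 * and the new boolean uses_need_wakeup records whether the socket was bound
 * with XDP_USE_NEED_WAKEUP.
 */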
-void xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
- u16 queue_id)
-{
- umem->dev = dev;
- umem->queue_id = queue_id;
-
- dev_hold(dev);
-}
-
-void xdp_umem_clear_dev(struct xdp_umem *umem)
-{
- dev_put(umem->dev);
- umem->dev = NULL;
- umem->zc = false;
-}
-
static void xdp_umem_release(struct xdp_umem *umem)
{
- xdp_umem_clear_dev(umem);
-
+ umem->zc = false;
ida_simple_remove(&umem_ida, umem->id);
xdp_umem_unpin_pages(umem);
return -EINVAL;
}
- if (mr->flags & ~(XDP_UMEM_UNALIGNED_CHUNK_FLAG |
- XDP_UMEM_USES_NEED_WAKEUP))
+ if (mr->flags & ~XDP_UMEM_UNALIGNED_CHUNK_FLAG)
return -EINVAL;
if (!unaligned_chunks && !is_power_of_2(chunk_size))
#include <net/xdp_sock_drv.h>
-void xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
- u16 queue_id);
-void xdp_umem_clear_dev(struct xdp_umem *umem);
-bool xdp_umem_validate_queues(struct xdp_umem *umem);
void xdp_get_umem(struct xdp_umem *umem);
void xdp_put_umem(struct xdp_umem *umem);
void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs);
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
- struct xdp_umem *umem = pool->umem;
-
- if (umem->need_wakeup & XDP_WAKEUP_RX)
+ if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
return;
pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
- umem->need_wakeup |= XDP_WAKEUP_RX;
+ pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);
struct xdp_umem *umem = pool->umem;
struct xdp_sock *xs;
- if (umem->need_wakeup & XDP_WAKEUP_TX)
+ if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
return;
rcu_read_lock();
}
rcu_read_unlock();
- umem->need_wakeup |= XDP_WAKEUP_TX;
+ pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);
void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
- struct xdp_umem *umem = pool->umem;
-
- if (!(umem->need_wakeup & XDP_WAKEUP_RX))
+ if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
return;
pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
- umem->need_wakeup &= ~XDP_WAKEUP_RX;
+ pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);
struct xdp_umem *umem = pool->umem;
struct xdp_sock *xs;
- if (!(umem->need_wakeup & XDP_WAKEUP_TX))
+ if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
return;
rcu_read_lock();
}
rcu_read_unlock();
- umem->need_wakeup &= ~XDP_WAKEUP_TX;
+ pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);
bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
- return pool->umem->flags & XDP_UMEM_USES_NEED_WAKEUP;
+ return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);
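/*
 * Illustrative sketch, not part of the patch: roughly how a zero-copy driver
 * is expected to drive the pool-based wakeup helpers above from its Rx path.
 * The function name and the fill-queue-empty condition are hypothetical;
 * only xsk_uses_need_wakeup(), xsk_set_rx_need_wakeup() and
 * xsk_clear_rx_need_wakeup() come from the exported symbols above (declared
 * for drivers in <net/xdp_sock_drv.h>).
 */
#include <net/xdp_sock_drv.h>

static void example_update_rx_need_wakeup(struct xsk_buff_pool *pool,
					  bool fill_queue_empty)
{
	/* Sockets bound without XDP_USE_NEED_WAKEUP never need a kick. */
	if (!xsk_uses_need_wakeup(pool))
		return;

	if (fill_queue_empty)
		/* Tell user space the kernel needs a wakeup (e.g. poll())
		 * once the fill ring has been replenished.
		 */
		xsk_set_rx_need_wakeup(pool);
	else
		xsk_clear_rx_need_wakeup(pool);
}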
__poll_t mask = datagram_poll(file, sock, wait);
struct sock *sk = sock->sk;
struct xdp_sock *xs = xdp_sk(sk);
- struct xdp_umem *umem;
+ struct xsk_buff_pool *pool;
if (unlikely(!xsk_is_bound(xs)))
return mask;
- umem = xs->umem;
+ pool = xs->pool;
- if (umem->need_wakeup) {
+ if (pool->cached_need_wakeup) {
if (xs->zc)
- xsk_wakeup(xs, umem->need_wakeup);
+ xsk_wakeup(xs, pool->cached_need_wakeup);
else
/* Poll needs to drive Tx also in copy mode */
__xsk_sendmsg(sk);
goto out_unlock;
} else {
/* This xsk has its own umem. */
- xdp_umem_assign_dev(xs->umem, dev, qid);
xs->pool = xp_create_and_assign_umem(xs, xs->umem);
if (!xs->pool) {
err = -ENOMEM;
- xdp_umem_clear_dev(xs->umem);
goto out_unlock;
}
if (err) {
xp_destroy(xs->pool);
xs->pool = NULL;
- xdp_umem_clear_dev(xs->umem);
goto out_unlock;
}
}
/* Clear device references. */
xp_clear_dev(xs->pool);
- xdp_umem_clear_dev(xs->umem);
}
mutex_unlock(&xs->mutex);
}
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)
-/* Flags for the umem flags field.
- *
- * The NEED_WAKEUP flag is 1 due to the reuse of the flags field for public
- * flags. See inlude/uapi/include/linux/if_xdp.h.
- */
-#define XDP_UMEM_USES_NEED_WAKEUP BIT(1)
-
struct xdp_ring_offset_v1 {
__u64 producer;
__u64 consumer;
}
EXPORT_SYMBOL(xp_set_rxq_info);
-int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
+int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *netdev,
u16 queue_id, u16 flags)
{
- struct xdp_umem *umem = pool->umem;
bool force_zc, force_copy;
struct netdev_bpf bpf;
int err = 0;
if (force_zc && force_copy)
return -EINVAL;
- if (xsk_get_pool_from_qid(dev, queue_id))
+ if (xsk_get_pool_from_qid(netdev, queue_id))
return -EBUSY;
- err = xsk_reg_pool_at_qid(dev, pool, queue_id);
+ err = xsk_reg_pool_at_qid(netdev, pool, queue_id);
if (err)
return err;
if (flags & XDP_USE_NEED_WAKEUP) {
- umem->flags |= XDP_UMEM_USES_NEED_WAKEUP;
+ pool->uses_need_wakeup = true;
/* Tx needs to be explicitly woken up the first time.
* Also for supporting drivers that do not implement this
* feature. They will always have to call sendto().
*/
- umem->need_wakeup = XDP_WAKEUP_TX;
+ pool->cached_need_wakeup = XDP_WAKEUP_TX;
}
+ dev_hold(netdev);
+
if (force_copy)
/* For copy-mode, we are done. */
return 0;
- if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_wakeup) {
+ if (!netdev->netdev_ops->ndo_bpf ||
+ !netdev->netdev_ops->ndo_xsk_wakeup) {
err = -EOPNOTSUPP;
goto err_unreg_pool;
}
bpf.command = XDP_SETUP_XSK_POOL;
bpf.xsk.pool = pool;
bpf.xsk.queue_id = queue_id;
- err = dev->netdev_ops->ndo_bpf(dev, &bpf);
+ err = netdev->netdev_ops->ndo_bpf(netdev, &bpf);
if (err)
goto err_unreg_pool;
- umem->zc = true;
+ pool->netdev = netdev;
+ pool->queue_id = queue_id;
+ pool->umem->zc = true;
return 0;
err_unreg_pool:
if (!force_zc)
err = 0; /* fallback to copy mode */
if (err)
- xsk_clear_pool_at_qid(dev, queue_id);
+ xsk_clear_pool_at_qid(netdev, queue_id);
return err;
}
void xp_clear_dev(struct xsk_buff_pool *pool)
{
- struct xdp_umem *umem = pool->umem;
struct netdev_bpf bpf;
int err;
ASSERT_RTNL();
- if (!umem->dev)
+ if (!pool->netdev)
return;
- if (umem->zc) {
+ if (pool->umem->zc) {
bpf.command = XDP_SETUP_XSK_POOL;
bpf.xsk.pool = NULL;
- bpf.xsk.queue_id = umem->queue_id;
+ bpf.xsk.queue_id = pool->queue_id;
- err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);
+ err = pool->netdev->netdev_ops->ndo_bpf(pool->netdev, &bpf);
if (err)
- WARN(1, "failed to disable umem!\n");
+ WARN(1, "Failed to disable zero-copy!\n");
}
- xsk_clear_pool_at_qid(umem->dev, umem->queue_id);
+ xsk_clear_pool_at_qid(pool->netdev, pool->queue_id);
+ dev_put(pool->netdev);
+ pool->netdev = NULL;
}
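/*
 * Descriptive note on the two functions above (derived from this diff, not
 * added by the patch): xp_assign_dev() now takes the dev_hold(netdev)
 * reference itself, and the matching dev_put() moves into xp_clear_dev(),
 * which also resets pool->netdev to NULL so a repeated call returns early.
 * xp_clear_dev() must run under rtnl_lock(), as its ASSERT_RTNL() shows.
 */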
static void xp_release_deferred(struct work_struct *work)
du.num_pages = umem->npgs;
du.chunk_size = umem->chunk_size;
du.headroom = umem->headroom;
- du.ifindex = umem->dev ? umem->dev->ifindex : 0;
- du.queue_id = umem->queue_id;
+ du.ifindex = pool->netdev ? pool->netdev->ifindex : 0;
+ du.queue_id = pool->queue_id;
du.flags = 0;
if (umem->zc)
du.flags |= XDP_DU_F_ZEROCOPY;