git.baikalelectronics.ru Git - kernel.git/commitdiff
xsk: Move addrs from buffer pool to umem
authorMagnus Karlsson <magnus.karlsson@intel.com>
Fri, 28 Aug 2020 08:26:21 +0000 (10:26 +0200)
committerDaniel Borkmann <daniel@iogearbox.net>
Mon, 31 Aug 2020 19:15:04 +0000 (21:15 +0200)
Replicate the addrs pointer in the buffer pool to the umem. This mapping
will be the same for all buffer pools sharing the same umem. In the
buffer pool we leave the addrs pointer for performance reasons.

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Link: https://lore.kernel.org/bpf/1598603189-32145-8-git-send-email-magnus.karlsson@intel.com
include/net/xdp_sock.h
net/xdp/xdp_umem.c
net/xdp/xsk_buff_pool.c

index 9a61d05ec1326b018c180a9d0a2e708db76b4d46..126d24364b5a28e73878429add0be484a078221f 100644 (file)
@@ -18,6 +18,7 @@ struct xsk_queue;
 struct xdp_buff;
 
 struct xdp_umem {
+       void *addrs;
        u64 size;
        u32 headroom;
        u32 chunk_size;
index 77515925f3c51344774d4640c91cc084633d6002..77604c30aa0f86da1149cfa8d77c0099d2c691da 100644 (file)
@@ -39,11 +39,27 @@ static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
        }
 }
 
+static void xdp_umem_addr_unmap(struct xdp_umem *umem)
+{
+       vunmap(umem->addrs);
+       umem->addrs = NULL;
+}
+
+static int xdp_umem_addr_map(struct xdp_umem *umem, struct page **pages,
+                            u32 nr_pages)
+{
+       umem->addrs = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
+       if (!umem->addrs)
+               return -ENOMEM;
+       return 0;
+}
+
 static void xdp_umem_release(struct xdp_umem *umem)
 {
        umem->zc = false;
        ida_simple_remove(&umem_ida, umem->id);
 
+       xdp_umem_addr_unmap(umem);
        xdp_umem_unpin_pages(umem);
 
        xdp_umem_unaccount_pages(umem);
@@ -192,8 +208,14 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
        if (err)
                goto out_account;
 
+       err = xdp_umem_addr_map(umem, umem->pgs, umem->npgs);
+       if (err)
+               goto out_unpin;
+
        return 0;
 
+out_unpin:
+       xdp_umem_unpin_pages(umem);
 out_account:
        xdp_umem_unaccount_pages(umem);
        return err;
index dbd913ef49286b40fc113abcc2cfb20675e35f4d..c56387439f679c2beb634fa5303b711b3e54eea7 100644 (file)
@@ -35,26 +35,11 @@ void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
        spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
 }
 
-static void xp_addr_unmap(struct xsk_buff_pool *pool)
-{
-       vunmap(pool->addrs);
-}
-
-static int xp_addr_map(struct xsk_buff_pool *pool,
-                      struct page **pages, u32 nr_pages)
-{
-       pool->addrs = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
-       if (!pool->addrs)
-               return -ENOMEM;
-       return 0;
-}
-
 void xp_destroy(struct xsk_buff_pool *pool)
 {
        if (!pool)
                return;
 
-       xp_addr_unmap(pool);
        kvfree(pool->heads);
        kvfree(pool);
 }
@@ -64,7 +49,6 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
 {
        struct xsk_buff_pool *pool;
        struct xdp_buff_xsk *xskb;
-       int err;
        u32 i;
 
        pool = kvzalloc(struct_size(pool, free_heads, umem->chunks),
@@ -86,6 +70,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
        pool->frame_len = umem->chunk_size - umem->headroom -
                XDP_PACKET_HEADROOM;
        pool->umem = umem;
+       pool->addrs = umem->addrs;
        INIT_LIST_HEAD(&pool->free_list);
        INIT_LIST_HEAD(&pool->xsk_tx_list);
        spin_lock_init(&pool->xsk_tx_list_lock);
@@ -103,9 +88,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
                pool->free_heads[i] = xskb;
        }
 
-       err = xp_addr_map(pool, umem->pgs, umem->npgs);
-       if (!err)
-               return pool;
+       return pool;
 
 out:
        xp_destroy(pool);