 	unsigned int frag_offset;
 	struct page *frag_page;
 	long frag_users;
+	u32 xdp_mem_id;
 
 	/*
 	 * Data structure for allocation side
 struct page_pool *page_pool_create(const struct page_pool_params *params);
 
+struct xdp_mem_info;
+
 #ifdef CONFIG_PAGE_POOL
 void page_pool_destroy(struct page_pool *pool);
-void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
+void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
+			   struct xdp_mem_info *mem);
 void page_pool_release_page(struct page_pool *pool, struct page *page);
 void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 			     int count);
 }
 
 static inline void page_pool_use_xdp_mem(struct page_pool *pool,
-					 void (*disconnect)(void *))
+					 void (*disconnect)(void *),
+					 struct xdp_mem_info *mem)
 {
 }
 
 static inline void page_pool_release_page(struct page_pool *pool,
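The header changes above add a u32 xdp_mem_id field to struct page_pool, forward-declare struct xdp_mem_info so the prototypes can take a pointer to it without including the XDP headers, and extend page_pool_use_xdp_mem() with a third parameter (plus a matching no-op stub for !CONFIG_PAGE_POOL builds). For reference, struct xdp_mem_info is the small descriptor defined in include/net/xdp.h, roughly:

	struct xdp_mem_info {
		u32 type; /* enum xdp_mem_type, but known size type */
		u32 id;
	};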
 	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
 }
 
-void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *))
+void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
+			   struct xdp_mem_info *mem)
 {
 	refcount_inc(&pool->user_cnt);
 	pool->disconnect = disconnect;
+	pool->xdp_mem_id = mem->id;
 }
 
 void page_pool_destroy(struct page_pool *pool)
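With the implementation above, registering a page_pool as an XDP memory model records the allocated mem id on the pool itself. A minimal sketch of what that enables, using a hypothetical helper that is not part of this diff: the memory-model descriptor can be rebuilt from nothing but the pool pointer.

	/* Hypothetical helper, for illustration only: reconstruct the
	 * xdp_mem_info descriptor from the pool alone.
	 */
	static void example_fill_mem_info(struct page_pool *pool,
					  struct xdp_mem_info *mem)
	{
		mem->type = MEM_TYPE_PAGE_POOL;
		mem->id = pool->xdp_mem_id;
	}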
 	}
 
 	if (type == MEM_TYPE_PAGE_POOL)
-		page_pool_use_xdp_mem(allocator, mem_allocator_disconnect);
+		page_pool_use_xdp_mem(allocator, mem_allocator_disconnect, mem);
 
 	mutex_unlock(&mem_id_lock);
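The final hunk updates the call site in the XDP memory-model registration path so that the freshly assigned xdp_mem_info is passed through to the pool. Drivers do not call page_pool_use_xdp_mem() directly; they reach it via the registration helpers. A rough sketch of that driver-side path, with placeholder sizing and error handling trimmed (it assumes xdp_rxq_info_reg() has already been called on the rxq):

	#include <net/page_pool.h>
	#include <net/xdp.h>

	static int example_setup_rx_queue(struct xdp_rxq_info *xdp_rxq,
					  struct device *dev)
	{
		/* Placeholder pool sizing; real drivers size this per RX ring. */
		struct page_pool_params pp_params = {
			.order		= 0,
			.pool_size	= 256,
			.nid		= NUMA_NO_NODE,
			.dev		= dev,
			.dma_dir	= DMA_FROM_DEVICE,
		};
		struct page_pool *pool;
		int err;

		pool = page_pool_create(&pp_params);
		if (IS_ERR(pool))
			return PTR_ERR(pool);

		/* This lands in the hunk above: the registration code now also
		 * stores the assigned mem id in pool->xdp_mem_id via
		 * page_pool_use_xdp_mem().
		 */
		err = xdp_rxq_info_reg_mem_model(xdp_rxq, MEM_TYPE_PAGE_POOL, pool);
		if (err)
			page_pool_destroy(pool);

		return err;
	}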