git.baikalelectronics.ru Git - kernel.git/commitdiff
mm: rename __page_frag functions to __page_frag_cache, drop order from drain
author: Alexander Duyck <alexander.h.duyck@intel.com>
Wed, 11 Jan 2017 00:58:09 +0000 (16:58 -0800)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Wed, 11 Jan 2017 02:31:55 +0000 (18:31 -0800)
This patch does two things.

First it goes through and renames the __page_frag prefixed functions to
__page_frag_cache so that we can be clear that we are draining or
refilling the cache, not the frags themselves.

Second, we drop the order parameter from __page_frag_cache_drain since we
don't actually need to pass it: all fragments are either order 0 or part of
a compound page, so the order can be recovered from the page itself.

Link: http://lkml.kernel.org/r/20170104023954.13451.5678.stgit@localhost.localdomain
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
drivers/net/ethernet/intel/igb/igb_main.c
include/linux/gfp.h
mm/page_alloc.c

index a761001308dcc190d8c9bad57dad40f09dc49caa..1515abaa5ac9cab53ef4aab0ee05104c5bd2a61f 100644 (file)
@@ -3962,8 +3962,8 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
                                     PAGE_SIZE,
                                     DMA_FROM_DEVICE,
                                     DMA_ATTR_SKIP_CPU_SYNC);
-               __page_frag_drain(buffer_info->page, 0,
-                                 buffer_info->pagecnt_bias);
+               __page_frag_cache_drain(buffer_info->page,
+                                       buffer_info->pagecnt_bias);
 
                buffer_info->page = NULL;
        }
@@ -6991,7 +6991,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
                dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
                                     PAGE_SIZE, DMA_FROM_DEVICE,
                                     DMA_ATTR_SKIP_CPU_SYNC);
-               __page_frag_drain(page, 0, rx_buffer->pagecnt_bias);
+               __page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
        }
 
        /* clear contents of rx_buffer */
index ed77a86fbbb053243f8a8ce6bfd67036170a28d0..0fe0b6295ab58edfe6745467487fe19beba0d723 100644 (file)
@@ -499,8 +499,7 @@ extern void free_hot_cold_page(struct page *page, bool cold);
 extern void free_hot_cold_page_list(struct list_head *list, bool cold);
 
 struct page_frag_cache;
-extern void __page_frag_drain(struct page *page, unsigned int order,
-                             unsigned int count);
+extern void __page_frag_cache_drain(struct page *page, unsigned int count);
 extern void *page_frag_alloc(struct page_frag_cache *nc,
                             unsigned int fragsz, gfp_t gfp_mask);
 extern void page_frag_free(void *addr);
index 097893ffe1940a0708874f5eaadddf83f73f74ce..d604d2596b7bed41b9748ee3242571b771db4d5e 100644 (file)
@@ -3896,8 +3896,8 @@ EXPORT_SYMBOL(free_pages);
  * drivers to provide a backing region of memory for use as either an
  * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
  */
-static struct page *__page_frag_refill(struct page_frag_cache *nc,
-                                      gfp_t gfp_mask)
+static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
+                                            gfp_t gfp_mask)
 {
        struct page *page = NULL;
        gfp_t gfp = gfp_mask;
@@ -3917,19 +3917,20 @@ static struct page *__page_frag_refill(struct page_frag_cache *nc,
        return page;
 }
 
-void __page_frag_drain(struct page *page, unsigned int order,
-                      unsigned int count)
+void __page_frag_cache_drain(struct page *page, unsigned int count)
 {
        VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
 
        if (page_ref_sub_and_test(page, count)) {
+               unsigned int order = compound_order(page);
+
                if (order == 0)
                        free_hot_cold_page(page, false);
                else
                        __free_pages_ok(page, order);
        }
 }
-EXPORT_SYMBOL(__page_frag_drain);
+EXPORT_SYMBOL(__page_frag_cache_drain);
 
 void *page_frag_alloc(struct page_frag_cache *nc,
                      unsigned int fragsz, gfp_t gfp_mask)
@@ -3940,7 +3941,7 @@ void *page_frag_alloc(struct page_frag_cache *nc,
 
        if (unlikely(!nc->va)) {
 refill:
-               page = __page_frag_refill(nc, gfp_mask);
+               page = __page_frag_cache_refill(nc, gfp_mask);
                if (!page)
                        return NULL;