mm/gup: Turn compound_next() into gup_folio_next()
author    Matthew Wilcox (Oracle) <willy@infradead.org>
Thu, 23 Dec 2021 04:43:16 +0000 (23:43 -0500)
committer Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 21 Mar 2022 16:56:36 +0000 (12:56 -0400)
Convert both callers to work on folios instead of pages.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
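
In short, the renamed helper groups consecutive entries of the pages[] array
that resolve to the same folio, so both callers can drop a whole batch of pins
with a single gup_put_folio() call. A minimal sketch of the converted caller
loop, restating the hunks below rather than adding anything new:

	/*
	 * Walk the pinned page array folio by folio.  gup_folio_next()
	 * returns the folio that pages[i] belongs to and sets nr to the
	 * number of consecutive array entries covered by that folio, so
	 * one gup_put_folio() call can release all of their pins.
	 */
	unsigned long i;
	unsigned int nr;

	for (i = 0; i < npages; i += nr) {
		struct folio *folio = gup_folio_next(pages, npages, i, &nr);

		gup_put_folio(folio, nr, FOLL_PIN);
	}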
mm/gup.c

index 23b83bc16e45233fbe3ac1574a2d2d3377582166..0bde28f0543f392a329f45eef22cbff27ad81ec5 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -230,20 +230,19 @@ static inline struct page *compound_range_next(struct page *start,
        return page;
 }
 
-static inline struct page *compound_next(struct page **list,
+static inline struct folio *gup_folio_next(struct page **list,
                unsigned long npages, unsigned long i, unsigned int *ntails)
 {
-       struct page *page;
+       struct folio *folio = page_folio(list[i]);
        unsigned int nr;
 
-       page = compound_head(list[i]);
        for (nr = i + 1; nr < npages; nr++) {
-               if (compound_head(list[nr]) != page)
+               if (page_folio(list[nr]) != folio)
                        break;
        }
 
        *ntails = nr - i;
-       return page;
+       return folio;
 }
 
 /**
@@ -271,17 +270,17 @@ static inline struct page *compound_next(struct page **list,
 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
                                 bool make_dirty)
 {
-       unsigned long index;
-       struct page *head;
-       unsigned int ntails;
+       unsigned long i;
+       struct folio *folio;
+       unsigned int nr;
 
        if (!make_dirty) {
                unpin_user_pages(pages, npages);
                return;
        }
 
-       for (index = 0; index < npages; index += ntails) {
-               head = compound_next(pages, npages, index, &ntails);
+       for (i = 0; i < npages; i += nr) {
+               folio = gup_folio_next(pages, npages, i, &nr);
                /*
                 * Checking PageDirty at this point may race with
                 * clear_page_dirty_for_io(), but that's OK. Two key
@@ -302,9 +301,12 @@ void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
                 * written back, so it gets written back again in the
                 * next writeback cycle. This is harmless.
                 */
-               if (!PageDirty(head))
-                       set_page_dirty_lock(head);
-               put_compound_head(head, ntails, FOLL_PIN);
+               if (!folio_test_dirty(folio)) {
+                       folio_lock(folio);
+                       folio_mark_dirty(folio);
+                       folio_unlock(folio);
+               }
+               gup_put_folio(folio, nr, FOLL_PIN);
        }
 }
 EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
@@ -357,9 +359,9 @@ EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);
  */
 void unpin_user_pages(struct page **pages, unsigned long npages)
 {
-       unsigned long index;
-       struct page *head;
-       unsigned int ntails;
+       unsigned long i;
+       struct folio *folio;
+       unsigned int nr;
 
        /*
         * If this WARN_ON() fires, then the system *might* be leaking pages (by
@@ -369,9 +371,9 @@ void unpin_user_pages(struct page **pages, unsigned long npages)
        if (WARN_ON(IS_ERR_VALUE(npages)))
                return;
 
-       for (index = 0; index < npages; index += ntails) {
-               head = compound_next(pages, npages, index, &ntails);
-               put_compound_head(head, ntails, FOLL_PIN);
+       for (i = 0; i < npages; i += nr) {
+               folio = gup_folio_next(pages, npages, i, &nr);
+               gup_put_folio(folio, nr, FOLL_PIN);
        }
 }
 EXPORT_SYMBOL(unpin_user_pages);
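
For context, a hedged sketch of how a pin_user_pages() user typically pairs
with the converted unpin_user_pages_dirty_lock(); the buffer names and the
surrounding error handling are illustrative, not part of this commit:

	/*
	 * Illustrative only: pin a user buffer for write access, let the
	 * device or kernel code write into it, then drop the pins while
	 * marking the underlying folios dirty.  "user_addr", "npages" and
	 * "my_pages" are hypothetical caller-side names.
	 */
	int pinned = pin_user_pages_fast(user_addr, npages, FOLL_WRITE, my_pages);
	if (pinned > 0) {
		/* ... data is written into the pinned memory here ... */
		unpin_user_pages_dirty_lock(my_pages, pinned, true);
	}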