Revert "mm/gup: small refactoring: simplify try_grab_page()"
author     John Hubbard <jhubbard@nvidia.com>
           Wed, 2 Feb 2022 03:23:17 +0000 (19:23 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 3 Feb 2022 14:51:42 +0000 (06:51 -0800)
This reverts commit 184128408b89b804c45d80e71b416366bd49fe6f

That commit did a refactoring that effectively combined fast and slow
gup paths (again).  And that was again incorrect, for two reasons:

 a) Fast gup and slow gup get reference counts on pages in different
    ways and with different goals: see Linus' writeup in commit
    06559c7f1be8 ("Revert "mm/gup: remove try_get_page(), call
    try_get_compound_head() directly""), and

 b) try_grab_compound_head() also has a specific check for
    "FOLL_LONGTERM && !is_pinned(page)", which assumes that the caller
    can fall back to slow gup. This resulted in new failures, as
    recently reported by Will McVicker [1] (see the sketch below).

But (a) has problems too, even though they may not have been reported
yet.  So just revert this.
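
For reference, the check that (b) points at reads roughly like this in
5.15-era mm/gup.c (an abridged sketch, not necessarily the exact code in
this tree; the helper quoted above as is_pinned() is spelled
is_pinnable_page() upstream):

__maybe_unused struct page *try_grab_compound_head(struct page *page,
                                                   int refs, unsigned int flags)
{
        if (flags & FOLL_GET)
                return try_get_compound_head(page, refs);
        else if (flags & FOLL_PIN) {
                /*
                 * A FOLL_LONGTERM pin of a page that is not pinnable
                 * (e.g. CMA or ZONE_MOVABLE) is refused here: fast gup
                 * returns NULL and relies on its caller retrying via the
                 * slow gup path, which can migrate the page out of the
                 * way first.  A try_grab_page() caller has no such retry
                 * path, which is how the failures in [1] surfaced.
                 */
                if (unlikely((flags & FOLL_LONGTERM) &&
                             !is_pinnable_page(page)))
                        return NULL;

                /* ... pin accounting elided; not what (b) is about ... */
                return page;
        }

        WARN_ON_ONCE(1);
        return NULL;
}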

Link: https://lore.kernel.org/r/20220131203504.3458775-1-willmcvicker@google.com
Fixes: 184128408b89 ("mm/gup: small refactoring: simplify try_grab_page()")
Reported-and-tested-by: Will McVicker <willmcvicker@google.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Minchan Kim <minchan@google.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: stable@vger.kernel.org # 5.15
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
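
For context on the pin accounting that the restored try_grab_page() below
open-codes: a FOLL_PIN grab of a non-compound page bumps the refcount by
GUP_PIN_COUNTING_BIAS (1024) rather than by 1, which is what lets
page_maybe_dma_pinned() later detect pins heuristically. A minimal
userspace model of just that scheme follows (the constant and the
>= GUP_PIN_COUNTING_BIAS check mirror their upstream definitions;
struct fake_page and the model_* helpers are made-up stand-ins, not
kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Same value as GUP_PIN_COUNTING_BIAS in include/linux/mm.h (1 << 10). */
#define GUP_PIN_COUNTING_BIAS 1024

/* Stand-in for struct page; only the refcount matters for this model. */
struct fake_page {
        int refcount;
};

/* FOLL_GET-style grab: one plain reference. */
static void model_get(struct fake_page *page)
{
        page->refcount += 1;
}

/* FOLL_PIN-style grab of a non-compound page: add the bias instead. */
static void model_pin(struct fake_page *page)
{
        page->refcount += GUP_PIN_COUNTING_BIAS;
}

/* Mirrors the non-hugepage branch of page_maybe_dma_pinned(). */
static bool model_maybe_dma_pinned(const struct fake_page *page)
{
        return page->refcount >= GUP_PIN_COUNTING_BIAS;
}

int main(void)
{
        struct fake_page page = { .refcount = 1 };

        model_get(&page);
        printf("after get: refcount=%d maybe_pinned=%d\n",
               page.refcount, model_maybe_dma_pinned(&page));

        model_pin(&page);
        printf("after pin: refcount=%d maybe_pinned=%d\n",
               page.refcount, model_maybe_dma_pinned(&page));

        return 0;
}

Because the check is only a threshold, enough plain gets on one page can
also read as "maybe pinned"; the kernel accepts that imprecision by design.
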
mm/gup.c

index f0af462ac1e2b6695dda14c085a856b498188e4b..a9d4d724aef7497aba6ae6a787f20ff8dc8fa371 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -124,8 +124,8 @@ static inline struct page *try_get_compound_head(struct page *page, int refs)
  * considered failure, and furthermore, a likely bug in the caller, so a warning
  * is also emitted.
  */
-struct page *try_grab_compound_head(struct page *page,
-                                   int refs, unsigned int flags)
+__maybe_unused struct page *try_grab_compound_head(struct page *page,
+                                                  int refs, unsigned int flags)
 {
        if (flags & FOLL_GET)
                return try_get_compound_head(page, refs);
@@ -208,10 +208,35 @@ static void put_compound_head(struct page *page, int refs, unsigned int flags)
  */
 bool __must_check try_grab_page(struct page *page, unsigned int flags)
 {
-       if (!(flags & (FOLL_GET | FOLL_PIN)))
-               return true;
+       WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == (FOLL_GET | FOLL_PIN));
 
-       return try_grab_compound_head(page, 1, flags);
+       if (flags & FOLL_GET)
+               return try_get_page(page);
+       else if (flags & FOLL_PIN) {
+               int refs = 1;
+
+               page = compound_head(page);
+
+               if (WARN_ON_ONCE(page_ref_count(page) <= 0))
+                       return false;
+
+               if (hpage_pincount_available(page))
+                       hpage_pincount_add(page, 1);
+               else
+                       refs = GUP_PIN_COUNTING_BIAS;
+
+               /*
+                * Similar to try_grab_compound_head(): even if using the
+                * hpage_pincount_add/_sub() routines, be sure to
+                * *also* increment the normal page refcount field at least
+                * once, so that the page really is pinned.
+                */
+               page_ref_add(page, refs);
+
+               mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED, 1);
+       }
+
+       return true;
 }
 
 /**