mm: Introduce mm_struct.has_pinned
author     Peter Xu <peterx@redhat.com>
Fri, 25 Sep 2020 22:25:57 +0000 (18:25 -0400)
committer  Linus Torvalds <torvalds@linux-foundation.org>
Sun, 27 Sep 2020 18:21:35 +0000 (11:21 -0700)
(Commit message mostly collected from Jason Gunthorpe)

Reduce the chance of false positives from page_maybe_dma_pinned() by
keeping track of whether the mm_struct has ever been used with
pin_user_pages().  This allows cases that might drive up the page
refcount to avoid any penalty from handling dma_pinned pages.

Future work is planned to provide a more sophisticated solution, likely
turning it into a real counter.  For now, make it an atomic_t but use it
as a boolean for simplicity.
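
As an illustration only (a sketch against this patch, not code from the
commit; the helper name is hypothetical), a consumer of
page_maybe_dma_pinned() can use the new field to skip the refcount
heuristic entirely for an mm that has never pinned anything:

#include <linux/mm.h>

/* Hypothetical helper, sketched against this patch -- not part of the commit. */
static inline bool page_might_be_dma_pinned(struct mm_struct *mm,
					    struct page *page)
{
	/* This mm never went through pin_user_pages(): nothing can be pinned. */
	if (!atomic_read(&mm->has_pinned))
		return false;

	/* Otherwise fall back to the existing refcount-based heuristic. */
	return page_maybe_dma_pinned(page);
}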

Suggested-by: Jason Gunthorpe <jgg@ziepe.ca>
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mm_types.h
kernel/fork.c
mm/gup.c

index 496c3ff97cce7abea90e18f4a6cced9be0847769..ed028af3cb19b8f31d4969c78ff6ca0ac123c4d1 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -436,6 +436,16 @@ struct mm_struct {
                 */
                atomic_t mm_count;
 
+               /**
+                * @has_pinned: Whether this mm has pinned any pages.  This can
+                * be either replaced in the future by @pinned_vm when it
+                * becomes stable, or grow into a counter on its own. We're
+                * aggressive on this bit now - even if the pinned pages were
+                * unpinned later on, we'll still keep this bit set for the
+                * lifecycle of this mm just for simplicity.
+                */
+               atomic_t has_pinned;
+
 #ifdef CONFIG_MMU
                atomic_long_t pgtables_bytes;   /* PTE page table pages */
 #endif
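
The @has_pinned comment above describes a sticky boolean: the first pin
sets it and nothing ever clears it again.  A rough sketch of that
lifetime, assuming a valid user address and the standard pin/unpin entry
points (illustration only, not part of the commit):

#include <linux/mm.h>
#include <linux/sched.h>

/* Sketch: unpinning does not clear the hint; it stays for the mm's lifetime. */
static void has_pinned_is_sticky(unsigned long addr)
{
	struct page *page;

	if (pin_user_pages_fast(addr, 1, FOLL_WRITE, &page) == 1)
		unpin_user_page(page);		/* pin released ... */

	/* ... yet the mm still remembers that it pinned at some point. */
	WARN_ON(!atomic_read(&current->mm->has_pinned));
}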
index 49677d668de4da7b5115b6a9d8caacaf643fceba..e65d8192d08081712cd54a8331328042071e59c9 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1011,6 +1011,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
        mm_pgtables_bytes_init(mm);
        mm->map_count = 0;
        mm->locked_vm = 0;
+       atomic_set(&mm->has_pinned, 0);
        atomic64_set(&mm->pinned_vm, 0);
        memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
        spin_lock_init(&mm->page_table_lock);
index 578bf5bd8bf83ab31f8f364a58034c09390595b9..dfe781d2ad4c16ddaa09aea3c476e6d1cb8fe953 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1255,6 +1255,9 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
                BUG_ON(*locked != 1);
        }
 
+       if (flags & FOLL_PIN)
+               atomic_set(&current->mm->has_pinned, 1);
+
        /*
         * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
         * is to set FOLL_GET if the caller wants pages[] filled in (but has
@@ -2660,6 +2663,9 @@ static int internal_get_user_pages_fast(unsigned long start, int nr_pages,
                                       FOLL_FAST_ONLY)))
                return -EINVAL;
 
+       if (gup_flags & FOLL_PIN)
+               atomic_set(&current->mm->has_pinned, 1);
+
        if (!(gup_flags & FOLL_FAST_ONLY))
                might_lock_read(&current->mm->mmap_lock);
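
A plain atomic_set() is enough in both paths above because the flag only
ever transitions 0 -> 1 and is never cleared.  One conceivable refinement
(a hypothetical variant, not something this patch does) would be to test
before writing, so that repeated pins do not keep dirtying the shared
mm_struct cacheline:

#include <linux/mm_types.h>

/* Hypothetical setter (not in this patch): write only on the 0 -> 1 edge.
 * A race between two first-time pinners is benign -- both store 1. */
static inline void mm_note_has_pinned(struct mm_struct *mm)
{
	if (!atomic_read(&mm->has_pinned))
		atomic_set(&mm->has_pinned, 1);
}

The two hunks above would then call mm_note_has_pinned(current->mm)
instead of the open-coded atomic_set().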