userfaultfd: hugetlbfs: fix __mcopy_atomic_hugetlb retry/error processing
author		Mike Kravetz <mike.kravetz@oracle.com>
		Wed, 22 Feb 2017 23:42:58 +0000 (15:42 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Thu, 23 Feb 2017 00:41:28 +0000 (16:41 -0800)
The new routine copy_huge_page_from_user() uses kmap_atomic() to map
PAGE_SIZE pages.  However, this prevents page faults in the subsequent
call to copy_from_user().  This is OK in the case where the routine is
called with mmap_sem held.  However, in another case we want to allow
page faults.  So, add a new argument allow_pagefault to indicate if the
routine should allow page faults.
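
For illustration only, a minimal sketch of the caller contract the new
argument creates: the first copy is attempted with mmap_sem held, so page
faults must stay disabled (allow_pagefault == false); if that copy fails,
the caller drops mmap_sem and retries with page faults allowed.  The helper
name copy_with_retry and the trimmed error handling are hypothetical; the
real flow lives in hugetlb_mcopy_atomic_pte() and __mcopy_atomic_hugetlb()
as shown in the diffs below.

	#include <linux/mm.h>	/* copy_huge_page_from_user(), mm_struct */

	/*
	 * Hypothetical, simplified sketch of how the two call sites use
	 * the new allow_pagefault argument; not the literal kernel code.
	 */
	static long copy_with_retry(struct mm_struct *dst_mm, struct page *page,
				    unsigned long src_addr, unsigned int nr_pages)
	{
		long err;

		/* First attempt: mmap_sem is held, page faults must be disabled. */
		err = copy_huge_page_from_user(page,
					       (const void __user *)src_addr,
					       nr_pages, false);
		if (likely(!err))
			return 0;

		/* Fallback: drop mmap_sem and retry with page faults allowed. */
		up_read(&dst_mm->mmap_sem);
		err = copy_huge_page_from_user(page,
					       (const void __user *)src_addr,
					       nr_pages, true);
		down_read(&dst_mm->mmap_sem);

		/* Nonzero return means some bytes were not copied. */
		return err ? -EFAULT : 0;
	}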

[dan.carpenter@oracle.com: unmap the correct pointer]
Link: http://lkml.kernel.org/r/20170113082608.GA3548@mwanda
[akpm@linux-foundation.org: kunmap() takes a page*, per Hugh]
Link: http://lkml.kernel.org/r/20161216144821.5183-20-aarcange@redhat.com
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Michael Rapoport <RAPOPORT@il.ibm.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mm.h
mm/hugetlb.c
mm/memory.c
mm/userfaultfd.c

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6bc7f2c1a6d1b7eb1a5db985dabec6b6d650d3e7..c3e2be2b3296024327b4a56412b72b7f1838a250 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2426,7 +2426,8 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
                                unsigned int pages_per_huge_page);
 extern long copy_huge_page_from_user(struct page *dst_page,
                                const void __user *usr_src,
-                               unsigned int pages_per_huge_page);
+                               unsigned int pages_per_huge_page,
+                               bool allow_pagefault);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
 extern struct page_ext_operations debug_guardpage_ops;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index dec628b26f5993d7095d250c3cc29c9ba532908c..5d20af921a30212b7a7a99a89e2116af9c592809 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3973,7 +3973,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 
                ret = copy_huge_page_from_user(page,
                                                (const void __user *) src_addr,
-                                               pages_per_huge_page(h));
+                                               pages_per_huge_page(h), false);
 
                /* fallback to copy_from_user outside mmap_sem */
                if (unlikely(ret)) {
diff --git a/mm/memory.c b/mm/memory.c
index 4ade940d105c4a104fae44dbff7912b077a527c9..d7676a68c80ab5d54ff6fd11447f4ac83fe5340f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4155,7 +4155,8 @@ void copy_user_huge_page(struct page *dst, struct page *src,
 
 long copy_huge_page_from_user(struct page *dst_page,
                                const void __user *usr_src,
-                               unsigned int pages_per_huge_page)
+                               unsigned int pages_per_huge_page,
+                               bool allow_pagefault)
 {
        void *src = (void *)usr_src;
        void *page_kaddr;
@@ -4163,11 +4164,17 @@ long copy_huge_page_from_user(struct page *dst_page,
        unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
 
        for (i = 0; i < pages_per_huge_page; i++) {
-               page_kaddr = kmap_atomic(dst_page + i);
+               if (allow_pagefault)
+                       page_kaddr = kmap(dst_page + i);
+               else
+                       page_kaddr = kmap_atomic(dst_page + i);
                rc = copy_from_user(page_kaddr,
                                (const void __user *)(src + i * PAGE_SIZE),
                                PAGE_SIZE);
-               kunmap_atomic(page_kaddr);
+               if (allow_pagefault)
+                       kunmap(dst_page + i);
+               else
+                       kunmap_atomic(page_kaddr);
 
                ret_val -= (PAGE_SIZE - rc);
                if (rc)
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index ef0495bfd17a983d4cbd08d51edf7168e3fe3023..09976745be2307a97945449584a2696983bc75eb 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -274,7 +274,7 @@ retry:
 
                        err = copy_huge_page_from_user(page,
                                                (const void __user *)src_addr,
-                                               pages_per_huge_page(h));
+                                               pages_per_huge_page(h), true);
                        if (unlikely(err)) {
                                err = -EFAULT;
                                goto out;