userfaultfd: hugetlbfs: add copy_huge_page_from_user for hugetlb userfaultfd support
author    Mike Kravetz <mike.kravetz@oracle.com>
          Wed, 22 Feb 2017 23:42:49 +0000 (15:42 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 23 Feb 2017 00:41:28 +0000 (16:41 -0800)
userfaultfd UFFDIO_COPY allows user-level code to copy data into a page
at fault time.  The data is copied from user space into a newly
allocated huge page.  The new routine copy_huge_page_from_user performs
this copy, one base page at a time.
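Like copy_from_user(), the new helper returns the number of bytes it
failed to copy, so zero means the entire huge page was filled.  As a
minimal sketch (not part of this patch), a hugetlb UFFDIO_COPY path
could consume it roughly as follows; the surrounding names (dst_vma,
dst_addr, src_addr, alloc_huge_page) are assumptions drawn from the
hugetlb code of this era, not from this commit:

	struct hstate *h = hstate_vma(dst_vma);
	struct page *page;
	long uncopied;

	/* Allocate the destination huge page for the faulting range. */
	page = alloc_huge_page(dst_vma, dst_addr, 0);
	if (IS_ERR(page))
		return PTR_ERR(page);

	/* Zero means all pages_per_huge_page(h) base pages were copied. */
	uncopied = copy_huge_page_from_user(page,
			(const void __user *)src_addr,
			pages_per_huge_page(h));
	if (uncopied)
		return -EFAULT;	/* partial copy: do not map the page */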

Link: http://lkml.kernel.org/r/20161216144821.5183-17-aarcange@redhat.com
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Michael Rapoport <RAPOPORT@il.ibm.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mm.h
mm/memory.c

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3787f047a0986e00c52d23c0328b0366e96bb102..6bc7f2c1a6d1b7eb1a5db985dabec6b6d650d3e7 100644
@@ -2424,6 +2424,9 @@ extern void clear_huge_page(struct page *page,
 extern void copy_user_huge_page(struct page *dst, struct page *src,
                                unsigned long addr, struct vm_area_struct *vma,
                                unsigned int pages_per_huge_page);
+extern long copy_huge_page_from_user(struct page *dst_page,
+                               const void __user *usr_src,
+                               unsigned int pages_per_huge_page);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
 extern struct page_ext_operations debug_guardpage_ops;
diff --git a/mm/memory.c b/mm/memory.c
index ececdc4a2892e654c9e0ae8959e76b4810c02d4a..4ade940d105c4a104fae44dbff7912b077a527c9 100644
@@ -4152,6 +4152,31 @@ void copy_user_huge_page(struct page *dst, struct page *src,
                copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
        }
 }
+
+long copy_huge_page_from_user(struct page *dst_page,
+                               const void __user *usr_src,
+                               unsigned int pages_per_huge_page)
+{
+       void *src = (void *)usr_src;
+       void *page_kaddr;
+       unsigned long i, rc = 0;
+       unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
+
+       for (i = 0; i < pages_per_huge_page; i++) {
+               page_kaddr = kmap_atomic(dst_page + i);
+               rc = copy_from_user(page_kaddr,
+                               (const void __user *)(src + i * PAGE_SIZE),
+                               PAGE_SIZE);
+               kunmap_atomic(page_kaddr);
+
+               ret_val -= (PAGE_SIZE - rc);
+               if (rc)
+                       break;
+
+               cond_resched();
+       }
+       return ret_val;
+}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
 #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
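
For context, the user-space side that this series ultimately enables is
the existing UFFDIO_COPY ioctl, now usable on hugetlbfs registrations.
A hedged illustration follows: uffd, fault_addr, src_buf and huge_sz
are placeholders for state the fault-handling monitor already tracks,
and for hugetlbfs both dst and len must be multiples of the huge page
size:

	#include <linux/userfaultfd.h>
	#include <sys/ioctl.h>
	#include <stdint.h>
	#include <stddef.h>
	#include <errno.h>

	static int resolve_huge_fault(int uffd, void *fault_addr,
				      const void *src_buf, size_t huge_sz)
	{
		struct uffdio_copy copy;

		/* Round the faulting address down to the huge page start. */
		copy.dst = (uintptr_t)fault_addr & ~((uintptr_t)huge_sz - 1);
		copy.src = (uintptr_t)src_buf;
		copy.len = huge_sz;
		copy.mode = 0;
		copy.copy = 0;

		if (ioctl(uffd, UFFDIO_COPY, &copy) == -1)
			return -errno;	/* copy.copy also carries -errno */
		return 0;
	}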