userfaultfd: wp: support write protection for userfault vma range
author     Shaohua Li <shli@fb.com>
           Tue, 7 Apr 2020 03:06:09 +0000 (20:06 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Tue, 7 Apr 2020 17:43:39 +0000 (10:43 -0700)
Add an API to enable/disable write protection for a vma range.  Unlike
mprotect, this doesn't split/merge vmas.
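
For illustration, a minimal sketch of how a kernel-side caller might drive
the new API (hypothetical code, assuming the usual mm headers; the
UFFDIO_WRITEPROTECT ioctl that exposes this to userspace arrives in a later
patch of the series):

    int uffd_wp_example(struct mm_struct *mm, unsigned long start,
                        unsigned long len, bool *mmap_changing)
    {
            int err;

            /* Arm write protection on the page-aligned range. */
            err = mwriteprotect_range(mm, start, len, true, mmap_changing);
            if (err)
                    return err;  /* -EAGAIN (mappings changing) or -ENOENT */

            /* ... userspace now observes uffd-wp faults on writes ... */

            /* Resolve (drop) the write protection once serviced. */
            return mwriteprotect_range(mm, start, len, false, mmap_changing);
    }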

[peterx@redhat.com:
 - use the helper to find VMA;
 - return -ENOENT if not found to match mcopy case;
 - use the new MM_CP_UFFD_WP* flags for change_protection;
 - check against mmap_changing for failures;
 - replace vma_find_uffd with find_dst_vma]
Signed-off-by: Shaohua Li <shli@fb.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Bobby Powers <bobbypowers@gmail.com>
Cc: Brian Geffon <bgeffon@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Denis Plotnikov <dplotnikov@virtuozzo.com>
Cc: "Dr . David Alan Gilbert" <dgilbert@redhat.com>
Cc: Martin Cracauer <cracauer@cons.org>
Cc: Marty McFadden <mcfadden8@llnl.gov>
Cc: Maya Gokhale <gokhale2@llnl.gov>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Link: http://lkml.kernel.org/r/20200220163112.11409-13-peterx@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/userfaultfd_k.h
mm/userfaultfd.c

diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index dcd33172b728c806d32270c88e5648637ce3536d..a8e5f3ea9bb2bb175bb8a3b60219fc40a265ac9f 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -41,6 +41,9 @@ extern ssize_t mfill_zeropage(struct mm_struct *dst_mm,
                              unsigned long dst_start,
                              unsigned long len,
                              bool *mmap_changing);
+extern int mwriteprotect_range(struct mm_struct *dst_mm,
+                              unsigned long start, unsigned long len,
+                              bool enable_wp, bool *mmap_changing);
 
 /* mm helpers */
 static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 7d6ab05be019896892ab3e793ab1364dba2a5adb..512576e171ce56791444965937f228f6ef7192b5 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -638,3 +638,57 @@ ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
 {
        return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing, 0);
 }
+
+int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
+                       unsigned long len, bool enable_wp, bool *mmap_changing)
+{
+       struct vm_area_struct *dst_vma;
+       pgprot_t newprot;
+       int err;
+
+       /*
+        * Sanitize the command parameters:
+        */
+       BUG_ON(start & ~PAGE_MASK);
+       BUG_ON(len & ~PAGE_MASK);
+
+       /* Does the address range wrap, or is the span zero-sized? */
+       BUG_ON(start + len <= start);
+
+       down_read(&dst_mm->mmap_sem);
+
+       /*
+        * If memory mappings are changing because of non-cooperative
+        * operation (e.g. mremap) running in parallel, bail out and
+        * request the user to retry later
+        */
+       err = -EAGAIN;
+       if (mmap_changing && READ_ONCE(*mmap_changing))
+               goto out_unlock;
+
+       err = -ENOENT;
+       dst_vma = find_dst_vma(dst_mm, start, len);
+       /*
+        * Make sure the vma is not shared, that the dst range is
+        * both valid and fully within a single existing vma.
+        */
+       if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
+               goto out_unlock;
+       if (!userfaultfd_wp(dst_vma))
+               goto out_unlock;
+       if (!vma_is_anonymous(dst_vma))
+               goto out_unlock;
+
+       if (enable_wp)
+               newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
+       else
+               newprot = vm_get_page_prot(dst_vma->vm_flags);
+
+       change_protection(dst_vma, start, start + len, newprot,
+                         enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);
+
+       err = 0;
+out_unlock:
+       up_read(&dst_mm->mmap_sem);
+       return err;
+}
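
For context: the MM_CP_UFFD_WP / MM_CP_UFFD_WP_RESOLVE flags passed to
change_protection() above are consumed per-PTE by an earlier patch in this
series. Paraphrased and simplified from change_pte_range() in mm/mprotect.c
(not part of this diff):

    if (uffd_wp) {
            /* MM_CP_UFFD_WP: write-protect and mark the pte as uffd-wp */
            ptent = pte_wrprotect(ptent);
            ptent = pte_mkuffd_wp(ptent);
    } else if (uffd_wp_resolve) {
            /*
             * MM_CP_UFFD_WP_RESOLVE: clear only the uffd-wp bit; the
             * write bit is left for the page fault path so COW still
             * works as usual.
             */
            ptent = pte_clear_uffd_wp(ptent);
    }

Note how mwriteprotect_range() pairs this with newprot: VM_WRITE is masked
out of the page protection when arming, while the vma's normal protection
is restored when resolving.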