hugetlb: don't delete vma_lock in hugetlb MADV_DONTNEED processing
author      Mike Kravetz <mike.kravetz@oracle.com>
            Mon, 14 Nov 2022 23:55:06 +0000 (15:55 -0800)
committer   Andrew Morton <akpm@linux-foundation.org>
            Wed, 30 Nov 2022 22:49:40 +0000 (14:49 -0800)
madvise(MADV_DONTNEED) ends up calling zap_page_range() to clear page
tables associated with the address range.  For hugetlb vmas,
zap_page_range will call __unmap_hugepage_range_final.  However,
__unmap_hugepage_range_final assumes the passed vma is about to be removed
and deletes the vma_lock to prevent pmd sharing as the vma is on the way
out.  In the case of madvise(MADV_DONTNEED) the vma remains, but the
missing vma_lock prevents pmd sharing and could potentially lead to issues
with truncation/fault races.
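
For reference, a minimal user-space sequence that exercises this path
might look like the sketch below (illustrative only: the mapping size is
an assumption, and 2MB hugetlb pages must be available on the system):

#define _GNU_SOURCE
#include <string.h>
#include <sys/mman.h>

#define LEN (4UL << 21)	/* four 2MB hugetlb pages (assumed size) */

int main(void)
{
	char *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	memset(p, 1, LEN);			/* populate the page tables */
	if (madvise(p, LEN, MADV_DONTNEED))	/* zap; the vma remains */
		return 1;
	memset(p, 2, LEN);			/* fault the range back in */
	return 0;
}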

This issue was originally reported here [1] as a BUG triggered in
page_try_dup_anon_rmap.  Prior to the introduction of the hugetlb
vma_lock, __unmap_hugepage_range_final cleared the VM_MAYSHARE flag to
prevent pmd sharing.  Subsequent faults on this vma were confused:
VM_MAYSHARE indicates a sharable vma, but because the flag had been
cleared, page_mapping was not set for new pages added to the page table.
This resulted in pages that appeared anonymous in a VM_SHARED vma and
triggered the BUG.
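
The fault-side confusion can be sketched as follows (a simplification of
the hugetlb no-page fault logic, not the literal kernel code):

/*
 * Simplified sketch of hugetlb fault handling for a missing page.
 * With VM_MAYSHARE wrongly cleared, a fault on what is really a
 * VM_SHARED hugetlb vma took the anonymous path below.
 */
if (vma->vm_flags & VM_MAYSHARE) {
	/* shared: add the page to the file's page cache; page_mapping is set */
} else {
	/*
	 * anonymous: page_mapping stays NULL; a later
	 * page_try_dup_anon_rmap() on such a page triggers the BUG
	 */
}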

Address this issue by adding a new zap flag ZAP_FLAG_UNMAP to indicate an
unmap call from unmap_vmas().  This is used to mark the 'final' unmapping
of a hugetlb vma.  When called via MADV_DONTNEED, this flag is not set
and the vma_lock is not deleted.
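
Concretely, the two callers then differ only in the zap flags they pass
down; a sketch of the call paths (zap_page_range() supplies no
zap_details, so its zap_flags end up empty):

/*
 * exit/unmap:    unmap_vmas()
 *                  zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP
 *                  -> __unmap_hugepage_range_final() frees the vma_lock
 *
 * MADV_DONTNEED: zap_page_range()
 *                  no zap_details, so zap_flags == 0
 *                  -> __unmap_hugepage_range_final() keeps the vma_lock
 */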

[1] https://lore.kernel.org/lkml/CAO4mrfdLMXsao9RF4fUE8-Wfde8xmjsKrTNMNC9wjUb6JudD0g@mail.gmail.com/

Link: https://lkml.kernel.org/r/20221114235507.294320-3-mike.kravetz@oracle.com
Fixes: ee7e0c34358a ("mm: enable MADV_DONTNEED for hugetlb mappings")
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Reported-by: Wei Chen <harperchen1110@gmail.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Naoya Horiguchi <naoya.horiguchi@linux.dev>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm.h
mm/hugetlb.c
mm/memory.c

diff --git a/include/linux/mm.h b/include/linux/mm.h
index cbfb489d381c25428a52ca27db192377e257e5a5..974ccca609d2c4b9c2016f5dd4f342c310b4d2a9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1868,6 +1868,8 @@ struct zap_details {
  * default, the flag is not set.
  */
 #define  ZAP_FLAG_DROP_MARKER        ((__force zap_flags_t) BIT(0))
+/* Set in unmap_vmas() to indicate a final unmap call.  Only used by hugetlb */
+#define  ZAP_FLAG_UNMAP              ((__force zap_flags_t) BIT(1))
 
 #ifdef CONFIG_MMU
 extern bool can_do_mlock(void);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f1385c3b6c9638119c29fd240a8fb501d7f9ba13..e36ca75311a5c8f4900f22685afcae1b775a57da 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5206,17 +5206,22 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
 
        __unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);
 
-       /*
-        * Unlock and free the vma lock before releasing i_mmap_rwsem.  When
-        * the vma_lock is freed, this makes the vma ineligible for pmd
-        * sharing.  And, i_mmap_rwsem is required to set up pmd sharing.
-        * This is important as page tables for this unmapped range will
-        * be asynchrously deleted.  If the page tables are shared, there
-        * will be issues when accessed by someone else.
-        */
-       __hugetlb_vma_unlock_write_free(vma);
-
-       i_mmap_unlock_write(vma->vm_file->f_mapping);
+       if (zap_flags & ZAP_FLAG_UNMAP) {       /* final unmap */
+               /*
+                * Unlock and free the vma lock before releasing i_mmap_rwsem.
+                * When the vma_lock is freed, this makes the vma ineligible
+                * for pmd sharing.  And, i_mmap_rwsem is required to set up
+                * pmd sharing.  This is important as page tables for this
+                * unmapped range will be asynchronously deleted.  If the page
+                * tables are shared, there will be issues when accessed by
+                * someone else.
+                */
+               __hugetlb_vma_unlock_write_free(vma);
+               i_mmap_unlock_write(vma->vm_file->f_mapping);
+       } else {
+               i_mmap_unlock_write(vma->vm_file->f_mapping);
+               hugetlb_vma_unlock_write(vma);
+       }
 }
 
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
diff --git a/mm/memory.c b/mm/memory.c
index 9bc5edc35725a7e44c44dccb2b25d437872c8451..8c8420934d60345dbbc9b1b5861b0c470140b710 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1711,7 +1711,7 @@ void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
 {
        struct mmu_notifier_range range;
        struct zap_details details = {
-               .zap_flags = ZAP_FLAG_DROP_MARKER,
+               .zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP,
                /* Careful - we need to zap private pages too! */
                .even_cows = true,
        };