filemap: Cache the value of vm_flags
author    Matthew Wilcox (Oracle) <willy@infradead.org>
          Wed, 25 May 2022 18:23:45 +0000 (14:23 -0400)
committer Matthew Wilcox (Oracle) <willy@infradead.org>
          Thu, 9 Jun 2022 20:24:25 +0000 (16:24 -0400)
After we have unlocked the mmap_lock for I/O, the file is pinned, but
the VMA is not.  Reading vmf->vma->vm_flags after that point can
therefore be a use-after-free.  It's not a terribly interesting
use-after-free as it can only read one bit, which is used to decide
whether to read 2MB or 4MB.  But it upsets the automated tools and
it's generally bad practice anyway, so let's fix it by caching the
flags before the lock can be dropped.
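
To illustrate the pattern outside the kernel, here is a minimal
userspace sketch of the same hazard.  The struct definitions,
VM_RAND_READ value and drop_mmap_lock_for_io() helper are simplified
stand-ins, not the kernel's real definitions; the point is only the
ordering: snapshot vm_flags into a local while the VMA is still
protected, then test the local copy after the lock may have been
dropped.

#include <stdio.h>

#define VM_RAND_READ 0x00001000UL  /* illustrative value, not the kernel's header */

struct vm_area_struct {
	unsigned long vm_flags;
};

struct vm_fault {
	struct vm_area_struct *vma;
};

/* Stand-in for maybe_unlock_mmap_for_io(): once this returns, another
 * thread may unmap and free vmf->vma. */
static void drop_mmap_lock_for_io(struct vm_fault *vmf)
{
	(void)vmf;
}

static int wants_random_read(struct vm_fault *vmf)
{
	/* Snapshot the flags while the lock still protects the VMA... */
	unsigned long vm_flags = vmf->vma->vm_flags;

	drop_mmap_lock_for_io(vmf);

	/* ...so this test reads the local copy, never the (possibly
	 * freed) VMA.  Testing vmf->vma->vm_flags here would be the
	 * use-after-free the patch removes. */
	return !!(vm_flags & VM_RAND_READ);
}

int main(void)
{
	struct vm_area_struct vma = { .vm_flags = VM_RAND_READ };
	struct vm_fault vmf = { .vma = &vma };

	printf("random read hint: %d\n", wants_random_read(&vmf));
	return 0;
}

Caching works because the unsigned long copy lives on the caller's
stack and remains valid regardless of the VMA's lifetime, which is all
the readahead heuristics below need.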

Reported-by: syzbot+5b96d55e5b54924c77ad@syzkaller.appspotmail.com
Fixes: bb527e717ffd ("mm/filemap: Support VM_HUGEPAGE for file mappings")
Cc: stable@vger.kernel.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
mm/filemap.c

index 9daeaab36081fd0ab86a9aaf4f8b7b62cfd45086..ac3775c1ce4cd853857d8838d3a86f087f3b9aa4 100644 (file)
@@ -2991,11 +2991,12 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
        struct address_space *mapping = file->f_mapping;
        DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
        struct file *fpin = NULL;
+       unsigned long vm_flags = vmf->vma->vm_flags;
        unsigned int mmap_miss;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
        /* Use the readahead code, even if readahead is disabled */
-       if (vmf->vma->vm_flags & VM_HUGEPAGE) {
+       if (vm_flags & VM_HUGEPAGE) {
                fpin = maybe_unlock_mmap_for_io(vmf, fpin);
                ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
                ra->size = HPAGE_PMD_NR;
@@ -3003,7 +3004,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
                 * Fetch two PMD folios, so we get the chance to actually
                 * readahead, unless we've been told not to.
                 */
-               if (!(vmf->vma->vm_flags & VM_RAND_READ))
+               if (!(vm_flags & VM_RAND_READ))
                        ra->size *= 2;
                ra->async_size = HPAGE_PMD_NR;
                page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER);
@@ -3012,12 +3013,12 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 #endif
 
        /* If we don't want any read-ahead, don't bother */
-       if (vmf->vma->vm_flags & VM_RAND_READ)
+       if (vm_flags & VM_RAND_READ)
                return fpin;
        if (!ra->ra_pages)
                return fpin;
 
-       if (vmf->vma->vm_flags & VM_SEQ_READ) {
+       if (vm_flags & VM_SEQ_READ) {
                fpin = maybe_unlock_mmap_for_io(vmf, fpin);
                page_cache_sync_ra(&ractl, ra->ra_pages);
                return fpin;