git.baikalelectronics.ru Git - kernel.git/commitdiff
mm/swapfile: unuse_pte can map random data if swap read fails
authorMiaohe Lin <linmiaohe@huawei.com>
Thu, 19 May 2022 12:50:26 +0000 (20:50 +0800)
committerakpm <akpm@linux-foundation.org>
Fri, 27 May 2022 16:33:45 +0000 (09:33 -0700)
Patch series "A few fixup patches for mm", v4.

This series contains a few patches to avoid mapping random data if swap
read fails and fix lost swap bits in unuse_pte.  Also we free hwpoison and
swapin error entry in madvise_free_pte_range and so on.  More details can
be found in the respective changelogs.

This patch (of 5):

There is a bug in unuse_pte(): when the swap page happens to be unreadable, a
page filled with random data is mapped into the user address space.  In case
of error, a special swap entry indicating swap read fails is set to the
page table.  So the swapcache page can be freed and the user won't end up
with a permanently mounted swap because a sector is bad.  And if the page
is accessed later, the user process will be killed so that corrupted data
is never consumed.  On the other hand, if the page is never accessed, the
user won't even notice it.

Link: https://lkml.kernel.org/r/20220519125030.21486-1-linmiaohe@huawei.com
Link: https://lkml.kernel.org/r/20220519125030.21486-2-linmiaohe@huawei.com
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: David Howells <dhowells@redhat.com>
Cc: NeilBrown <neilb@suse.de>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/swap.h
include/linux/swapops.h
mm/memory.c
mm/swapfile.c

index f3ae17b43f20814034542cbbd7cbdcb4cb879484..0c0fed1b348f20cc04e3c8c3fc8e0672bacca7c7 100644 (file)
@@ -55,6 +55,10 @@ static inline int current_is_kswapd(void)
  * actions on faults.
  */
 
+#define SWP_SWAPIN_ERROR_NUM 1
+#define SWP_SWAPIN_ERROR     (MAX_SWAPFILES + SWP_HWPOISON_NUM + \
+                            SWP_MIGRATION_NUM + SWP_DEVICE_NUM + \
+                            SWP_PTE_MARKER_NUM)
 /*
  * PTE markers are used to persist information onto PTEs that are mapped with
  * file-backed memories.  As its name "PTE" hints, it should only be applied to
@@ -120,7 +124,8 @@ static inline int current_is_kswapd(void)
 
 #define MAX_SWAPFILES \
        ((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
-       SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - SWP_PTE_MARKER_NUM)
+       SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \
+       SWP_PTE_MARKER_NUM - SWP_SWAPIN_ERROR_NUM)
 
 /*
  * Magic header for a swap area. The first part of the union is
index fe220df499f1fbf82bc562b39fac5b5bd9fb1313..f24775b418807765f457c85237b2f8c3ef54d807 100644 (file)
@@ -108,6 +108,16 @@ static inline void *swp_to_radix_entry(swp_entry_t entry)
        return xa_mk_value(entry.val);
 }
 
+static inline swp_entry_t make_swapin_error_entry(struct page *page)
+{
+       return swp_entry(SWP_SWAPIN_ERROR, page_to_pfn(page));
+}
+
+static inline int is_swapin_error_entry(swp_entry_t entry)
+{
+       return swp_type(entry) == SWP_SWAPIN_ERROR;
+}
+
 #if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
 static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
 {
index 2bf5bca39567dc55ff4ceea07c31452b5d3fc078..54d106e0c9999d4aaac3c60f9961bf1224fc9bd2 100644 (file)
@@ -1487,7 +1487,8 @@ again:
                        /* Only drop the uffd-wp marker if explicitly requested */
                        if (!zap_drop_file_uffd_wp(details))
                                continue;
-               } else if (is_hwpoison_entry(entry)) {
+               } else if (is_hwpoison_entry(entry) ||
+                          is_swapin_error_entry(entry)) {
                        if (!should_zap_cows(details))
                                continue;
                } else {
@@ -3727,6 +3728,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                        ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
                } else if (is_hwpoison_entry(entry)) {
                        ret = VM_FAULT_HWPOISON;
+               } else if (is_swapin_error_entry(entry)) {
+                       ret = VM_FAULT_SIGBUS;
                } else if (is_pte_marker_entry(entry)) {
                        ret = handle_pte_marker(vmf);
                } else {
index a0eb690d9926ada167085249ee8dc435ac1948b0..b86d1cc8d00b46b41ff47ca65e157b88895f1077 100644 (file)
@@ -1788,6 +1788,17 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
                goto out;
        }
 
+       if (unlikely(!PageUptodate(page))) {
+               pte_t pteval;
+
+               dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
+               pteval = swp_entry_to_pte(make_swapin_error_entry(page));
+               set_pte_at(vma->vm_mm, addr, pte, pteval);
+               swap_free(entry);
+               ret = 0;
+               goto out;
+       }
+
        /* See do_swap_page() */
        BUG_ON(!PageAnon(page) && PageMappedToDisk(page));
        BUG_ON(PageAnon(page) && PageAnonExclusive(page));