git.baikalelectronics.ru Git - kernel.git/commitdiff
mm: Cleanup __put_devmap_managed_page() vs ->page_free()
author: Dan Williams <dan.j.williams@intel.com>
Fri, 31 Jan 2020 06:12:24 +0000 (22:12 -0800)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Fri, 31 Jan 2020 18:30:37 +0000 (10:30 -0800)
After the removal of the device-public infrastructure there are only 2
->page_free() call backs in the kernel.  One of those is a
device-private callback in the nouveau driver, the other is a generic
wakeup needed in the DAX case.  In the hopes that all ->page_free()
callbacks can be migrated to common core kernel functionality, move the
device-private specific actions in __put_devmap_managed_page() under the
is_device_private_page() conditional, including the ->page_free()
callback.  For the other page types just open-code the generic wakeup.

Yes, the wakeup is only needed in the MEMORY_DEVICE_FSDAX case, but it
does no harm in the MEMORY_DEVICE_DEVDAX and MEMORY_DEVICE_PCI_P2PDMA
case.

Link: http://lkml.kernel.org/r/20200107224558.2362728-4-jhubbard@nvidia.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jérôme Glisse <jglisse@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Björn Töpel <bjorn.topel@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Leon Romanovsky <leonro@mellanox.com>
Cc: Mauro Carvalho Chehab <mchehab@kernel.org>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
drivers/nvdimm/pmem.c
mm/memremap.c

index ad8e4df1282bffa888612a20c7e99181d494b5dc..4eae441f86c96aa3e7dd2eeb07af140ae8d8d295 100644 (file)
@@ -337,13 +337,7 @@ static void pmem_release_disk(void *__pmem)
        put_disk(pmem->disk);
 }
 
-static void pmem_pagemap_page_free(struct page *page)
-{
-       wake_up_var(&page->_refcount);
-}
-
 static const struct dev_pagemap_ops fsdax_pagemap_ops = {
-       .page_free              = pmem_pagemap_page_free,
        .kill                   = pmem_pagemap_kill,
        .cleanup                = pmem_pagemap_cleanup,
 };
index c51c6bd2fe3425f0fc00dae12f29dcec5d395a42..f915d074ac202043dee6a44297dc8b45d58c7c21 100644 (file)
@@ -27,7 +27,8 @@ static void devmap_managed_enable_put(void)
 
 static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
 {
-       if (!pgmap->ops || !pgmap->ops->page_free) {
+       if (pgmap->type == MEMORY_DEVICE_PRIVATE &&
+           (!pgmap->ops || !pgmap->ops->page_free)) {
                WARN(1, "Missing page_free method\n");
                return -EINVAL;
        }
@@ -414,44 +415,51 @@ void __put_devmap_managed_page(struct page *page)
 {
        int count = page_ref_dec_return(page);
 
-       /*
-        * If refcount is 1 then page is freed and refcount is stable as nobody
-        * holds a reference on the page.
-        */
-       if (count == 1) {
-               /* Clear Active bit in case of parallel mark_page_accessed */
-               __ClearPageActive(page);
-               __ClearPageWaiters(page);
+       /* still busy */
+       if (count > 1)
+               return;
 
-               mem_cgroup_uncharge(page);
+       /* only triggered by the dev_pagemap shutdown path */
+       if (count == 0) {
+               __put_page(page);
+               return;
+       }
 
-               /*
-                * When a device_private page is freed, the page->mapping field
-                * may still contain a (stale) mapping value. For example, the
-                * lower bits of page->mapping may still identify the page as
-                * an anonymous page. Ultimately, this entire field is just
-                * stale and wrong, and it will cause errors if not cleared.
-                * One example is:
-                *
-                *  migrate_vma_pages()
-                *    migrate_vma_insert_page()
-                *      page_add_new_anon_rmap()
-                *        __page_set_anon_rmap()
-                *          ...checks page->mapping, via PageAnon(page) call,
-                *            and incorrectly concludes that the page is an
-                *            anonymous page. Therefore, it incorrectly,
-                *            silently fails to set up the new anon rmap.
-                *
-                * For other types of ZONE_DEVICE pages, migration is either
-                * handled differently or not done at all, so there is no need
-                * to clear page->mapping.
-                */
-               if (is_device_private_page(page))
-                       page->mapping = NULL;
+       /* notify page idle for dax */
+       if (!is_device_private_page(page)) {
+               wake_up_var(&page->_refcount);
+               return;
+       }
 
-               page->pgmap->ops->page_free(page);
-       } else if (!count)
-               __put_page(page);
+       /* Clear Active bit in case of parallel mark_page_accessed */
+       __ClearPageActive(page);
+       __ClearPageWaiters(page);
+
+       mem_cgroup_uncharge(page);
+
+       /*
+        * When a device_private page is freed, the page->mapping field
+        * may still contain a (stale) mapping value. For example, the
+        * lower bits of page->mapping may still identify the page as an
+        * anonymous page. Ultimately, this entire field is just stale
+        * and wrong, and it will cause errors if not cleared.  One
+        * example is:
+        *
+        *  migrate_vma_pages()
+        *    migrate_vma_insert_page()
+        *      page_add_new_anon_rmap()
+        *        __page_set_anon_rmap()
+        *          ...checks page->mapping, via PageAnon(page) call,
+        *            and incorrectly concludes that the page is an
+        *            anonymous page. Therefore, it incorrectly,
+        *            silently fails to set up the new anon rmap.
+        *
+        * For other types of ZONE_DEVICE pages, migration is either
+        * handled differently or not done at all, so there is no need
+        * to clear page->mapping.
+        */
+       page->mapping = NULL;
+       page->pgmap->ops->page_free(page);
 }
 EXPORT_SYMBOL(__put_devmap_managed_page);
 #endif /* CONFIG_DEV_PAGEMAP_OPS */