IB/{core,hw,umem}: set FOLL_PIN via pin_user_pages*(), fix up ODP
author		John Hubbard <jhubbard@nvidia.com>	Fri, 31 Jan 2020 06:13:02 +0000 (22:13 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>	Fri, 31 Jan 2020 18:30:37 +0000 (10:30 -0800)
Convert InfiniBand to use the new pin_user_pages*() calls.

Also, revert the earlier changes to InfiniBand ODP that had it using
put_user_page().  ODP is "Case 3" in
Documentation/core-api/pin_user_pages.rst, which is to say that plain
get_user_pages() and put_page() are the right APIs to use there.
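
For illustration only, a minimal sketch of that Case 3 pattern might
look like the following; the function and variable names are
hypothetical, not taken from this patch.

/*
 * Illustrative sketch of the "Case 3" pattern; the names here are
 * hypothetical and not taken from this patch. ODP-style users rely on
 * mmu notifiers for coherence, so they take ordinary page references
 * (no FOLL_PIN) and drop them with put_page().
 */
#include <linux/mm.h>

static int odp_case3_example(unsigned long user_virt, struct page **pages,
			     unsigned long npages)
{
	long got;

	/* Plain GUP: elevates each page's refcount, does not set FOLL_PIN. */
	got = get_user_pages(user_virt, npages, FOLL_WRITE, pages, NULL);
	if (got < 0)
		return got;

	/* ... hand the pages to the device under notifier protection ... */

	/* Release the ordinary references: put_page(), not put_user_page(). */
	while (got)
		put_page(pages[--got]);
	return 0;
}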

The new pin_user_pages*() calls replace the corresponding
get_user_pages*() calls and set the FOLL_PIN flag.  FOLL_PIN requires
that the caller return the pages via put_user_page*() calls, which
InfiniBand was already doing as part of an earlier commit.
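
As a hedged sketch of that pairing (again with hypothetical names, not
code from this patch), a typical pin_user_pages_fast() caller follows
this shape:

/*
 * Illustrative FOLL_PIN pairing; the names are hypothetical, not code
 * from this patch. pin_user_pages_fast() sets FOLL_PIN internally, and
 * every page it returns must be released through put_user_page*().
 */
#include <linux/mm.h>

static int pin_for_dma_example(unsigned long addr, struct page **pages,
			       int npages)
{
	int ret;

	/* Long-term DMA pin: the pin_user_pages* API sets FOLL_PIN itself. */
	ret = pin_user_pages_fast(addr, npages,
				  FOLL_WRITE | FOLL_LONGTERM, pages);
	if (ret < 0)
		return ret;

	/* ... program the device with the ret pages actually pinned ... */

	/* Pairs with FOLL_PIN; a plain put_page() here would be a bug. */
	put_user_pages(pages, ret);
	return 0;
}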

Link: http://lkml.kernel.org/r/20200107224558.2362728-14-jhubbard@nvidia.com
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Björn Töpel <bjorn.topel@intel.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Leon Romanovsky <leonro@mellanox.com>
Cc: Mauro Carvalho Chehab <mchehab@kernel.org>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
drivers/infiniband/core/umem.c
drivers/infiniband/core/umem_odp.c
drivers/infiniband/hw/hfi1/user_pages.c
drivers/infiniband/hw/mthca/mthca_memfree.c
drivers/infiniband/hw/qib/qib_user_pages.c
drivers/infiniband/hw/qib/qib_user_sdma.c
drivers/infiniband/hw/usnic/usnic_uiom.c
drivers/infiniband/sw/siw/siw_mem.c

diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index f995b50ee1ccd8ee122adeaacc89e0077080dcbb..aae5bfed7f3ba170596f85fbdc991bddcf03ec86 100644
@@ -257,7 +257,7 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
        sg = umem->sg_head.sgl;
 
        while (npages) {
-               ret = get_user_pages_fast(cur_base,
+               ret = pin_user_pages_fast(cur_base,
                                          min_t(unsigned long, npages,
                                                PAGE_SIZE /
                                                sizeof(struct page *)),
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index dac3fd2ebc2648c29a7b84b95abc935e5c49bc2e..a71ce0ae203170d215a2d2686e5d5da9a41149bf 100644
@@ -293,9 +293,8 @@ EXPORT_SYMBOL(ib_umem_odp_release);
  * The function returns -EFAULT if the DMA mapping operation fails. It returns
  * -EAGAIN if a concurrent invalidation prevents us from updating the page.
  *
- * The page is released via put_user_page even if the operation failed. For
- * on-demand pinning, the page is released whenever it isn't stored in the
- * umem.
+ * The page is released via put_page even if the operation failed. For on-demand
+ * pinning, the page is released whenever it isn't stored in the umem.
  */
 static int ib_umem_odp_map_dma_single_page(
                struct ib_umem_odp *umem_odp,
@@ -348,7 +347,7 @@ static int ib_umem_odp_map_dma_single_page(
        }
 
 out:
-       put_user_page(page);
+       put_page(page);
        return ret;
 }
 
@@ -458,7 +457,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
                                        ret = -EFAULT;
                                        break;
                                }
-                               put_user_page(local_page_list[j]);
+                               put_page(local_page_list[j]);
                                continue;
                        }
 
@@ -485,8 +484,8 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
                         * ib_umem_odp_map_dma_single_page().
                         */
                        if (npages - (j + 1) > 0)
-                               put_user_pages(&local_page_list[j+1],
-                                              npages - (j + 1));
+                               release_pages(&local_page_list[j+1],
+                                             npages - (j + 1));
                        break;
                }
        }
diff --git a/drivers/infiniband/hw/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c
index 469acb961fbd2e034c898cafd6d86d6150ac0f62..9a94761765c04f9583553bb67cbfb60cf1315905 100644
@@ -106,7 +106,7 @@ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t np
        int ret;
        unsigned int gup_flags = FOLL_LONGTERM | (writable ? FOLL_WRITE : 0);
 
-       ret = get_user_pages_fast(vaddr, npages, gup_flags, pages);
+       ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages);
        if (ret < 0)
                return ret;
 
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index edccfd6e178f020f229789c9cc235ea850479bfd..8269ab040c2198e44784db546f04b9e1e380ccb1 100644
@@ -472,7 +472,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
                goto out;
        }
 
-       ret = get_user_pages_fast(uaddr & PAGE_MASK, 1,
+       ret = pin_user_pages_fast(uaddr & PAGE_MASK, 1,
                                  FOLL_WRITE | FOLL_LONGTERM, pages);
        if (ret < 0)
                goto out;
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 6bf764e418919bc89360fe05a4d574e405ff7533..7fc4b5f81fcd94e23963af4e764e0f915d4857ad 100644
@@ -108,7 +108,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
 
        down_read(&current->mm->mmap_sem);
        for (got = 0; got < num_pages; got += ret) {
-               ret = get_user_pages(start_page + got * PAGE_SIZE,
+               ret = pin_user_pages(start_page + got * PAGE_SIZE,
                                     num_pages - got,
                                     FOLL_LONGTERM | FOLL_WRITE | FOLL_FORCE,
                                     p + got, NULL);
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index 05190edc2611ef7dad6af79d7a550627c84486a9..1a3cc2957e3ac86cbb44c617044b6e615799dc4d 100644
@@ -670,7 +670,7 @@ static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
                else
                        j = npages;
 
-               ret = get_user_pages_fast(addr, j, FOLL_LONGTERM, pages);
+               ret = pin_user_pages_fast(addr, j, FOLL_LONGTERM, pages);
                if (ret != j) {
                        i = 0;
                        j = ret;
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 62e6ffa9ad78efbd138c9d19a3db386cd4497ccd..600896727d349205f60950ff296efb6ca7605192 100644
@@ -141,7 +141,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
        ret = 0;
 
        while (npages) {
-               ret = get_user_pages(cur_base,
+               ret = pin_user_pages(cur_base,
                                     min_t(unsigned long, npages,
                                     PAGE_SIZE / sizeof(struct page *)),
                                     gup_flags | FOLL_LONGTERM,
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index e99983f076631737cafec40b71d8a2eda9a8221e..e53b07dcfed5e32c80f7a1191bbe226766669795 100644
@@ -426,7 +426,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
                while (nents) {
                        struct page **plist = &umem->page_chunk[i].plist[got];
 
-                       rv = get_user_pages(first_page_va, nents,
+                       rv = pin_user_pages(first_page_va, nents,
                                            foll_flags | FOLL_LONGTERM,
                                            plist, NULL);
                        if (rv < 0)