powerpc: book3s64: convert to pin_user_pages() and put_user_page()
author	John Hubbard <jhubbard@nvidia.com>
Fri, 31 Jan 2020 06:13:28 +0000 (22:13 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Fri, 31 Jan 2020 18:30:38 +0000 (10:30 -0800)
1. Convert from get_user_pages() to pin_user_pages().

2. As required by pin_user_pages(), release these pages via
   put_user_page().
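
The new pairing is the one the hunks below follow: pages pinned with
pin_user_pages() must be released with put_user_page() or the batched
put_user_pages(), never plain put_page().  A minimal sketch of that rule,
assuming the gup signatures of this kernel; the function and variable
names (demo_pin_range, ua, n, pages, pinned) are illustrative only, not
identifiers from this driver:

	#include <linux/mm.h>

	/* Illustrative sketch, not code from this patch. */
	static long demo_pin_range(unsigned long ua, unsigned long n,
				   struct page **pages)
	{
		long pinned;

		/* FOLL_PIN-based pinning, as in the first hunk below */
		pinned = pin_user_pages(ua, n, FOLL_WRITE | FOLL_LONGTERM,
					pages, NULL);
		if (pinned <= 0)
			return pinned;

		/* ... use the pages for long-term DMA ... */

		/* release with put_user_pages(), not a put_page() loop */
		put_user_pages(pages, pinned);
		return pinned;
	}

The batched put_user_pages() call is what replaces the open-coded
put_page() loop in the error path of the second hunk.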

Link: http://lkml.kernel.org/r/20200107224558.2362728-21-jhubbard@nvidia.com
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Björn Töpel <bjorn.topel@intel.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Leon Romanovsky <leonro@mellanox.com>
Cc: Mauro Carvalho Chehab <mchehab@kernel.org>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/powerpc/mm/book3s64/iommu_api.c

index 56cc845205779b17def2e37ca728f01e046efd05..a8654782203467e7d9dd6925267a6c3d1de3fd45 100644 (file)
@@ -103,7 +103,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
        for (entry = 0; entry < entries; entry += chunk) {
                unsigned long n = min(entries - entry, chunk);
 
-               ret = get_user_pages(ua + (entry << PAGE_SHIFT), n,
+               ret = pin_user_pages(ua + (entry << PAGE_SHIFT), n,
                                FOLL_WRITE | FOLL_LONGTERM,
                                mem->hpages + entry, NULL);
                if (ret == n) {
@@ -167,9 +167,8 @@ good_exit:
        return 0;
 
 free_exit:
-       /* free the reference taken */
-       for (i = 0; i < pinned; i++)
-               put_page(mem->hpages[i]);
+       /* free the references taken */
+       put_user_pages(mem->hpages, pinned);
 
        vfree(mem->hpas);
        kfree(mem);
@@ -215,7 +214,8 @@ static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
                if (mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY)
                        SetPageDirty(page);
 
-               put_page(page);
+               put_user_page(page);
+
                mem->hpas[i] = 0;
        }
 }