mm/memremap_pages: convert to 'struct range'
author:    Dan Williams <dan.j.williams@intel.com>
           Tue, 13 Oct 2020 23:50:29 +0000 (16:50 -0700)
committer: Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 14 Oct 2020 01:38:28 +0000 (18:38 -0700)
The 'struct resource' in 'struct dev_pagemap' is only used for holding
resource span information.  The other fields, 'name', 'flags', 'desc',
'parent', 'sibling', and 'child', are all unused, wasted space.
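
For reference, 'struct range' already lives in include/linux/range.h and
carries only the span; this patch promotes the range_len() helper (previously
private to drivers/dax) into that header so the converted call sites can share
it.  A minimal sketch of the two pieces:

        struct range {
                u64 start;
                u64 end;        /* inclusive, so the length is end - start + 1 */
        };

        static inline u64 range_len(const struct range *range)
        {
                return range->end - range->start + 1;
        }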

This is in preparation for introducing a multi-range extension of
devm_memremap_pages().

The bulk of this change is unwinding all the places internal to libnvdimm
that used 'struct resource' unnecessarily, and replacing instances of
'struct dev_pagemap'.res with 'struct dev_pagemap'.range.
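
The conversion is mechanical; a representative before/after sketch of the
pattern repeated throughout the hunks below (variable names are illustrative):

        /* before: span carried in a struct resource */
        pfn_first = pgmap->res.start >> PAGE_SHIFT;
        npages = resource_size(&pgmap->res) >> PAGE_SHIFT;

        /* after: span carried in a struct range */
        pfn_first = pgmap->range.start >> PAGE_SHIFT;
        npages = range_len(&pgmap->range) >> PAGE_SHIFT;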

P2PDMA had a minor usage of the resource flags field, but only to report
failures with "%pR".  That is replaced with an open coded print of the
range.
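
Concretely, the drivers/pci/p2pdma.c hunk below makes this substitution:

        /* before: leaned on the (otherwise unused) resource for %pR */
        pci_info(pdev, "added peer-to-peer DMA memory %pR\n", &pgmap->res);

        /* after: open coded print of the range */
        pci_info(pdev, "added peer-to-peer DMA memory %#llx-%#llx\n",
                 pgmap->range.start, pgmap->range.end);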

[dan.carpenter@oracle.com: mm/hmm/test: use after free in dmirror_allocate_chunk()]
Link: https://lkml.kernel.org/r/20200926121402.GA7467@kadam
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com> [xen]
Cc: Paul Mackerras <paulus@ozlabs.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brice Goglin <Brice.Goglin@inria.fr>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Hulk Robot <hulkci@huawei.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Jason Yan <yanaijie@huawei.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Jia He <justin.he@arm.com>
Cc: Joao Martins <joao.m.martins@oracle.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: kernel test robot <lkp@intel.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lkml.kernel.org/r/159643103173.4062302.768998885691711532.stgit@dwillia2-desk3.amr.corp.intel.com
Link: https://lkml.kernel.org/r/160106115761.30709.13539840236873663620.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
21 files changed:
arch/powerpc/kvm/book3s_hv_uvmem.c
drivers/dax/bus.c
drivers/dax/bus.h
drivers/dax/dax-private.h
drivers/dax/device.c
drivers/dax/hmem/hmem.c
drivers/dax/pmem/core.c
drivers/gpu/drm/nouveau/nouveau_dmem.c
drivers/nvdimm/badrange.c
drivers/nvdimm/claim.c
drivers/nvdimm/nd.h
drivers/nvdimm/pfn_devs.c
drivers/nvdimm/pmem.c
drivers/nvdimm/region.c
drivers/pci/p2pdma.c
drivers/xen/unpopulated-alloc.c
include/linux/memremap.h
include/linux/range.h
lib/test_hmm.c
mm/memremap.c
tools/testing/nvdimm/test/iomap.c

diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 7705d55572395bee32c798da5602d387a114a813..29ec555055c262bf39e5cd250ba0652798165755 100644
@@ -687,9 +687,9 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
        struct kvmppc_uvmem_page_pvt *pvt;
        unsigned long pfn_last, pfn_first;
 
-       pfn_first = kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT;
+       pfn_first = kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT;
        pfn_last = pfn_first +
-                  (resource_size(&kvmppc_uvmem_pgmap.res) >> PAGE_SHIFT);
+                  (range_len(&kvmppc_uvmem_pgmap.range) >> PAGE_SHIFT);
 
        spin_lock(&kvmppc_uvmem_bitmap_lock);
        bit = find_first_zero_bit(kvmppc_uvmem_bitmap,
@@ -1007,7 +1007,7 @@ static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf)
 static void kvmppc_uvmem_page_free(struct page *page)
 {
        unsigned long pfn = page_to_pfn(page) -
-                       (kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT);
+                       (kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT);
        struct kvmppc_uvmem_page_pvt *pvt;
 
        spin_lock(&kvmppc_uvmem_bitmap_lock);
@@ -1170,7 +1170,8 @@ int kvmppc_uvmem_init(void)
        }
 
        kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE;
-       kvmppc_uvmem_pgmap.res = *res;
+       kvmppc_uvmem_pgmap.range.start = res->start;
+       kvmppc_uvmem_pgmap.range.end = res->end;
        kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops;
        /* just one global instance: */
        kvmppc_uvmem_pgmap.owner = &kvmppc_uvmem_pgmap;
@@ -1205,7 +1206,7 @@ void kvmppc_uvmem_free(void)
                return;
 
        memunmap_pages(&kvmppc_uvmem_pgmap);
-       release_mem_region(kvmppc_uvmem_pgmap.res.start,
-                          resource_size(&kvmppc_uvmem_pgmap.res));
+       release_mem_region(kvmppc_uvmem_pgmap.range.start,
+                          range_len(&kvmppc_uvmem_pgmap.range));
        kfree(kvmppc_uvmem_bitmap);
 }
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 53d07f2f1285912129650db912e19b665ba39c80..00fa73a8dfb4c9caa2ac295041e8a0c35e452627 100644
@@ -515,7 +515,7 @@ static void dax_region_unregister(void *region)
 }
 
 struct dax_region *alloc_dax_region(struct device *parent, int region_id,
-               struct resource *res, int target_node, unsigned int align,
+               struct range *range, int target_node, unsigned int align,
                unsigned long flags)
 {
        struct dax_region *dax_region;
@@ -530,8 +530,8 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
                return NULL;
        }
 
-       if (!IS_ALIGNED(res->start, align)
-                       || !IS_ALIGNED(resource_size(res), align))
+       if (!IS_ALIGNED(range->start, align)
+                       || !IS_ALIGNED(range_len(range), align))
                return NULL;
 
        dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
@@ -546,8 +546,8 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
        dax_region->target_node = target_node;
        ida_init(&dax_region->ida);
        dax_region->res = (struct resource) {
-               .start = res->start,
-               .end = res->end,
+               .start = range->start,
+               .end = range->end,
                .flags = IORESOURCE_MEM | flags,
        };
 
diff --git a/drivers/dax/bus.h b/drivers/dax/bus.h
index da27ea70a19a4e3c2bcc26ec6a69485ae1377c8a..72b92f95509f7ece65ab7e7b22dfb66915e019c0 100644
@@ -13,7 +13,7 @@ void dax_region_put(struct dax_region *dax_region);
 
 #define IORESOURCE_DAX_STATIC (1UL << 0)
 struct dax_region *alloc_dax_region(struct device *parent, int region_id,
-               struct resource *res, int target_node, unsigned int align,
+               struct range *range, int target_node, unsigned int align,
                unsigned long flags);
 
 enum dev_dax_subsys {
diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h
index b81a1494d82bb432f9310d658ccbe1ceaab2d9cc..0cbb2ec81ca727dcfba8b4aa8ceae213cfb28ebf 100644
@@ -61,11 +61,6 @@ struct dev_dax {
        struct range range;
 };
 
-static inline u64 range_len(struct range *range)
-{
-       return range->end - range->start + 1;
-}
-
 static inline struct dev_dax *to_dev_dax(struct device *dev)
 {
        return container_of(dev, struct dev_dax, dev);
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 9833fa83b5379847a6aecfa49ad592055e5f82e0..a14448bca83d187dddcfa3c4d79119216f956385 100644
@@ -416,8 +416,7 @@ int dev_dax_probe(struct dev_dax *dev_dax)
                pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
                if (!pgmap)
                        return -ENOMEM;
-               pgmap->res.start = range->start;
-               pgmap->res.end = range->end;
+               pgmap->range = *range;
        }
        pgmap->type = MEMORY_DEVICE_GENERIC;
        addr = devm_memremap_pages(dev, pgmap);
diff --git a/drivers/dax/hmem/hmem.c b/drivers/dax/hmem/hmem.c
index aa260009dfc79d322ae1438176aacd8e2115cb7e..1a3347bb6143e2c1bec98605b58a0eb07b2fed3a 100644
@@ -13,13 +13,16 @@ static int dax_hmem_probe(struct platform_device *pdev)
        struct dev_dax_data data;
        struct dev_dax *dev_dax;
        struct resource *res;
+       struct range range;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENOMEM;
 
        mri = dev->platform_data;
-       dax_region = alloc_dax_region(dev, pdev->id, res, mri->target_node,
+       range.start = res->start;
+       range.end = res->end;
+       dax_region = alloc_dax_region(dev, pdev->id, &range, mri->target_node,
                        PMD_SIZE, 0);
        if (!dax_region)
                return -ENOMEM;
diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c
index 4fe7008843389f098ef3814f4b65e96169397279..62b26bfceab121589b781e2348dfe6379e12d7ff 100644
@@ -9,7 +9,7 @@
 
 struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
 {
-       struct resource res;
+       struct range range;
        int rc, id, region_id;
        resource_size_t offset;
        struct nd_pfn_sb *pfn_sb;
@@ -50,10 +50,10 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
        if (rc != 2)
                return ERR_PTR(-EINVAL);
 
-       /* adjust the dax_region resource to the start of data */
-       memcpy(&res, &pgmap.res, sizeof(res));
-       res.start += offset;
-       dax_region = alloc_dax_region(dev, region_id, &res,
+       /* adjust the dax_region range to the start of data */
+       range = pgmap.range;
+       range.start += offset,
+       dax_region = alloc_dax_region(dev, region_id, &range,
                        nd_region->target_node, le32_to_cpu(pfn_sb->align),
                        IORESOURCE_DAX_STATIC);
        if (!dax_region)
@@ -64,7 +64,7 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
                .id = id,
                .pgmap = &pgmap,
                .subsys = subsys,
-               .size = resource_size(&res),
+               .size = range_len(&range),
        };
        dev_dax = devm_create_dev_dax(&data);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 4e8112fde3e6be502e4528e6f98bc04c125c6c5e..25811ed7e2744950208f8cd25d8a077f805d5803 100644
@@ -101,7 +101,7 @@ unsigned long nouveau_dmem_page_addr(struct page *page)
 {
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
        unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
-                               chunk->pagemap.res.start;
+                               chunk->pagemap.range.start;
 
        return chunk->bo->offset + off;
 }
@@ -249,7 +249,8 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
 
        chunk->drm = drm;
        chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
-       chunk->pagemap.res = *res;
+       chunk->pagemap.range.start = res->start;
+       chunk->pagemap.range.end = res->end;
        chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
        chunk->pagemap.owner = drm->dev;
 
@@ -273,7 +274,7 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
        list_add(&chunk->list, &drm->dmem->chunks);
        mutex_unlock(&drm->dmem->mutex);
 
-       pfn_first = chunk->pagemap.res.start >> PAGE_SHIFT;
+       pfn_first = chunk->pagemap.range.start >> PAGE_SHIFT;
        page = pfn_to_page(pfn_first);
        spin_lock(&drm->dmem->lock);
        for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
@@ -294,8 +295,7 @@ out_bo_unpin:
 out_bo_free:
        nouveau_bo_ref(NULL, &chunk->bo);
 out_release:
-       release_mem_region(chunk->pagemap.res.start,
-                          resource_size(&chunk->pagemap.res));
+       release_mem_region(chunk->pagemap.range.start, range_len(&chunk->pagemap.range));
 out_free:
        kfree(chunk);
 out:
@@ -382,8 +382,8 @@ nouveau_dmem_fini(struct nouveau_drm *drm)
                nouveau_bo_ref(NULL, &chunk->bo);
                list_del(&chunk->list);
                memunmap_pages(&chunk->pagemap);
-               release_mem_region(chunk->pagemap.res.start,
-                                  resource_size(&chunk->pagemap.res));
+               release_mem_region(chunk->pagemap.range.start,
+                                  range_len(&chunk->pagemap.range));
                kfree(chunk);
        }
 
diff --git a/drivers/nvdimm/badrange.c b/drivers/nvdimm/badrange.c
index b9eeefa27e3a507c16e60e186904bafc58f8b710..aaf6e215a8c6dad386faf102c94a9e49d0481aaa 100644
@@ -211,7 +211,7 @@ static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
 }
 
 static void badblocks_populate(struct badrange *badrange,
-               struct badblocks *bb, const struct resource *res)
+               struct badblocks *bb, const struct range *range)
 {
        struct badrange_entry *bre;
 
@@ -222,34 +222,34 @@ static void badblocks_populate(struct badrange *badrange,
                u64 bre_end = bre->start + bre->length - 1;
 
                /* Discard intervals with no intersection */
-               if (bre_end < res->start)
+               if (bre_end < range->start)
                        continue;
-               if (bre->start >  res->end)
+               if (bre->start > range->end)
                        continue;
                /* Deal with any overlap after start of the namespace */
-               if (bre->start >= res->start) {
+               if (bre->start >= range->start) {
                        u64 start = bre->start;
                        u64 len;
 
-                       if (bre_end <= res->end)
+                       if (bre_end <= range->end)
                                len = bre->length;
                        else
-                               len = res->start + resource_size(res)
+                               len = range->start + range_len(range)
                                        - bre->start;
-                       __add_badblock_range(bb, start - res->start, len);
+                       __add_badblock_range(bb, start - range->start, len);
                        continue;
                }
                /*
                 * Deal with overlap for badrange starting before
                 * the namespace.
                 */
-               if (bre->start < res->start) {
+               if (bre->start < range->start) {
                        u64 len;
 
-                       if (bre_end < res->end)
-                               len = bre->start + bre->length - res->start;
+                       if (bre_end < range->end)
+                               len = bre->start + bre->length - range->start;
                        else
-                               len = resource_size(res);
+                               len = range_len(range);
                        __add_badblock_range(bb, 0, len);
                }
        }
@@ -267,7 +267,7 @@ static void badblocks_populate(struct badrange *badrange,
  * and add badblocks entries for all matching sub-ranges
  */
 void nvdimm_badblocks_populate(struct nd_region *nd_region,
-               struct badblocks *bb, const struct resource *res)
+               struct badblocks *bb, const struct range *range)
 {
        struct nvdimm_bus *nvdimm_bus;
 
@@ -279,7 +279,7 @@ void nvdimm_badblocks_populate(struct nd_region *nd_region,
        nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
 
        nvdimm_bus_lock(&nvdimm_bus->dev);
-       badblocks_populate(&nvdimm_bus->badrange, bb, res);
+       badblocks_populate(&nvdimm_bus->badrange, bb, range);
        nvdimm_bus_unlock(&nvdimm_bus->dev);
 }
 EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index 22d865ba6353d23564039b0131c8759dd5790e09..5a7c80053c6245fbafeaf104ddb48ee53be30f23 100644
@@ -303,13 +303,16 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
 int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
                resource_size_t size)
 {
-       struct resource *res = &nsio->res;
        struct nd_namespace_common *ndns = &nsio->common;
+       struct range range = {
+               .start = nsio->res.start,
+               .end = nsio->res.end,
+       };
 
        nsio->size = size;
-       if (!devm_request_mem_region(dev, res->start, size,
+       if (!devm_request_mem_region(dev, range.start, size,
                                dev_name(&ndns->dev))) {
-               dev_warn(dev, "could not reserve region %pR\n", res);
+               dev_warn(dev, "could not reserve region %pR\n", &nsio->res);
                return -EBUSY;
        }
 
@@ -317,9 +320,9 @@ int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
        if (devm_init_badblocks(dev, &nsio->bb))
                return -ENOMEM;
        nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
-                       &nsio->res);
+                       &range);
 
-       nsio->addr = devm_memremap(dev, res->start, size, ARCH_MEMREMAP_PMEM);
+       nsio->addr = devm_memremap(dev, range.start, size, ARCH_MEMREMAP_PMEM);
 
        return PTR_ERR_OR_ZERO(nsio->addr);
 }
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 72740108ba4206b7407f56134d2b1b141954e4f0..696b55556d4d27a879ae6c21ed986ec395cd3113 100644
@@ -377,8 +377,9 @@ int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt);
 const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
                char *name);
 unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
+struct range;
 void nvdimm_badblocks_populate(struct nd_region *nd_region,
-               struct badblocks *bb, const struct resource *res);
+               struct badblocks *bb, const struct range *range);
 int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
                resource_size_t size);
 void devm_namespace_disable(struct device *dev,
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 3e11ef8d3f5b622d0970f0651189325bfe47e912..3c4787b92a6a684846980252313f60fa5d65180a 100644
@@ -672,7 +672,7 @@ static unsigned long init_altmap_reserve(resource_size_t base)
 
 static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
 {
-       struct resource *res = &pgmap->res;
+       struct range *range = &pgmap->range;
        struct vmem_altmap *altmap = &pgmap->altmap;
        struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
        u64 offset = le64_to_cpu(pfn_sb->dataoff);
@@ -689,16 +689,16 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
                .end_pfn = PHYS_PFN(end),
        };
 
-       memcpy(res, &nsio->res, sizeof(*res));
-       res->start += start_pad;
-       res->end -= end_trunc;
-
+       *range = (struct range) {
+               .start = nsio->res.start + start_pad,
+               .end = nsio->res.end - end_trunc,
+       };
        if (nd_pfn->mode == PFN_MODE_RAM) {
                if (offset < reserve)
                        return -EINVAL;
                nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
        } else if (nd_pfn->mode == PFN_MODE_PMEM) {
-               nd_pfn->npfns = PHYS_PFN((resource_size(res) - offset));
+               nd_pfn->npfns = PHYS_PFN((range_len(range) - offset));
                if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
                        dev_info(&nd_pfn->dev,
                                        "number of pfns truncated from %lld to %ld\n",
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index c86a0ceaece60901cbea58e0c016dbced8ac9c6a..1f394f44838fa91a4743368d3a5db14886205f6e 100644
@@ -375,7 +375,7 @@ static int pmem_attach_disk(struct device *dev,
        struct nd_region *nd_region = to_nd_region(dev->parent);
        int nid = dev_to_node(dev), fua;
        struct resource *res = &nsio->res;
-       struct resource bb_res;
+       struct range bb_range;
        struct nd_pfn *nd_pfn = NULL;
        struct dax_device *dax_dev;
        struct nd_pfn_sb *pfn_sb;
@@ -434,24 +434,26 @@ static int pmem_attach_disk(struct device *dev,
                pfn_sb = nd_pfn->pfn_sb;
                pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
                pmem->pfn_pad = resource_size(res) -
-                       resource_size(&pmem->pgmap.res);
+                       range_len(&pmem->pgmap.range);
                pmem->pfn_flags |= PFN_MAP;
-               memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
-               bb_res.start += pmem->data_offset;
+               bb_range = pmem->pgmap.range;
+               bb_range.start += pmem->data_offset;
        } else if (pmem_should_map_pages(dev)) {
-               memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
+               pmem->pgmap.range.start = res->start;
+               pmem->pgmap.range.end = res->end;
                pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
                pmem->pgmap.ops = &fsdax_pagemap_ops;
                addr = devm_memremap_pages(dev, &pmem->pgmap);
                pmem->pfn_flags |= PFN_MAP;
-               memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
+               bb_range = pmem->pgmap.range;
        } else {
                if (devm_add_action_or_reset(dev, pmem_release_queue,
                                        &pmem->pgmap))
                        return -ENOMEM;
                addr = devm_memremap(dev, pmem->phys_addr,
                                pmem->size, ARCH_MEMREMAP_PMEM);
-               memcpy(&bb_res, &nsio->res, sizeof(bb_res));
+               bb_range.start =  res->start;
+               bb_range.end = res->end;
        }
 
        if (IS_ERR(addr))
@@ -480,7 +482,7 @@ static int pmem_attach_disk(struct device *dev,
                        / 512);
        if (devm_init_badblocks(dev, &pmem->bb))
                return -ENOMEM;
-       nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_res);
+       nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_range);
        disk->bb = &pmem->bb;
 
        if (is_nvdimm_sync(nd_region))
@@ -591,8 +593,8 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
        resource_size_t offset = 0, end_trunc = 0;
        struct nd_namespace_common *ndns;
        struct nd_namespace_io *nsio;
-       struct resource res;
        struct badblocks *bb;
+       struct range range;
        struct kernfs_node *bb_state;
 
        if (event != NVDIMM_REVALIDATE_POISON)
@@ -628,9 +630,9 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
                nsio = to_nd_namespace_io(&ndns->dev);
        }
 
-       res.start = nsio->res.start + offset;
-       res.end = nsio->res.end - end_trunc;
-       nvdimm_badblocks_populate(nd_region, bb, &res);
+       range.start = nsio->res.start + offset;
+       range.end = nsio->res.end - end_trunc;
+       nvdimm_badblocks_populate(nd_region, bb, &range);
        if (bb_state)
                sysfs_notify_dirent(bb_state);
 }
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index 0f6978e72e7cd79e38f8353d87c97b5ce8ca52d2..bfce87ed72ab4dff8d3f593bd33dafb3784ced5e 100644
@@ -35,7 +35,10 @@ static int nd_region_probe(struct device *dev)
                return rc;
 
        if (is_memory(&nd_region->dev)) {
-               struct resource ndr_res;
+               struct range range = {
+                       .start = nd_region->ndr_start,
+                       .end = nd_region->ndr_start + nd_region->ndr_size - 1,
+               };
 
                if (devm_init_badblocks(dev, &nd_region->bb))
                        return -ENODEV;
@@ -44,9 +47,7 @@ static int nd_region_probe(struct device *dev)
                if (!nd_region->bb_state)
                        dev_warn(&nd_region->dev,
                                        "'badblocks' notification disabled\n");
-               ndr_res.start = nd_region->ndr_start;
-               ndr_res.end = nd_region->ndr_start + nd_region->ndr_size - 1;
-               nvdimm_badblocks_populate(nd_region, &nd_region->bb, &ndr_res);
+               nvdimm_badblocks_populate(nd_region, &nd_region->bb, &range);
        }
 
        rc = nd_region_register_namespaces(nd_region, &err);
@@ -121,14 +122,16 @@ static void nd_region_notify(struct device *dev, enum nvdimm_event event)
 {
        if (event == NVDIMM_REVALIDATE_POISON) {
                struct nd_region *nd_region = to_nd_region(dev);
-               struct resource res;
 
                if (is_memory(&nd_region->dev)) {
-                       res.start = nd_region->ndr_start;
-                       res.end = nd_region->ndr_start +
-                               nd_region->ndr_size - 1;
+                       struct range range = {
+                               .start = nd_region->ndr_start,
+                               .end = nd_region->ndr_start +
+                                       nd_region->ndr_size - 1,
+                       };
+
                        nvdimm_badblocks_populate(nd_region,
-                                       &nd_region->bb, &res);
+                                       &nd_region->bb, &range);
                        if (nd_region->bb_state)
                                sysfs_notify_dirent(nd_region->bb_state);
                }
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index f357f9a32b3a57256abdcf24b8172e69a9bdfb37..256850513813017b768cd2a1d3a2d386542320ee 100644
@@ -185,9 +185,8 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
                return -ENOMEM;
 
        pgmap = &p2p_pgmap->pgmap;
-       pgmap->res.start = pci_resource_start(pdev, bar) + offset;
-       pgmap->res.end = pgmap->res.start + size - 1;
-       pgmap->res.flags = pci_resource_flags(pdev, bar);
+       pgmap->range.start = pci_resource_start(pdev, bar) + offset;
+       pgmap->range.end = pgmap->range.start + size - 1;
        pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
 
        p2p_pgmap->provider = pdev;
@@ -202,13 +201,13 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
 
        error = gen_pool_add_owner(pdev->p2pdma->pool, (unsigned long)addr,
                        pci_bus_address(pdev, bar) + offset,
-                       resource_size(&pgmap->res), dev_to_node(&pdev->dev),
+                       range_len(&pgmap->range), dev_to_node(&pdev->dev),
                        pgmap->ref);
        if (error)
                goto pages_free;
 
-       pci_info(pdev, "added peer-to-peer DMA memory %pR\n",
-                &pgmap->res);
+       pci_info(pdev, "added peer-to-peer DMA memory %#llx-%#llx\n",
+                pgmap->range.start, pgmap->range.end);
 
        return 0;
 
diff --git a/drivers/xen/unpopulated-alloc.c b/drivers/xen/unpopulated-alloc.c
index 3b98dc921426898c3dd884bafc65088d215e59de..091b8669eca394905258192f757614b8b2720c60 100644
@@ -18,27 +18,37 @@ static unsigned int list_count;
 static int fill_list(unsigned int nr_pages)
 {
        struct dev_pagemap *pgmap;
+       struct resource *res;
        void *vaddr;
        unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
-       int ret;
+       int ret = -ENOMEM;
+
+       res = kzalloc(sizeof(*res), GFP_KERNEL);
+       if (!res)
+               return -ENOMEM;
 
        pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
        if (!pgmap)
-               return -ENOMEM;
+               goto err_pgmap;
 
        pgmap->type = MEMORY_DEVICE_GENERIC;
-       pgmap->res.name = "Xen scratch";
-       pgmap->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+       res->name = "Xen scratch";
+       res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 
-       ret = allocate_resource(&iomem_resource, &pgmap->res,
+       ret = allocate_resource(&iomem_resource, res,
                                alloc_pages * PAGE_SIZE, 0, -1,
                                PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
        if (ret < 0) {
                pr_err("Cannot allocate new IOMEM resource\n");
-               kfree(pgmap);
-               return ret;
+               goto err_resource;
        }
 
+       pgmap->range = (struct range) {
+               .start = res->start,
+               .end = res->end,
+       };
+       pgmap->owner = res;
+
 #ifdef CONFIG_XEN_HAVE_PVMMU
         /*
          * memremap will build page tables for the new memory so
@@ -50,14 +60,13 @@ static int fill_list(unsigned int nr_pages)
          * conflict with any devices.
          */
        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-               xen_pfn_t pfn = PFN_DOWN(pgmap->res.start);
+               xen_pfn_t pfn = PFN_DOWN(res->start);
 
                for (i = 0; i < alloc_pages; i++) {
                        if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
                                pr_warn("set_phys_to_machine() failed, no memory added\n");
-                               release_resource(&pgmap->res);
-                               kfree(pgmap);
-                               return -ENOMEM;
+                               ret = -ENOMEM;
+                               goto err_memremap;
                        }
                 }
        }
@@ -66,9 +75,8 @@ static int fill_list(unsigned int nr_pages)
        vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
        if (IS_ERR(vaddr)) {
                pr_err("Cannot remap memory range\n");
-               release_resource(&pgmap->res);
-               kfree(pgmap);
-               return PTR_ERR(vaddr);
+               ret = PTR_ERR(vaddr);
+               goto err_memremap;
        }
 
        for (i = 0; i < alloc_pages; i++) {
@@ -80,6 +88,14 @@ static int fill_list(unsigned int nr_pages)
        }
 
        return 0;
+
+err_memremap:
+       release_resource(res);
+err_resource:
+       kfree(pgmap);
+err_pgmap:
+       kfree(res);
+       return ret;
 }
 
 /**
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index e5862746751b1e72f17b0f7efe318e470a7ef79a..d0dd261d87c046c3a93796d3e18a6e198ae4cf47 100644
@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _LINUX_MEMREMAP_H_
 #define _LINUX_MEMREMAP_H_
+#include <linux/range.h>
 #include <linux/ioport.h>
 #include <linux/percpu-refcount.h>
 
@@ -93,7 +94,7 @@ struct dev_pagemap_ops {
 /**
  * struct dev_pagemap - metadata for ZONE_DEVICE mappings
  * @altmap: pre-allocated/reserved memory for vmemmap allocations
- * @res: physical address range covered by @ref
+ * @range: physical address range covered by @ref
  * @ref: reference count that pins the devm_memremap_pages() mapping
  * @internal_ref: internal reference if @ref is not provided by the caller
  * @done: completion for @internal_ref
@@ -106,7 +107,7 @@ struct dev_pagemap_ops {
  */
 struct dev_pagemap {
        struct vmem_altmap altmap;
-       struct resource res;
+       struct range range;
        struct percpu_ref *ref;
        struct percpu_ref internal_ref;
        struct completion done;
diff --git a/include/linux/range.h b/include/linux/range.h
index d1fbeb66401241f3f0a0dfdbc6deebef8eb63428..274681cc3154809cad3d5c715bca693a608a8b9c 100644
@@ -1,12 +1,18 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _LINUX_RANGE_H
 #define _LINUX_RANGE_H
+#include <linux/types.h>
 
 struct range {
        u64   start;
        u64   end;
 };
 
+static inline u64 range_len(const struct range *range)
+{
+       return range->end - range->start + 1;
+}
+
 int add_range(struct range *range, int az, int nr_range,
                u64 start, u64 end);
 
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index e7dc3de355b7e658c69a5b6999279969dff9a9b5..e97ca8ec0bce9ddaf3cfa28609f7b24316606fde 100644
@@ -460,6 +460,21 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
        unsigned long pfn_last;
        void *ptr;
 
+       devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
+       if (!devmem)
+               return -ENOMEM;
+
+       res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
+                                     "hmm_dmirror");
+       if (IS_ERR(res))
+               goto err_devmem;
+
+       devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
+       devmem->pagemap.range.start = res->start;
+       devmem->pagemap.range.end = res->end;
+       devmem->pagemap.ops = &dmirror_devmem_ops;
+       devmem->pagemap.owner = mdevice;
+
        mutex_lock(&mdevice->devmem_lock);
 
        if (mdevice->devmem_count == mdevice->devmem_capacity) {
@@ -472,33 +487,18 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
                                sizeof(new_chunks[0]) * new_capacity,
                                GFP_KERNEL);
                if (!new_chunks)
-                       goto err;
+                       goto err_release;
                mdevice->devmem_capacity = new_capacity;
                mdevice->devmem_chunks = new_chunks;
        }
 
-       res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
-                                       "hmm_dmirror");
-       if (IS_ERR(res))
-               goto err;
-
-       devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
-       if (!devmem)
-               goto err_release;
-
-       devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
-       devmem->pagemap.res = *res;
-       devmem->pagemap.ops = &dmirror_devmem_ops;
-       devmem->pagemap.owner = mdevice;
-
        ptr = memremap_pages(&devmem->pagemap, numa_node_id());
        if (IS_ERR(ptr))
-               goto err_free;
+               goto err_release;
 
        devmem->mdevice = mdevice;
-       pfn_first = devmem->pagemap.res.start >> PAGE_SHIFT;
-       pfn_last = pfn_first +
-               (resource_size(&devmem->pagemap.res) >> PAGE_SHIFT);
+       pfn_first = devmem->pagemap.range.start >> PAGE_SHIFT;
+       pfn_last = pfn_first + (range_len(&devmem->pagemap.range) >> PAGE_SHIFT);
        mdevice->devmem_chunks[mdevice->devmem_count++] = devmem;
 
        mutex_unlock(&mdevice->devmem_lock);
@@ -525,12 +525,12 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
 
        return true;
 
-err_free:
-       kfree(devmem);
 err_release:
-       release_mem_region(res->start, resource_size(res));
-err:
        mutex_unlock(&mdevice->devmem_lock);
+       release_mem_region(devmem->pagemap.range.start, range_len(&devmem->pagemap.range));
+err_devmem:
+       kfree(devmem);
+
        return false;
 }
 
@@ -1100,8 +1100,8 @@ static void dmirror_device_remove(struct dmirror_device *mdevice)
                                mdevice->devmem_chunks[i];
 
                        memunmap_pages(&devmem->pagemap);
-                       release_mem_region(devmem->pagemap.res.start,
-                                          resource_size(&devmem->pagemap.res));
+                       release_mem_region(devmem->pagemap.range.start,
+                                          range_len(&devmem->pagemap.range));
                        kfree(devmem);
                }
                kfree(mdevice->devmem_chunks);
diff --git a/mm/memremap.c b/mm/memremap.c
index 006dace60b1a9d8e331f34c24dcea9f96036cdb9..d958d348b3cac44bfd98d8e3c3f5722579616850 100644
@@ -70,24 +70,24 @@ static void devmap_managed_enable_put(void)
 }
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
 
-static void pgmap_array_delete(struct resource *res)
+static void pgmap_array_delete(struct range *range)
 {
-       xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
+       xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
                        NULL, GFP_KERNEL);
        synchronize_rcu();
 }
 
 static unsigned long pfn_first(struct dev_pagemap *pgmap)
 {
-       return PHYS_PFN(pgmap->res.start) +
+       return PHYS_PFN(pgmap->range.start) +
                vmem_altmap_offset(pgmap_altmap(pgmap));
 }
 
 static unsigned long pfn_end(struct dev_pagemap *pgmap)
 {
-       const struct resource *res = &pgmap->res;
+       const struct range *range = &pgmap->range;
 
-       return (res->start + resource_size(res)) >> PAGE_SHIFT;
+       return (range->start + range_len(range)) >> PAGE_SHIFT;
 }
 
 static unsigned long pfn_next(unsigned long pfn)
@@ -126,7 +126,7 @@ static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
 
 void memunmap_pages(struct dev_pagemap *pgmap)
 {
-       struct resource *res = &pgmap->res;
+       struct range *range = &pgmap->range;
        struct page *first_page;
        unsigned long pfn;
        int nid;
@@ -143,20 +143,20 @@ void memunmap_pages(struct dev_pagemap *pgmap)
        nid = page_to_nid(first_page);
 
        mem_hotplug_begin();
-       remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(res->start),
-                                  PHYS_PFN(resource_size(res)));
+       remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
+                                  PHYS_PFN(range_len(range)));
        if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
-               __remove_pages(PHYS_PFN(res->start),
-                              PHYS_PFN(resource_size(res)), NULL);
+               __remove_pages(PHYS_PFN(range->start),
+                              PHYS_PFN(range_len(range)), NULL);
        } else {
-               arch_remove_memory(nid, res->start, resource_size(res),
+               arch_remove_memory(nid, range->start, range_len(range),
                                pgmap_altmap(pgmap));
-               kasan_remove_zero_shadow(__va(res->start), resource_size(res));
+               kasan_remove_zero_shadow(__va(range->start), range_len(range));
        }
        mem_hotplug_done();
 
-       untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
-       pgmap_array_delete(res);
+       untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
+       pgmap_array_delete(range);
        WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
        devmap_managed_enable_put();
 }
@@ -182,7 +182,7 @@ static void dev_pagemap_percpu_release(struct percpu_ref *ref)
  */
 void *memremap_pages(struct dev_pagemap *pgmap, int nid)
 {
-       struct resource *res = &pgmap->res;
+       struct range *range = &pgmap->range;
        struct dev_pagemap *conflict_pgmap;
        struct mhp_params params = {
                /*
@@ -251,7 +251,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
                        return ERR_PTR(error);
        }
 
-       conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->start), NULL);
+       conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
        if (conflict_pgmap) {
                WARN(1, "Conflicting mapping in same section\n");
                put_dev_pagemap(conflict_pgmap);
@@ -259,7 +259,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
                goto err_array;
        }
 
-       conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->end), NULL);
+       conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
        if (conflict_pgmap) {
                WARN(1, "Conflicting mapping in same section\n");
                put_dev_pagemap(conflict_pgmap);
@@ -267,26 +267,27 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
                goto err_array;
        }
 
-       is_ram = region_intersects(res->start, resource_size(res),
+       is_ram = region_intersects(range->start, range_len(range),
                IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
 
        if (is_ram != REGION_DISJOINT) {
-               WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
-                               is_ram == REGION_MIXED ? "mixed" : "ram", res);
+               WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n",
+                               is_ram == REGION_MIXED ? "mixed" : "ram",
+                               range->start, range->end);
                error = -ENXIO;
                goto err_array;
        }
 
-       error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
-                               PHYS_PFN(res->end), pgmap, GFP_KERNEL));
+       error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
+                               PHYS_PFN(range->end), pgmap, GFP_KERNEL));
        if (error)
                goto err_array;
 
        if (nid < 0)
                nid = numa_mem_id();
 
-       error = track_pfn_remap(NULL, &params.pgprot, PHYS_PFN(res->start),
-                               0, resource_size(res));
+       error = track_pfn_remap(NULL, &params.pgprot, PHYS_PFN(range->start), 0,
+                       range_len(range));
        if (error)
                goto err_pfn_remap;
 
@@ -304,16 +305,16 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
         * arch_add_memory().
         */
        if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
-               error = add_pages(nid, PHYS_PFN(res->start),
-                               PHYS_PFN(resource_size(res)), &params);
+               error = add_pages(nid, PHYS_PFN(range->start),
+                               PHYS_PFN(range_len(range)), &params);
        } else {
-               error = kasan_add_zero_shadow(__va(res->start), resource_size(res));
+               error = kasan_add_zero_shadow(__va(range->start), range_len(range));
                if (error) {
                        mem_hotplug_done();
                        goto err_kasan;
                }
 
-               error = arch_add_memory(nid, res->start, resource_size(res),
+               error = arch_add_memory(nid, range->start, range_len(range),
                                        &params);
        }
 
@@ -321,8 +322,8 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
                struct zone *zone;
 
                zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
-               move_pfn_range_to_zone(zone, PHYS_PFN(res->start),
-                               PHYS_PFN(resource_size(res)), params.altmap);
+               move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
+                               PHYS_PFN(range_len(range)), params.altmap);
        }
 
        mem_hotplug_done();
@@ -334,17 +335,17 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
         * to allow us to do the work while not holding the hotplug lock.
         */
        memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
-                               PHYS_PFN(res->start),
-                               PHYS_PFN(resource_size(res)), pgmap);
+                               PHYS_PFN(range->start),
+                               PHYS_PFN(range_len(range)), pgmap);
        percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));
-       return __va(res->start);
+       return __va(range->start);
 
  err_add_memory:
-       kasan_remove_zero_shadow(__va(res->start), resource_size(res));
+       kasan_remove_zero_shadow(__va(range->start), range_len(range));
  err_kasan:
-       untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
+       untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
  err_pfn_remap:
-       pgmap_array_delete(res);
+       pgmap_array_delete(range);
  err_array:
        dev_pagemap_kill(pgmap);
        dev_pagemap_cleanup(pgmap);
@@ -369,7 +370,7 @@ EXPORT_SYMBOL_GPL(memremap_pages);
  *    'live' on entry and will be killed and reaped at
  *    devm_memremap_pages_release() time, or if this routine fails.
  *
- * 4/ res is expected to be a host memory range that could feasibly be
+ * 4/ range is expected to be a host memory range that could feasibly be
  *    treated as a "System RAM" range, i.e. not a device mmio range, but
  *    this is not enforced.
  */
@@ -426,7 +427,7 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
         * In the cached case we're already holding a live reference.
         */
        if (pgmap) {
-               if (phys >= pgmap->res.start && phys <= pgmap->res.end)
+               if (phys >= pgmap->range.start && phys <= pgmap->range.end)
                        return pgmap;
                put_dev_pagemap(pgmap);
        }
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
index 03e40b3b0106e1d3c429d5f88552cedf717895aa..c62d372d426fb3f58f049d9ff94684790107570c 100644
@@ -126,7 +126,7 @@ static void dev_pagemap_percpu_release(struct percpu_ref *ref)
 void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 {
        int error;
-       resource_size_t offset = pgmap->res.start;
+       resource_size_t offset = pgmap->range.start;
        struct nfit_test_resource *nfit_res = get_nfit_res(offset);
 
        if (!nfit_res)