xen: add helpers to allocate unpopulated memory
author    Roger Pau Monne <roger.pau@citrix.com>
          Tue, 1 Sep 2020 08:33:26 +0000 (10:33 +0200)
committer Juergen Gross <jgross@suse.com>
          Fri, 4 Sep 2020 08:00:01 +0000 (10:00 +0200)
To be used in order to create foreign mappings. This is based on the
ZONE_DEVICE facility, which is used by persistent memory devices in
order to create struct pages and kernel virtual mappings for the
IOMEM areas of such devices. Note that on kernels without ZONE_DEVICE
support Xen will fall back to using ballooned pages in order to
create foreign mappings.
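
For orientation, here is a condensed sketch of that ZONE_DEVICE
mechanism as fill_list() below uses it. The function name is
illustrative only; section-size rounding and the PV p2m handling are
omitted, see drivers/xen/unpopulated-alloc.c in the diff for the full
version:

    #include <linux/err.h>
    #include <linux/ioport.h>
    #include <linux/memremap.h>
    #include <linux/mm.h>
    #include <linux/slab.h>

    /* Illustrative only; condensed from fill_list() below. */
    static int sketch_add_scratch_region(unsigned int nr_pages)
    {
            struct dev_pagemap *pgmap;
            void *vaddr;
            int ret;

            pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
            if (!pgmap)
                    return -ENOMEM;

            pgmap->type = MEMORY_DEVICE_GENERIC;
            pgmap->res.name = "Xen scratch";
            pgmap->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;

            /* Reserve an unpopulated hole in the physical address space. */
            ret = allocate_resource(&iomem_resource, &pgmap->res,
                                    nr_pages * PAGE_SIZE, 0, -1,
                                    PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
            if (ret < 0) {
                    kfree(pgmap);
                    return ret;
            }

            /* ZONE_DEVICE builds struct pages and a kernel mapping. */
            vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
            if (IS_ERR(vaddr)) {
                    release_resource(&pgmap->res);
                    kfree(pgmap);
                    return PTR_ERR(vaddr);
            }

            return 0;
    }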

The newly added helpers use the same parameters as the existing
{alloc/free}_xenballooned_pages functions, which allows for in-place
replacement of the callers. Once a memory region has been added to be
used as scratch mapping space it is not released again, and pages
returned to the allocator are kept in a linked list. This keeps a
buffer of pages available and avoids frequent additions and removals
of memory regions.
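
Because the signatures match, converting a caller is mechanical. A
hypothetical user, modelled on the gnttab and privcmd hunks below
(example_map_foreign() is not part of the patch), would look like:

    #include <linux/slab.h>
    #include <xen/xen.h>

    /*
     * Hypothetical caller; on kernels without ZONE_DEVICE the xen.h
     * fallback routes these calls to {alloc/free}_xenballooned_pages().
     */
    static int example_map_foreign(unsigned int nr_pages)
    {
            struct page **pages;
            int rc;

            pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return -ENOMEM;

            rc = xen_alloc_unpopulated_pages(nr_pages, pages);
            if (rc < 0)
                    goto out_free;

            /* ... map foreign frames into the reserved pages ... */

            xen_free_unpopulated_pages(nr_pages, pages);
    out_free:
            kfree(pages);
            return rc;
    }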

If enabled (because ZONE_DEVICE is supported), the new functionality
untangles the Xen balloon and RAM hotplug from the use of unpopulated
physical memory ranges to map foreign pages, which is the correct
thing to do in order to avoid making mappings of foreign pages depend
on memory hotplug.

Note that the driver is currently not enabled on Arm, because it
would interfere with the identity mapping required on some platforms.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Juergen Gross <jgross@suse.com>
Link: https://lore.kernel.org/r/20200901083326.21264-4-roger.pau@citrix.com
Signed-off-by: Juergen Gross <jgross@suse.com>
drivers/gpu/drm/xen/xen_drm_front_gem.c
drivers/xen/Kconfig
drivers/xen/Makefile
drivers/xen/balloon.c
drivers/xen/grant-table.c
drivers/xen/privcmd.c
drivers/xen/unpopulated-alloc.c [new file with mode: 0644]
drivers/xen/xenbus/xenbus_client.c
drivers/xen/xlate_mmu.c
include/xen/xen.h

index 39ff95b75357ddafc5daac2d0110b2adef7bffd6..534daf37c97ed8bbb133b13f27a91908c83001bc 100644 (file)
@@ -18,6 +18,7 @@
 #include <drm/drm_probe_helper.h>
 
 #include <xen/balloon.h>
+#include <xen/xen.h>
 
 #include "xen_drm_front.h"
 #include "xen_drm_front_gem.h"
@@ -99,8 +100,8 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
                 * allocate ballooned pages which will be used to map
                 * grant references provided by the backend
                 */
-               ret = alloc_xenballooned_pages(xen_obj->num_pages,
-                                              xen_obj->pages);
+               ret = xen_alloc_unpopulated_pages(xen_obj->num_pages,
+                                                 xen_obj->pages);
                if (ret < 0) {
                        DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
                                  xen_obj->num_pages, ret);
@@ -152,8 +153,8 @@ void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
        } else {
                if (xen_obj->pages) {
                        if (xen_obj->be_alloc) {
-                               free_xenballooned_pages(xen_obj->num_pages,
-                                                       xen_obj->pages);
+                               xen_free_unpopulated_pages(xen_obj->num_pages,
+                                                          xen_obj->pages);
                                gem_free_pages_array(xen_obj);
                        } else {
                                drm_gem_put_pages(&xen_obj->base,
index 46e7fd099904ed6d9ec8c6ecf5b91a4d9b42fadd..0ab54df82520731d4b35768fd8152153203feb9e 100644 (file)
@@ -324,4 +324,14 @@ config XEN_HAVE_VPMU
 config XEN_FRONT_PGDIR_SHBUF
        tristate
 
+config XEN_UNPOPULATED_ALLOC
+       bool "Use unpopulated memory ranges for guest mappings"
+       depends on X86 && ZONE_DEVICE
+       default XEN_BACKEND || XEN_GNTDEV || XEN_DOM0
+       help
+         Use unpopulated memory ranges in order to create mappings for guest
+         memory regions, including grant maps and foreign pages. This avoids
+         having to balloon out RAM regions in order to obtain physical memory
+         space to create such mappings.
+
 endmenu
index 0d322f3d90cdbc12c82801977e4203d20aa12c4a..3cca2be28824a2ed50257c5ba948032f11d7c854 100644 (file)
@@ -42,3 +42,4 @@ xen-gntdev-$(CONFIG_XEN_GNTDEV_DMABUF)        += gntdev-dmabuf.o
 xen-gntalloc-y                         := gntalloc.o
 xen-privcmd-y                          := privcmd.o privcmd-buf.o
 obj-$(CONFIG_XEN_FRONT_PGDIR_SHBUF)    += xen-front-pgdir-shbuf.o
+obj-$(CONFIG_XEN_UNPOPULATED_ALLOC)    += unpopulated-alloc.o
index b1d8b028bf804ac4cc02574dc102f00cd56ec0bd..4bfbe71705e4d3b2726f8f9bcce9da75c4e80bb4 100644 (file)
@@ -654,7 +654,7 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
 }
 EXPORT_SYMBOL(free_xenballooned_pages);
 
-#ifdef CONFIG_XEN_PV
+#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
 static void __init balloon_add_region(unsigned long start_pfn,
                                      unsigned long pages)
 {
@@ -708,7 +708,7 @@ static int __init balloon_init(void)
        register_sysctl_table(xen_root);
 #endif
 
-#ifdef CONFIG_XEN_PV
+#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
        {
                int i;
 
index 8d06bf1cc3479c5a205b0d2f05782416f0454f3a..523dcdf39cc944d45b8275845bfe2335aae72302 100644 (file)
@@ -801,7 +801,7 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages)
 {
        int ret;
 
-       ret = alloc_xenballooned_pages(nr_pages, pages);
+       ret = xen_alloc_unpopulated_pages(nr_pages, pages);
        if (ret < 0)
                return ret;
 
@@ -836,7 +836,7 @@ EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);
 void gnttab_free_pages(int nr_pages, struct page **pages)
 {
        gnttab_pages_clear_private(nr_pages, pages);
-       free_xenballooned_pages(nr_pages, pages);
+       xen_free_unpopulated_pages(nr_pages, pages);
 }
 EXPORT_SYMBOL_GPL(gnttab_free_pages);
 
index 095d683ad574ce25d0500471319da8ff499f2381..8bcb0ce223a5dd53623cf7ec40febaf89c0a7443 100644 (file)
@@ -425,7 +425,7 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
        if (pages == NULL)
                return -ENOMEM;
 
-       rc = alloc_xenballooned_pages(numpgs, pages);
+       rc = xen_alloc_unpopulated_pages(numpgs, pages);
        if (rc != 0) {
                pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
                        numpgs, rc);
@@ -896,7 +896,7 @@ static void privcmd_close(struct vm_area_struct *vma)
 
        rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
        if (rc == 0)
-               free_xenballooned_pages(numpgs, pages);
+               xen_free_unpopulated_pages(numpgs, pages);
        else
                pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
                        numpgs, rc);
diff --git a/drivers/xen/unpopulated-alloc.c b/drivers/xen/unpopulated-alloc.c
new file mode 100644 (file)
index 0000000..3b98dc9
--- /dev/null
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/memremap.h>
+#include <linux/slab.h>
+
+#include <asm/page.h>
+
+#include <xen/page.h>
+#include <xen/xen.h>
+
+static DEFINE_MUTEX(list_lock);
+static LIST_HEAD(page_list);
+static unsigned int list_count;
+
+static int fill_list(unsigned int nr_pages)
+{
+       struct dev_pagemap *pgmap;
+       void *vaddr;
+       unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
+       int ret;
+
+       pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
+       if (!pgmap)
+               return -ENOMEM;
+
+       pgmap->type = MEMORY_DEVICE_GENERIC;
+       pgmap->res.name = "Xen scratch";
+       pgmap->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
+       ret = allocate_resource(&iomem_resource, &pgmap->res,
+                               alloc_pages * PAGE_SIZE, 0, -1,
+                               PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
+       if (ret < 0) {
+               pr_err("Cannot allocate new IOMEM resource\n");
+               kfree(pgmap);
+               return ret;
+       }
+
+#ifdef CONFIG_XEN_HAVE_PVMMU
+       /*
+        * memremap will build page tables for the new memory so
+        * the p2m must contain invalid entries so the correct
+        * non-present PTEs will be written.
+        *
+        * If a failure occurs, the original (identity) p2m entries
+        * are not restored since this region is now known not to
+        * conflict with any devices.
+        */
+       if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+               xen_pfn_t pfn = PFN_DOWN(pgmap->res.start);
+
+               for (i = 0; i < alloc_pages; i++) {
+                       if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
+                               pr_warn("set_phys_to_machine() failed, no memory added\n");
+                               release_resource(&pgmap->res);
+                               kfree(pgmap);
+                               return -ENOMEM;
+                       }
+               }
+       }
+#endif
+
+       vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
+       if (IS_ERR(vaddr)) {
+               pr_err("Cannot remap memory range\n");
+               release_resource(&pgmap->res);
+               kfree(pgmap);
+               return PTR_ERR(vaddr);
+       }
+
+       for (i = 0; i < alloc_pages; i++) {
+               struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);
+
+               BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
+               list_add(&pg->lru, &page_list);
+               list_count++;
+       }
+
+       return 0;
+}
+
+/**
+ * xen_alloc_unpopulated_pages - alloc unpopulated pages
+ * @nr_pages: Number of pages
+ * @pages: pages returned
+ * @return 0 on success, error otherwise
+ */
+int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
+{
+       unsigned int i;
+       int ret = 0;
+
+       mutex_lock(&list_lock);
+       if (list_count < nr_pages) {
+               ret = fill_list(nr_pages - list_count);
+               if (ret)
+                       goto out;
+       }
+
+       for (i = 0; i < nr_pages; i++) {
+               struct page *pg = list_first_entry_or_null(&page_list,
+                                                          struct page,
+                                                          lru);
+
+               BUG_ON(!pg);
+               list_del(&pg->lru);
+               list_count--;
+               pages[i] = pg;
+
+#ifdef CONFIG_XEN_HAVE_PVMMU
+               if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+                       ret = xen_alloc_p2m_entry(page_to_pfn(pg));
+                       if (ret < 0) {
+                               unsigned int j;
+
+                               for (j = 0; j <= i; j++) {
+                                       list_add(&pages[j]->lru, &page_list);
+                                       list_count++;
+                               }
+                               goto out;
+                       }
+               }
+#endif
+       }
+
+out:
+       mutex_unlock(&list_lock);
+       return ret;
+}
+EXPORT_SYMBOL(xen_alloc_unpopulated_pages);
+
+/**
+ * xen_free_unpopulated_pages - return unpopulated pages
+ * @nr_pages: Number of pages
+ * @pages: pages to return
+ */
+void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
+{
+       unsigned int i;
+
+       mutex_lock(&list_lock);
+       for (i = 0; i < nr_pages; i++) {
+               list_add(&pages[i]->lru, &page_list);
+               list_count++;
+       }
+       mutex_unlock(&list_lock);
+}
+EXPORT_SYMBOL(xen_free_unpopulated_pages);
+
+#ifdef CONFIG_XEN_PV
+static int __init init(void)
+{
+       unsigned int i;
+
+       if (!xen_domain())
+               return -ENODEV;
+
+       if (!xen_pv_domain())
+               return 0;
+
+       /*
+        * Initialize with pages from the extra memory regions (see
+        * arch/x86/xen/setup.c).
+        */
+       for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+               unsigned int j;
+
+               for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
+                       struct page *pg =
+                               pfn_to_page(xen_extra_mem[i].start_pfn + j);
+
+                       list_add(&pg->lru, &page_list);
+                       list_count++;
+               }
+       }
+
+       return 0;
+}
+subsys_initcall(init);
+#endif
index 907bcbb93afbf2a9b1890523165620378e7c9863..2690318ad50f487ad525d1653ee570f0d3772384 100644 (file)
@@ -621,7 +621,7 @@ static int xenbus_map_ring_hvm(struct xenbus_device *dev,
        bool leaked = false;
        unsigned int nr_pages = XENBUS_PAGES(nr_grefs);
 
-       err = alloc_xenballooned_pages(nr_pages, node->hvm.pages);
+       err = xen_alloc_unpopulated_pages(nr_pages, node->hvm.pages);
        if (err)
                goto out_err;
 
@@ -662,7 +662,7 @@ static int xenbus_map_ring_hvm(struct xenbus_device *dev,
                         addr, nr_pages);
  out_free_ballooned_pages:
        if (!leaked)
-               free_xenballooned_pages(nr_pages, node->hvm.pages);
+               xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
  out_err:
        return err;
 }
@@ -858,7 +858,7 @@ static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr)
                               info.addrs);
        if (!rv) {
                vunmap(vaddr);
-               free_xenballooned_pages(nr_pages, node->hvm.pages);
+               xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
        }
        else
                WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages);
index 7b1077f0abcb0839d89a299de2a9d9d60760943a..34742c6e189e386a5120ad67667faae3031ab25c 100644 (file)
@@ -232,7 +232,7 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
                kfree(pages);
                return -ENOMEM;
        }
-       rc = alloc_xenballooned_pages(nr_pages, pages);
+       rc = xen_alloc_unpopulated_pages(nr_pages, pages);
        if (rc) {
                pr_warn("%s Couldn't balloon alloc %ld pages rc:%d\n", __func__,
                        nr_pages, rc);
@@ -249,7 +249,7 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
        if (!vaddr) {
                pr_warn("%s Couldn't map %ld pages rc:%d\n", __func__,
                        nr_pages, rc);
-               free_xenballooned_pages(nr_pages, pages);
+               xen_free_unpopulated_pages(nr_pages, pages);
                kfree(pages);
                kfree(pfns);
                return -ENOMEM;
index 19a72f591e2bdc86bd0edc99a685def4157b2728..43efba045acc7e2a942e049d4b52b4c97244dc8e 100644 (file)
@@ -52,4 +52,13 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
 extern u64 xen_saved_max_mem_size;
 #endif
 
+#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
+int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages);
+void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages);
+#else
+#define xen_alloc_unpopulated_pages alloc_xenballooned_pages
+#define xen_free_unpopulated_pages free_xenballooned_pages
+#include <xen/balloon.h>
+#endif
+
 #endif /* _XEN_XEN_H */