git.baikalelectronics.ru Git - kernel.git/commitdiff
drm/i915: support forcing the page size with lmem
author Matthew Auld <matthew.auld@intel.com>
Fri, 25 Jun 2021 10:38:23 +0000 (11:38 +0100)
committer Matthew Auld <matthew.auld@intel.com>
Wed, 30 Jun 2021 12:24:29 +0000 (13:24 +0100)
For some specialised objects we might need something larger than the
region's min_page_size due to some hw restriction, and slightly more
hairy is needing something smaller with the guarantee that such objects
will never be inserted into any GTT, which is the case for the paging
structures.

This also fixes how we set up the BO page_alignment, if we later migrate
the object somewhere else. For example if the placements are {SMEM,
LMEM}, then we might get this wrong. Pushing the min_page_size behaviour
into the manager should fix this.

v2(Thomas): push the default page size behaviour into buddy_man, and let
the user override it with the page-alignment, which looks cleaner

v3: rebase on ttm sys changes

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210625103824.558481-1-matthew.auld@intel.com
18 files changed:
drivers/gpu/drm/i915/gem/i915_gem_create.c
drivers/gpu/drm/i915/gem/i915_gem_lmem.c
drivers/gpu/drm/i915/gem/i915_gem_lmem.h
drivers/gpu/drm/i915/gem/i915_gem_region.c
drivers/gpu/drm/i915/gem/i915_gem_region.h
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
drivers/gpu/drm/i915/gem/i915_gem_ttm.h
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
drivers/gpu/drm/i915/i915_ttm_buddy_manager.h
drivers/gpu/drm/i915/intel_memory_region.h
drivers/gpu/drm/i915/intel_region_ttm.c
drivers/gpu/drm/i915/selftests/intel_memory_region.c
drivers/gpu/drm/i915/selftests/mock_region.c

index 93bf63bbaff1818cf088d62cff18d4c99302a252..51f92e4b1a69d0d521de833d80ca8866f6d7aa5f 100644 (file)
@@ -90,7 +90,7 @@ i915_gem_setup(struct drm_i915_gem_object *obj, u64 size)
         */
        flags = I915_BO_ALLOC_USER;
 
-       ret = mr->ops->init_object(mr, obj, size, flags);
+       ret = mr->ops->init_object(mr, obj, size, 0, flags);
        if (ret)
                return ret;
 
index be1d122574af3bd0f7b07653e065c60788607dac..eb345305dc52ed181117e9af7eb8ed98b69f2c40 100644 (file)
@@ -72,11 +72,42 @@ bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
                      mr->type == INTEL_MEMORY_STOLEN_LOCAL);
 }
 
+/**
+ * __i915_gem_object_create_lmem_with_ps - Create lmem object and force the
+ * minimum page size for the backing pages.
+ * @i915: The i915 instance.
+ * @size: The size in bytes for the object. Note that we need to round the size
+ * up depending on the @page_size. The final object size can be fished out from
+ * the drm GEM object.
+ * @page_size: The requested minimum page size in bytes for this object. This is
+ * useful if we need something bigger than the regions min_page_size due to some
+ * hw restriction, or in some very specialised cases where it needs to be
+ * smaller, where the internal fragmentation cost is too great when rounding up
+ * the object size.
+ * @flags: The optional BO allocation flags.
+ *
+ * Note that this interface assumes you know what you are doing when forcing the
+ * @page_size. If this is smaller than the regions min_page_size then it can
+ * never be inserted into any GTT, otherwise it might lead to undefined
+ * behaviour.
+ *
+ * Return: The object pointer, which might be an ERR_PTR in the case of failure.
+ */
+struct drm_i915_gem_object *
+__i915_gem_object_create_lmem_with_ps(struct drm_i915_private *i915,
+                                     resource_size_t size,
+                                     resource_size_t page_size,
+                                     unsigned int flags)
+{
+       return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
+                                            size, page_size, flags);
+}
+
 struct drm_i915_gem_object *
 i915_gem_object_create_lmem(struct drm_i915_private *i915,
                            resource_size_t size,
                            unsigned int flags)
 {
        return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
-                                            size, flags);
+                                            size, 0, flags);
 }
index 27a611deba47f378efce9fe8bb3e3fcfb20fb894..4ee81fc66302310bd7da8ff67e67644e623fde3e 100644 (file)
@@ -23,6 +23,11 @@ bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
 
 bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
 
+struct drm_i915_gem_object *
+__i915_gem_object_create_lmem_with_ps(struct drm_i915_private *i915,
+                                     resource_size_t size,
+                                     resource_size_t page_size,
+                                     unsigned int flags);
 struct drm_i915_gem_object *
 i915_gem_object_create_lmem(struct drm_i915_private *i915,
                            resource_size_t size,
index 4925563018b4551d248310ee2befb360317004b6..1f557b2178ed33f1440e56abd898fc0a910e0d9a 100644 (file)
@@ -32,9 +32,11 @@ void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
 struct drm_i915_gem_object *
 i915_gem_object_create_region(struct intel_memory_region *mem,
                              resource_size_t size,
+                             resource_size_t page_size,
                              unsigned int flags)
 {
        struct drm_i915_gem_object *obj;
+       resource_size_t default_page_size;
        int err;
 
        /*
@@ -48,7 +50,14 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
        if (!mem)
                return ERR_PTR(-ENODEV);
 
-       size = round_up(size, mem->min_page_size);
+       default_page_size = mem->min_page_size;
+       if (page_size)
+               default_page_size = page_size;
+
+       GEM_BUG_ON(!is_power_of_2_u64(default_page_size));
+       GEM_BUG_ON(default_page_size < PAGE_SIZE);
+
+       size = round_up(size, default_page_size);
 
        GEM_BUG_ON(!size);
        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));
@@ -60,7 +69,7 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
        if (!obj)
                return ERR_PTR(-ENOMEM);
 
-       err = mem->ops->init_object(mem, obj, size, flags);
+       err = mem->ops->init_object(mem, obj, size, page_size, flags);
        if (err)
                goto err_object_free;
 
index 84fcb3297400f0dcc35044ae74fc2542a2cc27d3..1008e580a89a449809e735ffae092926cd404871 100644 (file)
@@ -19,6 +19,7 @@ void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);
 struct drm_i915_gem_object *
 i915_gem_object_create_region(struct intel_memory_region *mem,
                              resource_size_t size,
+                             resource_size_t page_size,
                              unsigned int flags);
 
 #endif
index e9ac913b923aee36e4d46c04a29f1fe46a4ee159..6a04cce188fcc99bed2ebf6ec1e20689551db3a0 100644 (file)
@@ -490,6 +490,7 @@ static int __create_shmem(struct drm_i915_private *i915,
 static int shmem_object_init(struct intel_memory_region *mem,
                             struct drm_i915_gem_object *obj,
                             resource_size_t size,
+                            resource_size_t page_size,
                             unsigned int flags)
 {
        static struct lock_class_key lock_class;
@@ -548,7 +549,7 @@ i915_gem_object_create_shmem(struct drm_i915_private *i915,
                             resource_size_t size)
 {
        return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
-                                            size, 0);
+                                            size, 0, 0);
 }
 
 /* Allocate a new GEM object and fill it with the supplied data */
index b0c3a7dc60d16d25224b9b57c235a98d96813ab7..90708de276842b7db35cadccd9d9e6cea3303355 100644 (file)
@@ -670,6 +670,7 @@ static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
 static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
                                        struct drm_i915_gem_object *obj,
                                        resource_size_t size,
+                                       resource_size_t page_size,
                                        unsigned int flags)
 {
        struct drm_i915_private *i915 = mem->i915;
@@ -708,7 +709,7 @@ struct drm_i915_gem_object *
 i915_gem_object_create_stolen(struct drm_i915_private *i915,
                              resource_size_t size)
 {
-       return i915_gem_object_create_region(i915->mm.stolen_region, size, 0);
+       return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0);
 }
 
 static int init_stolen_smem(struct intel_memory_region *mem)
index 521ab740001a7377d88736cc16b30477a6b8edf1..6589411396d3f6aa8ae2f79dc25dbe493fcaef15 100644 (file)
@@ -893,6 +893,7 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
 int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
                               struct drm_i915_gem_object *obj,
                               resource_size_t size,
+                              resource_size_t page_size,
                               unsigned int flags)
 {
        static struct lock_class_key lock_class;
@@ -915,6 +916,9 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
 
        obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
 
+       /* Forcing the page size is kernel internal only */
+       GEM_BUG_ON(page_size && obj->mm.n_placements);
+
        /*
         * If this function fails, it will call the destructor, but
         * our caller still owns the object. So no freeing in the
@@ -924,7 +928,7 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
         */
        ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), size,
                                   bo_type, &i915_sys_placement,
-                                  mem->min_page_size >> PAGE_SHIFT,
+                                  page_size >> PAGE_SHIFT,
                                   &ctx, NULL, NULL, i915_ttm_bo_destroy);
        if (ret)
                return i915_ttm_err_to_gem(ret);
index b8d3dcbb50df05313579b5edf13dcee336e2a8ea..40927f67b6d90001d9d755ed3ba0c30ec507a9fd 100644 (file)
@@ -44,5 +44,6 @@ i915_ttm_to_gem(struct ttm_buffer_object *bo)
 int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
                               struct drm_i915_gem_object *obj,
                               resource_size_t size,
+                              resource_size_t page_size,
                               unsigned int flags);
 #endif
index ccc67ed1a84b162a061a19c051b5ea05b6f0e712..a094f3ce1a902933c1db565b1ee249659420a756 100644 (file)
@@ -496,7 +496,8 @@ static int igt_mock_memory_region_huge_pages(void *arg)
                int i;
 
                for (i = 0; i < ARRAY_SIZE(flags); ++i) {
-                       obj = i915_gem_object_create_region(mem, page_size,
+                       obj = i915_gem_object_create_region(mem,
+                                                           page_size, page_size,
                                                            flags[i]);
                        if (IS_ERR(obj)) {
                                err = PTR_ERR(obj);
index ced6e3a814a254da0f3a62fefce76af89fd9f573..0b7144d2991ca4b0418d71319240622d1cb60151 100644 (file)
@@ -48,7 +48,7 @@ static int igt_create_migrate(struct intel_gt *gt, enum intel_region_id src,
        GEM_BUG_ON(!src_mr);
 
        /* Switch object backing-store on create */
-       obj = i915_gem_object_create_region(src_mr, PAGE_SIZE, 0);
+       obj = i915_gem_object_create_region(src_mr, PAGE_SIZE, 0, 0);
        if (IS_ERR(obj))
                return PTR_ERR(obj);
 
index 607b7d2d4c29c78682b9cfc0b91d12bc7dd9fb89..1da8bd675e541ace6b25f5338c52b257db27a63c 100644 (file)
@@ -958,7 +958,7 @@ static int igt_mmap(void *arg)
                        struct drm_i915_gem_object *obj;
                        int err;
 
-                       obj = i915_gem_object_create_region(mr, sizes[i], I915_BO_ALLOC_USER);
+                       obj = i915_gem_object_create_region(mr, sizes[i], 0, I915_BO_ALLOC_USER);
                        if (obj == ERR_PTR(-ENODEV))
                                continue;
 
@@ -1084,7 +1084,7 @@ static int igt_mmap_access(void *arg)
                struct drm_i915_gem_object *obj;
                int err;
 
-               obj = i915_gem_object_create_region(mr, PAGE_SIZE, I915_BO_ALLOC_USER);
+               obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0, I915_BO_ALLOC_USER);
                if (obj == ERR_PTR(-ENODEV))
                        continue;
 
@@ -1229,7 +1229,7 @@ static int igt_mmap_gpu(void *arg)
                struct drm_i915_gem_object *obj;
                int err;
 
-               obj = i915_gem_object_create_region(mr, PAGE_SIZE, I915_BO_ALLOC_USER);
+               obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0, I915_BO_ALLOC_USER);
                if (obj == ERR_PTR(-ENODEV))
                        continue;
 
@@ -1384,7 +1384,7 @@ static int igt_mmap_revoke(void *arg)
                struct drm_i915_gem_object *obj;
                int err;
 
-               obj = i915_gem_object_create_region(mr, PAGE_SIZE, I915_BO_ALLOC_USER);
+               obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0, I915_BO_ALLOC_USER);
                if (obj == ERR_PTR(-ENODEV))
                        continue;
 
index fc7ad5c035b8f93a662fc5fd1dd1c2278abaf0e8..6877362f6b85ffb768dff7cbba084121eab52d46 100644 (file)
@@ -18,6 +18,7 @@ struct i915_ttm_buddy_manager {
        struct i915_buddy_mm mm;
        struct list_head reserved;
        struct mutex lock;
+       u64 default_page_size;
 };
 
 static struct i915_ttm_buddy_manager *
@@ -53,7 +54,10 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
        GEM_BUG_ON(!bman_res->base.num_pages);
        size = bman_res->base.num_pages << PAGE_SHIFT;
 
-       min_page_size = bo->page_alignment << PAGE_SHIFT;
+       min_page_size = bman->default_page_size;
+       if (bo->page_alignment)
+               min_page_size = bo->page_alignment << PAGE_SHIFT;
+
        GEM_BUG_ON(min_page_size < mm->chunk_size);
        min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
        if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
@@ -134,6 +138,9 @@ static const struct ttm_resource_manager_func i915_ttm_buddy_manager_func = {
  * @type: Memory type we want to manage
  * @use_tt: Set use_tt for the manager
  * @size: The size in bytes to manage
+ * @default_page_size: The default minimum page size in bytes for allocations,
+ * this must be at least as large as @chunk_size, and can be overridden by
+ * setting the BO page_alignment, to be larger or smaller as needed.
  * @chunk_size: The minimum page size in bytes for our allocations i.e
  * order-zero
  *
@@ -154,7 +161,8 @@ static const struct ttm_resource_manager_func i915_ttm_buddy_manager_func = {
  */
 int i915_ttm_buddy_man_init(struct ttm_device *bdev,
                            unsigned int type, bool use_tt,
-                           u64 size, u64 chunk_size)
+                           u64 size, u64 default_page_size,
+                           u64 chunk_size)
 {
        struct ttm_resource_manager *man;
        struct i915_ttm_buddy_manager *bman;
@@ -170,6 +178,8 @@ int i915_ttm_buddy_man_init(struct ttm_device *bdev,
 
        mutex_init(&bman->lock);
        INIT_LIST_HEAD(&bman->reserved);
+       GEM_BUG_ON(default_page_size < chunk_size);
+       bman->default_page_size = default_page_size;
 
        man = &bman->manager;
        man->use_tt = use_tt;
index 26026213e20aed2bb79695fc48f8971a099abcff..0722d33f3e14699eefea013b3dd87b3d7ece72be 100644 (file)
@@ -46,7 +46,7 @@ to_ttm_buddy_resource(struct ttm_resource *res)
 
 int i915_ttm_buddy_man_init(struct ttm_device *bdev,
                            unsigned type, bool use_tt,
-                           u64 size, u64 chunk_size);
+                           u64 size, u64 default_page_size, u64 chunk_size);
 int i915_ttm_buddy_man_fini(struct ttm_device *bdev,
                            unsigned int type);
 
index b1b9e461d53be119d28aad160704e50bb6019ce2..1f2b96efa69db3c3d8d5ec2fe4c9a96bf70b5f10 100644 (file)
@@ -55,6 +55,7 @@ struct intel_memory_region_ops {
        int (*init_object)(struct intel_memory_region *mem,
                           struct drm_i915_gem_object *obj,
                           resource_size_t size,
+                          resource_size_t page_size,
                           unsigned int flags);
 };
 
index 4cd10f364e84f168cf358f56e647528028cc7452..98c7339bf8babd4f0b22f9eaef43087f10797f8a 100644 (file)
@@ -86,7 +86,8 @@ int intel_region_ttm_init(struct intel_memory_region *mem)
        int ret;
 
        ret = i915_ttm_buddy_man_init(bdev, mem_type, false,
-                                     resource_size(&mem->region), PAGE_SIZE);
+                                     resource_size(&mem->region),
+                                     mem->min_page_size, PAGE_SIZE);
        if (ret)
                return ret;
 
@@ -167,7 +168,6 @@ intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
        int ret;
 
        mock_bo.base.size = size;
-       mock_bo.page_alignment = mem->min_page_size >> PAGE_SHIFT;
        place.flags = flags;
 
        ret = man->func->alloc(man, &mock_bo, &place, &res);
index ecc3b9e6c22b2e9f0e9538eb4ebf13faaff73dbc..1aaccb9841a089858c0e6e50d6ce37e7ebd59407 100644 (file)
@@ -68,7 +68,7 @@ static int igt_mock_fill(void *arg)
                resource_size_t size = page_num * page_size;
                struct drm_i915_gem_object *obj;
 
-               obj = i915_gem_object_create_region(mem, size, 0);
+               obj = i915_gem_object_create_region(mem, size, 0, 0);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        break;
@@ -110,7 +110,7 @@ igt_object_create(struct intel_memory_region *mem,
        struct drm_i915_gem_object *obj;
        int err;
 
-       obj = i915_gem_object_create_region(mem, size, flags);
+       obj = i915_gem_object_create_region(mem, size, 0, flags);
        if (IS_ERR(obj))
                return obj;
 
@@ -647,6 +647,62 @@ out_put:
        return err;
 }
 
+static int igt_lmem_create_with_ps(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       int err = 0;
+       u32 ps;
+
+       for (ps = PAGE_SIZE; ps <= SZ_1G; ps <<= 1) {
+               struct drm_i915_gem_object *obj;
+               dma_addr_t daddr;
+
+               obj = __i915_gem_object_create_lmem_with_ps(i915, ps, ps, 0);
+               if (IS_ERR(obj)) {
+                       err = PTR_ERR(obj);
+                       if (err == -ENXIO || err == -E2BIG) {
+                               pr_info("%s not enough lmem for ps(%u) err=%d\n",
+                                       __func__, ps, err);
+                               err = 0;
+                       }
+
+                       break;
+               }
+
+               if (obj->base.size != ps) {
+                       pr_err("%s size(%zu) != ps(%u)\n",
+                              __func__, obj->base.size, ps);
+                       err = -EINVAL;
+                       goto out_put;
+               }
+
+               i915_gem_object_lock(obj, NULL);
+               err = i915_gem_object_pin_pages(obj);
+               if (err)
+                       goto out_put;
+
+               daddr = i915_gem_object_get_dma_address(obj, 0);
+               if (!IS_ALIGNED(daddr, ps)) {
+                       pr_err("%s daddr(%pa) not aligned with ps(%u)\n",
+                              __func__, &daddr, ps);
+                       err = -EINVAL;
+                       goto out_unpin;
+               }
+
+out_unpin:
+               i915_gem_object_unpin_pages(obj);
+               __i915_gem_object_put_pages(obj);
+out_put:
+               i915_gem_object_unlock(obj);
+               i915_gem_object_put(obj);
+
+               if (err)
+                       break;
+       }
+
+       return err;
+}
+
 static int igt_lmem_create_cleared_cpu(void *arg)
 {
        struct drm_i915_private *i915 = arg;
@@ -932,7 +988,7 @@ create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type,
        struct drm_i915_gem_object *obj;
        void *addr;
 
-       obj = i915_gem_object_create_region(mr, size, 0);
+       obj = i915_gem_object_create_region(mr, size, 0, 0);
        if (IS_ERR(obj)) {
                if (PTR_ERR(obj) == -ENOSPC) /* Stolen memory */
                        return ERR_PTR(-ENODEV);
@@ -1149,6 +1205,7 @@ int intel_memory_region_live_selftests(struct drm_i915_private *i915)
 {
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_lmem_create),
+               SUBTEST(igt_lmem_create_with_ps),
                SUBTEST(igt_lmem_create_cleared_cpu),
                SUBTEST(igt_lmem_write_cpu),
                SUBTEST(igt_lmem_write_gpu),
index fa786dede60802d4d0440297077678639c24ce2d..efa86dffe3c677b3ca5b0c3b6426f7c78f0d278c 100644 (file)
@@ -63,6 +63,7 @@ static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
 static int mock_object_init(struct intel_memory_region *mem,
                            struct drm_i915_gem_object *obj,
                            resource_size_t size,
+                           resource_size_t page_size,
                            unsigned int flags)
 {
        static struct lock_class_key lock_class;