git.baikalelectronics.ru Git - kernel.git/commitdiff
drm/i915: Update object placement flags to be mutable
authorThomas Hellström <thomas.hellstrom@linux.intel.com>
Thu, 24 Jun 2021 08:42:38 +0000 (10:42 +0200)
committerMatthew Auld <matthew.auld@intel.com>
Thu, 24 Jun 2021 17:50:56 +0000 (18:50 +0100)
The object ops I915_GEM_OBJECT_HAS_IOMEM and the object
I915_BO_ALLOC_STRUCT_PAGE flags are considered immutable by
much of our code. Introduce a new mem_flags member to hold these
and make sure checks for these flags being set are either done
under the object lock or with pages properly pinned. The flags
will change during migration under the object lock.

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210624084240.270219-2-thomas.hellstrom@linux.intel.com
17 files changed:
drivers/gpu/drm/i915/gem/i915_gem_internal.c
drivers/gpu/drm/i915/gem/i915_gem_lmem.c
drivers/gpu/drm/i915/gem/i915_gem_lmem.h
drivers/gpu/drm/i915/gem/i915_gem_mman.c
drivers/gpu/drm/i915/gem/i915_gem_object.c
drivers/gpu/drm/i915/gem/i915_gem_object.h
drivers/gpu/drm/i915/gem/i915_gem_object_types.h
drivers/gpu/drm/i915/gem/i915_gem_pages.c
drivers/gpu/drm/i915/gem/i915_gem_phys.c
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
drivers/gpu/drm/i915/i915_gpu_error.c

index ce6b664b10aa5a6c13ec7a910b82a8a7345cc2a6..13b217f75055516f1dd7df8b7ca44d25d98f7531 100644 (file)
@@ -177,8 +177,8 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
                return ERR_PTR(-ENOMEM);
 
        drm_gem_private_object_init(&i915->drm, &obj->base, size);
-       i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class,
-                            I915_BO_ALLOC_STRUCT_PAGE);
+       i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class, 0);
+       obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
 
        /*
         * Mark the object as volatile, such that the pages are marked as
index d539dffa15547958963bc2a4c501f753d155cec8..41d5182cd36769a698305271d0baf0cad31ce753 100644 (file)
@@ -71,6 +71,28 @@ bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
                      mr->type == INTEL_MEMORY_STOLEN_LOCAL);
 }
 
+/**
+ * __i915_gem_object_is_lmem - Whether the object is resident in
+ * lmem while in the fence signaling critical path.
+ * @obj: The object to check.
+ *
+ * This function is intended to be called from within the fence signaling
+ * path where the fence keeps the object from being migrated. For example
+ * during gpu reset or similar.
+ *
+ * Return: Whether the object is resident in lmem.
+ */
+bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
+{
+       struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
+
+#ifdef CONFIG_LOCKDEP
+       GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, true));
+#endif
+       return mr && (mr->type == INTEL_MEMORY_LOCAL ||
+                     mr->type == INTEL_MEMORY_STOLEN_LOCAL);
+}
+
 struct drm_i915_gem_object *
 i915_gem_object_create_lmem(struct drm_i915_private *i915,
                            resource_size_t size,
index ea76fd11ccb0c0281855b9e96e374a35120a538f..27a611deba47f378efce9fe8bb3e3fcfb20fb894 100644 (file)
@@ -21,6 +21,8 @@ i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
 
 bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
 
+bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
+
 struct drm_i915_gem_object *
 i915_gem_object_create_lmem(struct drm_i915_private *i915,
                            resource_size_t size,
index 2fd155742bd2bc06ecbeeb4556879fc252a307c5..6497a2dbdab97faa72046fd750e0ab3b2f493637 100644 (file)
@@ -684,7 +684,7 @@ __assign_mmap_offset(struct drm_i915_gem_object *obj,
 
        if (mmap_type != I915_MMAP_TYPE_GTT &&
            !i915_gem_object_has_struct_page(obj) &&
-           !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
+           !i915_gem_object_has_iomem(obj))
                return -ENODEV;
 
        mmo = mmap_offset_attach(obj, mmap_type, file);
@@ -708,7 +708,12 @@ __assign_mmap_offset_handle(struct drm_file *file,
        if (!obj)
                return -ENOENT;
 
+       err = i915_gem_object_lock_interruptible(obj, NULL);
+       if (err)
+               goto out_put;
        err = __assign_mmap_offset(obj, mmap_type, offset, file);
+       i915_gem_object_unlock(obj);
+out_put:
        i915_gem_object_put(obj);
        return err;
 }
@@ -932,10 +937,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
                return PTR_ERR(anon);
        }
 
-       vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
-
-       if (i915_gem_object_has_iomem(obj))
-               vma->vm_flags |= VM_IO;
+       vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
 
        /*
         * We keep the ref on mmo->obj, not vm_file, but we require
index cf18c430d51fad256101458fdccafa11d5fc2fb2..07e8ff9a8aaef340b989f26d83f0605663ceab63 100644 (file)
@@ -475,6 +475,44 @@ bool i915_gem_object_migratable(struct drm_i915_gem_object *obj)
        return obj->mm.n_placements > 1;
 }
 
+/**
+ * i915_gem_object_has_struct_page - Whether the object is page-backed
+ * @obj: The object to query.
+ *
+ * This function should only be called while the object is locked or pinned,
+ * otherwise the page backing may change under the caller.
+ *
+ * Return: True if page-backed, false otherwise.
+ */
+bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
+{
+#ifdef CONFIG_LOCKDEP
+       if (IS_DGFX(to_i915(obj->base.dev)) &&
+           i915_gem_object_evictable((void __force *)obj))
+               assert_object_held_shared(obj);
+#endif
+       return obj->mem_flags & I915_BO_FLAG_STRUCT_PAGE;
+}
+
+/**
+ * i915_gem_object_has_iomem - Whether the object is iomem-backed
+ * @obj: The object to query.
+ *
+ * This function should only be called while the object is locked or pinned,
+ * otherwise the iomem backing may change under the caller.
+ *
+ * Return: True if iomem-backed, false otherwise.
+ */
+bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
+{
+#ifdef CONFIG_LOCKDEP
+       if (IS_DGFX(to_i915(obj->base.dev)) &&
+           i915_gem_object_evictable((void __force *)obj))
+               assert_object_held_shared(obj);
+#endif
+       return obj->mem_flags & I915_BO_FLAG_IOMEM;
+}
+
 void i915_gem_init__objects(struct drm_i915_private *i915)
 {
        INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
index 7bf4dd46d8d26020d8e6c46cdf4a46e126dec070..ea3224a480c49100590cd2fb9cf88330a62477df 100644 (file)
@@ -148,7 +148,7 @@ i915_gem_object_put(struct drm_i915_gem_object *obj)
 /*
  * If more than one potential simultaneous locker, assert held.
  */
-static inline void assert_object_held_shared(struct drm_i915_gem_object *obj)
+static inline void assert_object_held_shared(const struct drm_i915_gem_object *obj)
 {
        /*
         * Note mm list lookup is protected by
@@ -266,17 +266,9 @@ i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
        return obj->ops->flags & flags;
 }
 
-static inline bool
-i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
-{
-       return obj->flags & I915_BO_ALLOC_STRUCT_PAGE;
-}
+bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj);
 
-static inline bool
-i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
-{
-       return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM);
-}
+bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);
 
 static inline bool
 i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
index 3a2d9ecf8e03a8ff579a15273fbc04a69aeb9fe7..441f913c87e62ec6b6f2656a730f3f6b48354488 100644 (file)
@@ -33,10 +33,9 @@ struct i915_lut_handle {
 
 struct drm_i915_gem_object_ops {
        unsigned int flags;
-#define I915_GEM_OBJECT_HAS_IOMEM      BIT(1)
-#define I915_GEM_OBJECT_IS_SHRINKABLE  BIT(2)
-#define I915_GEM_OBJECT_IS_PROXY       BIT(3)
-#define I915_GEM_OBJECT_NO_MMAP                BIT(4)
+#define I915_GEM_OBJECT_IS_SHRINKABLE  BIT(1)
+#define I915_GEM_OBJECT_IS_PROXY       BIT(2)
+#define I915_GEM_OBJECT_NO_MMAP                BIT(3)
 
        /* Interface between the GEM object and its backing storage.
         * get_pages() is called once prior to the use of the associated set
@@ -201,17 +200,25 @@ struct drm_i915_gem_object {
        unsigned long flags;
 #define I915_BO_ALLOC_CONTIGUOUS BIT(0)
 #define I915_BO_ALLOC_VOLATILE   BIT(1)
-#define I915_BO_ALLOC_STRUCT_PAGE BIT(2)
-#define I915_BO_ALLOC_CPU_CLEAR  BIT(3)
-#define I915_BO_ALLOC_USER       BIT(4)
+#define I915_BO_ALLOC_CPU_CLEAR  BIT(2)
+#define I915_BO_ALLOC_USER       BIT(3)
 #define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
                             I915_BO_ALLOC_VOLATILE | \
-                            I915_BO_ALLOC_STRUCT_PAGE | \
                             I915_BO_ALLOC_CPU_CLEAR | \
                             I915_BO_ALLOC_USER)
-#define I915_BO_READONLY         BIT(5)
-#define I915_TILING_QUIRK_BIT    6 /* unknown swizzling; do not release! */
+#define I915_BO_READONLY         BIT(4)
+#define I915_TILING_QUIRK_BIT    5 /* unknown swizzling; do not release! */
 
+       /**
+        * @mem_flags - Mutable placement-related flags
+        *
+        * These are flags that indicate specifics of the memory region
+        * the object is currently in. As such they are only stable
+        * either under the object lock or if the object is pinned.
+        */
+       unsigned int mem_flags;
+#define I915_BO_FLAG_STRUCT_PAGE BIT(0) /* Object backed by struct pages */
+#define I915_BO_FLAG_IOMEM       BIT(1) /* Object backed by IO memory */
        /*
         * Is the object to be mapped as read-only to the GPU
         * Only honoured if hardware has relevant pte bit
index 086005c1c7eaec558a6fd386b2e70960aad54207..f2f850e31b8edaca2147e88c6887bafe9523d26c 100644 (file)
@@ -351,7 +351,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
        int err;
 
        if (!i915_gem_object_has_struct_page(obj) &&
-           !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
+           !i915_gem_object_has_iomem(obj))
                return ERR_PTR(-ENXIO);
 
        assert_object_held(obj);
index be72ad0634bae5b0674921fb9e2cf8809893e752..7986612f48fad42872da18169f83f768ffa03d63 100644 (file)
@@ -76,7 +76,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
        intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
 
        /* We're no longer struct page backed */
-       obj->flags &= ~I915_BO_ALLOC_STRUCT_PAGE;
+       obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE;
        __i915_gem_object_set_pages(obj, st, sg->length);
 
        return 0;
index 5d16c4462fda0c680bab3cfd7dd1d26bcaf49ecf..7aa1c95c7b7ddcd2a8fa04ffb273357c52c65839 100644 (file)
@@ -444,7 +444,7 @@ shmem_pread(struct drm_i915_gem_object *obj,
 
 static void shmem_release(struct drm_i915_gem_object *obj)
 {
-       if (obj->flags & I915_BO_ALLOC_STRUCT_PAGE)
+       if (i915_gem_object_has_struct_page(obj))
                i915_gem_object_release_memory_region(obj);
 
        fput(obj->base.filp);
@@ -513,9 +513,8 @@ static int shmem_object_init(struct intel_memory_region *mem,
        mapping_set_gfp_mask(mapping, mask);
        GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
 
-       i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class,
-                            I915_BO_ALLOC_STRUCT_PAGE);
-
+       i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, 0);
+       obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
        obj->write_domain = I915_GEM_DOMAIN_CPU;
        obj->read_domains = I915_GEM_DOMAIN_CPU;
 
index cf5540c1537bf51849e17e4ca237d3b77b9ad5e8..e41bb9d6a4919a5266db1e75417969d52a907467 100644 (file)
@@ -732,7 +732,6 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
 
 static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
        .name = "i915_gem_object_ttm",
-       .flags = I915_GEM_OBJECT_HAS_IOMEM,
 
        .get_pages = i915_ttm_get_pages,
        .put_pages = i915_ttm_put_pages,
@@ -777,6 +776,7 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
        i915_gem_object_init_memory_region(obj, mem);
        i915_gem_object_make_unshrinkable(obj);
        obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;
+       obj->mem_flags |= I915_BO_FLAG_IOMEM;
        i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
        INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
        mutex_init(&obj->ttm.get_io_page.lock);
index 4b0acc7eaa271fdc48300d9bf60c3a1c1398c6c8..56edfeff8c02e9021dac6b3da1914cfb663ddf65 100644 (file)
@@ -510,8 +510,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
                return -ENOMEM;
 
        drm_gem_private_object_init(dev, &obj->base, args->user_size);
-       i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class,
-                            I915_BO_ALLOC_STRUCT_PAGE);
+       i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class, 0);
+       obj->mem_flags = I915_BO_FLAG_STRUCT_PAGE;
        obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->write_domain = I915_GEM_DOMAIN_CPU;
        i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
index 0c8ecfdf54056b6c70ce05d50fd95eac037333c1..f963b8e1e37b5d984ed1500bbb4607f17ab3492d 100644 (file)
@@ -114,8 +114,8 @@ huge_gem_object(struct drm_i915_private *i915,
                return ERR_PTR(-ENOMEM);
 
        drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
-       i915_gem_object_init(obj, &huge_ops, &lock_class,
-                            I915_BO_ALLOC_STRUCT_PAGE);
+       i915_gem_object_init(obj, &huge_ops, &lock_class, 0);
+       obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
 
        obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->write_domain = I915_GEM_DOMAIN_CPU;
index dadd485bc52faa314b94f05f8e78d7175b436ae3..ccc67ed1a84b162a061a19c051b5ea05b6f0e712 100644 (file)
@@ -167,9 +167,8 @@ huge_pages_object(struct drm_i915_private *i915,
                return ERR_PTR(-ENOMEM);
 
        drm_gem_private_object_init(&i915->drm, &obj->base, size);
-       i915_gem_object_init(obj, &huge_page_ops, &lock_class,
-                            I915_BO_ALLOC_STRUCT_PAGE);
-
+       i915_gem_object_init(obj, &huge_page_ops, &lock_class, 0);
+       obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
        i915_gem_object_set_volatile(obj);
 
        obj->write_domain = I915_GEM_DOMAIN_CPU;
index 44b5de06ce64934c0d481a160c282880202c0e1e..607b7d2d4c29c78682b9cfc0b91d12bc7dd9fb89 100644 (file)
@@ -831,16 +831,19 @@ static int wc_check(struct drm_i915_gem_object *obj)
 
 static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
 {
+       bool no_map;
+
        if (type == I915_MMAP_TYPE_GTT &&
            !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt))
                return false;
 
-       if (type != I915_MMAP_TYPE_GTT &&
-           !i915_gem_object_has_struct_page(obj) &&
-           !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
-               return false;
+       i915_gem_object_lock(obj, NULL);
+       no_map = (type != I915_MMAP_TYPE_GTT &&
+                 !i915_gem_object_has_struct_page(obj) &&
+                 !i915_gem_object_has_iomem(obj));
+       i915_gem_object_unlock(obj);
 
-       return true;
+       return !no_map;
 }
 
 static void object_set_placements(struct drm_i915_gem_object *obj,
@@ -988,10 +991,16 @@ static const char *repr_mmap_type(enum i915_mmap_type type)
        }
 }
 
-static bool can_access(const struct drm_i915_gem_object *obj)
+static bool can_access(struct drm_i915_gem_object *obj)
 {
-       return i915_gem_object_has_struct_page(obj) ||
-              i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM);
+       bool access;
+
+       i915_gem_object_lock(obj, NULL);
+       access = i915_gem_object_has_struct_page(obj) ||
+               i915_gem_object_has_iomem(obj);
+       i915_gem_object_unlock(obj);
+
+       return access;
 }
 
 static int __igt_mmap_access(struct drm_i915_private *i915,
index 3a6ce87f8b524c96cb0a32f9f00554e89799d0a4..d43d8dae0f699242ce6f45356b64833aae0b1353 100644 (file)
@@ -25,13 +25,14 @@ static int mock_phys_object(void *arg)
                goto out;
        }
 
+       i915_gem_object_lock(obj, NULL);
        if (!i915_gem_object_has_struct_page(obj)) {
+               i915_gem_object_unlock(obj);
                err = -EINVAL;
                pr_err("shmem has no struct page\n");
                goto out_obj;
        }
 
-       i915_gem_object_lock(obj, NULL);
        err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
        i915_gem_object_unlock(obj);
        if (err) {
index cb182c6d265a43795dc42649ef260ff9bf694b3c..a2c58b54a59282278ad575ddda8f30dab9c0b039 100644 (file)
@@ -1039,7 +1039,7 @@ i915_vma_coredump_create(const struct intel_gt *gt,
                        if (ret)
                                break;
                }
-       } else if (i915_gem_object_is_lmem(vma->obj)) {
+       } else if (__i915_gem_object_is_lmem(vma->obj)) {
                struct intel_memory_region *mem = vma->obj->mm.region;
                dma_addr_t dma;