return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class,
- I915_BO_ALLOC_STRUCT_PAGE);
+ i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class, 0);
+ obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
/*
* Mark the object as volatile, such that the pages are marked as
mr->type == INTEL_MEMORY_STOLEN_LOCAL);
}
+/**
+ * __i915_gem_object_is_lmem - Whether the object is resident in
+ * lmem while in the fence signaling critical path.
+ * @obj: The object to check.
+ *
+ * This function is intended to be called from within the fence signaling
+ * path, where the fence keeps the object from being migrated, for example
+ * during GPU reset.
+ *
+ * Return: Whether the object is resident in lmem.
+ */
+bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
+{
+ struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
+
+#ifdef CONFIG_LOCKDEP
+ GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, true));
+#endif
+ return mr && (mr->type == INTEL_MEMORY_LOCAL ||
+ mr->type == INTEL_MEMORY_STOLEN_LOCAL);
+}
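[Editor's note, not part of the patch: a purely illustrative sketch of how a caller in the fence signaling path might use the lockless variant; capture_vma_backing() is a made-up helper name.]

static void capture_vma_backing(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	/*
	 * Safe without the object lock: the unsignaled fence being
	 * processed keeps the object from migrating, per the kernel-doc
	 * above.
	 */
	if (__i915_gem_object_is_lmem(obj)) {
		/* Read back through the device local-memory mapping. */
	} else {
		/* Object lives in system memory; use its struct pages. */
	}
}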
+
struct drm_i915_gem_object *
i915_gem_object_create_lmem(struct drm_i915_private *i915,
resource_size_t size,
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
+bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
+
struct drm_i915_gem_object *
i915_gem_object_create_lmem(struct drm_i915_private *i915,
resource_size_t size,
if (mmap_type != I915_MMAP_TYPE_GTT &&
!i915_gem_object_has_struct_page(obj) &&
- !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
+ !i915_gem_object_has_iomem(obj))
return -ENODEV;
mmo = mmap_offset_attach(obj, mmap_type, file);
if (!obj)
return -ENOENT;
+ err = i915_gem_object_lock_interruptible(obj, NULL);
+ if (err)
+ goto out_put;
err = __assign_mmap_offset(obj, mmap_type, offset, file);
+ i915_gem_object_unlock(obj);
+out_put:
i915_gem_object_put(obj);
return err;
}
return PTR_ERR(anon);
}
- vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
-
- if (i915_gem_object_has_iomem(obj))
- vma->vm_flags |= VM_IO;
+ vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
/*
* We keep the ref on mmo->obj, not vm_file, but we require
return obj->mm.n_placements > 1;
}
+/**
+ * i915_gem_object_has_struct_page - Whether the object is page-backed
+ * @obj: The object to query.
+ *
+ * This function should only be called while the object is locked or pinned,
+ * otherwise the page backing may change under the caller.
+ *
+ * Return: True if page-backed, false otherwise.
+ */
+bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
+{
+#ifdef CONFIG_LOCKDEP
+ if (IS_DGFX(to_i915(obj->base.dev)) &&
+ i915_gem_object_evictable((void __force *)obj))
+ assert_object_held_shared(obj);
+#endif
+ return obj->mem_flags & I915_BO_FLAG_STRUCT_PAGE;
+}
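[Editor's note, illustrative sketch only, mirroring the can_access() change further down in this patch: a caller that neither holds a relevant fence nor has the pages pinned takes the object lock around the query; object_uses_struct_pages() is a hypothetical helper.]

static bool object_uses_struct_pages(struct drm_i915_gem_object *obj)
{
	bool has_pages;

	/* The object lock keeps the placement, and thus mem_flags, stable. */
	i915_gem_object_lock(obj, NULL);
	has_pages = i915_gem_object_has_struct_page(obj);
	i915_gem_object_unlock(obj);

	return has_pages;
}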
+
+/**
+ * i915_gem_object_has_iomem - Whether the object is iomem-backed
+ * @obj: The object to query.
+ *
+ * This function should only be called while the object is locked or pinned,
+ * otherwise the iomem backing may change under the caller.
+ *
+ * Return: True if iomem-backed, false otherwise.
+ */
+bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
+{
+#ifdef CONFIG_LOCKDEP
+ if (IS_DGFX(to_i915(obj->base.dev)) &&
+ i915_gem_object_evictable((void __force *)obj))
+ assert_object_held_shared(obj);
+#endif
+ return obj->mem_flags & I915_BO_FLAG_IOMEM;
+}
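[Editor's note, again only a hypothetical sketch: pinning the pages is the other way to keep the backing stable, so the query needs no object lock while the pin is held.]

static int touch_backing(struct drm_i915_gem_object *obj)
{
	int err;

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err)
		return err;

	/* mem_flags cannot change while the pages are pinned. */
	if (i915_gem_object_has_iomem(obj)) {
		/* Access through an io_mapping of the memory region. */
	} else if (i915_gem_object_has_struct_page(obj)) {
		/* Access through kmap/vmap of the struct pages. */
	}

	i915_gem_object_unpin_pages(obj);
	return 0;
}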
+
void i915_gem_init__objects(struct drm_i915_private *i915)
{
INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
/*
* If more than one potential simultaneous locker, assert held.
*/
-static inline void assert_object_held_shared(struct drm_i915_gem_object *obj)
+static inline void assert_object_held_shared(const struct drm_i915_gem_object *obj)
{
/*
* Note mm list lookup is protected by
return obj->ops->flags & flags;
}
-static inline bool
-i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
-{
- return obj->flags & I915_BO_ALLOC_STRUCT_PAGE;
-}
+bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj);
-static inline bool
-i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
-{
- return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM);
-}
+bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);
static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
struct drm_i915_gem_object_ops {
unsigned int flags;
-#define I915_GEM_OBJECT_HAS_IOMEM BIT(1)
-#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(2)
-#define I915_GEM_OBJECT_IS_PROXY BIT(3)
-#define I915_GEM_OBJECT_NO_MMAP BIT(4)
+#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
+#define I915_GEM_OBJECT_IS_PROXY BIT(2)
+#define I915_GEM_OBJECT_NO_MMAP BIT(3)
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set
unsigned long flags;
#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
#define I915_BO_ALLOC_VOLATILE BIT(1)
-#define I915_BO_ALLOC_STRUCT_PAGE BIT(2)
-#define I915_BO_ALLOC_CPU_CLEAR BIT(3)
-#define I915_BO_ALLOC_USER BIT(4)
+#define I915_BO_ALLOC_CPU_CLEAR BIT(2)
+#define I915_BO_ALLOC_USER BIT(3)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
I915_BO_ALLOC_VOLATILE | \
- I915_BO_ALLOC_STRUCT_PAGE | \
I915_BO_ALLOC_CPU_CLEAR | \
I915_BO_ALLOC_USER)
-#define I915_BO_READONLY BIT(5)
-#define I915_TILING_QUIRK_BIT 6 /* unknown swizzling; do not release! */
+#define I915_BO_READONLY BIT(4)
+#define I915_TILING_QUIRK_BIT 5 /* unknown swizzling; do not release! */
+ /**
+ * @mem_flags: Mutable placement-related flags
+ *
+ * These are flags that indicate specifics of the memory region
+ * the object is currently in. As such they are only stable
+ * either under the object lock or if the object is pinned.
+ */
+ unsigned int mem_flags;
+#define I915_BO_FLAG_STRUCT_PAGE BIT(0) /* Object backed by struct pages */
+#define I915_BO_FLAG_IOMEM BIT(1) /* Object backed by IO memory */
/*
* Is the object to be mapped as read-only to the GPU
* Only honoured if hardware has relevant pte bit
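[Editor's note, purely illustrative and not part of the patch: a backing-store move would update the mutable mem_flags under the object lock, in the same way the phys and TTM hunks below flip the bits; my_backend_move_to_iomem() is a made-up name.]

static void my_backend_move_to_iomem(struct drm_i915_gem_object *obj)
{
	assert_object_held(obj);

	/* No longer backed by struct pages after the move... */
	obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE;
	/* ...but by IO memory in the destination region. */
	obj->mem_flags |= I915_BO_FLAG_IOMEM;
}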
int err;
if (!i915_gem_object_has_struct_page(obj) &&
- !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
+ !i915_gem_object_has_iomem(obj))
return ERR_PTR(-ENXIO);
assert_object_held(obj);
intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
/* We're no longer struct page backed */
- obj->flags &= ~I915_BO_ALLOC_STRUCT_PAGE;
+ obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE;
__i915_gem_object_set_pages(obj, st, sg->length);
return 0;
static void shmem_release(struct drm_i915_gem_object *obj)
{
- if (obj->flags & I915_BO_ALLOC_STRUCT_PAGE)
+ if (i915_gem_object_has_struct_page(obj))
i915_gem_object_release_memory_region(obj);
fput(obj->base.filp);
mapping_set_gfp_mask(mapping, mask);
GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
- i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class,
- I915_BO_ALLOC_STRUCT_PAGE);
-
+ i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, 0);
+ obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
.name = "i915_gem_object_ttm",
- .flags = I915_GEM_OBJECT_HAS_IOMEM,
.get_pages = i915_ttm_get_pages,
.put_pages = i915_ttm_put_pages,
i915_gem_object_init_memory_region(obj, mem);
i915_gem_object_make_unshrinkable(obj);
obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;
+ obj->mem_flags |= I915_BO_FLAG_IOMEM;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
mutex_init(&obj->ttm.get_io_page.lock);
return -ENOMEM;
drm_gem_private_object_init(dev, &obj->base, args->user_size);
- i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class,
- I915_BO_ALLOC_STRUCT_PAGE);
+ i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class, 0);
+ obj->mem_flags = I915_BO_FLAG_STRUCT_PAGE;
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
- i915_gem_object_init(obj, &huge_ops, &lock_class,
- I915_BO_ALLOC_STRUCT_PAGE);
+ i915_gem_object_init(obj, &huge_ops, &lock_class, 0);
+ obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &huge_page_ops, &lock_class,
- I915_BO_ALLOC_STRUCT_PAGE);
-
+ i915_gem_object_init(obj, &huge_page_ops, &lock_class, 0);
+ obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
i915_gem_object_set_volatile(obj);
obj->write_domain = I915_GEM_DOMAIN_CPU;
static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
{
+ bool no_map;
+
if (type == I915_MMAP_TYPE_GTT &&
!i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt))
return false;
- if (type != I915_MMAP_TYPE_GTT &&
- !i915_gem_object_has_struct_page(obj) &&
- !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
- return false;
+ i915_gem_object_lock(obj, NULL);
+ no_map = (type != I915_MMAP_TYPE_GTT &&
+ !i915_gem_object_has_struct_page(obj) &&
+ !i915_gem_object_has_iomem(obj));
+ i915_gem_object_unlock(obj);
- return true;
+ return !no_map;
}
static void object_set_placements(struct drm_i915_gem_object *obj,
}
}
-static bool can_access(const struct drm_i915_gem_object *obj)
+static bool can_access(struct drm_i915_gem_object *obj)
{
- return i915_gem_object_has_struct_page(obj) ||
- i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM);
+ bool access;
+
+ i915_gem_object_lock(obj, NULL);
+ access = i915_gem_object_has_struct_page(obj) ||
+ i915_gem_object_has_iomem(obj);
+ i915_gem_object_unlock(obj);
+
+ return access;
}
static int __igt_mmap_access(struct drm_i915_private *i915,
goto out;
}
+ i915_gem_object_lock(obj, NULL);
if (!i915_gem_object_has_struct_page(obj)) {
+ i915_gem_object_unlock(obj);
err = -EINVAL;
pr_err("shmem has no struct page\n");
goto out_obj;
}
- i915_gem_object_lock(obj, NULL);
err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
i915_gem_object_unlock(obj);
if (err) {
if (ret)
break;
}
- } else if (i915_gem_object_is_lmem(vma->obj)) {
+ } else if (__i915_gem_object_is_lmem(vma->obj)) {
struct intel_memory_region *mem = vma->obj->mm.region;
dma_addr_t dma;