return __i915_gem_object_get_pages(obj);
}
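+/*
+ * Report whether the object currently has backing pages. obj->mm.pages may
+ * hold an ERR_PTR() as well as NULL (see the IS_ERR_OR_NULL() checks this
+ * helper replaces), so both count as "no pages"; READ_ONCE() lets callers
+ * sample the pointer without holding obj->mm.lock.
+ */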
+static inline bool
+i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
+{
+ return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
+}
+
static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
- GEM_BUG_ON(!obj->mm.pages);
+ GEM_BUG_ON(!i915_gem_object_has_pages(obj));
atomic_inc(&obj->mm.pages_pin_count);
}
static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
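+ /* An object must still have pages while any pin on them is outstanding */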
+ GEM_BUG_ON(!i915_gem_object_has_pages(obj));
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
- GEM_BUG_ON(!obj->mm.pages);
atomic_dec(&obj->mm.pages_pin_count);
}
struct address_space *mapping;
lockdep_assert_held(&obj->mm.lock);
- GEM_BUG_ON(obj->mm.pages);
+ GEM_BUG_ON(i915_gem_object_has_pages(obj));
switch (obj->mm.madv) {
case I915_MADV_DONTNEED:
return;
GEM_BUG_ON(obj->bind_count);
- if (!READ_ONCE(obj->mm.pages))
+ if (!i915_gem_object_has_pages(obj))
return;
/* May be called by shrinker from within get_pages() (on another bo) */
if (err)
return err;
- if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
+ if (unlikely(!i915_gem_object_has_pages(obj))) {
GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
err = ____i915_gem_object_get_pages(obj);
type &= ~I915_MAP_OVERRIDE;
if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
- if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
+ if (unlikely(!i915_gem_object_has_pages(obj))) {
GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
ret = ____i915_gem_object_get_pages(obj);
atomic_inc(&obj->mm.pages_pin_count);
pinned = false;
}
- GEM_BUG_ON(!obj->mm.pages);
+ GEM_BUG_ON(!i915_gem_object_has_pages(obj));
ptr = page_unpack_bits(obj->mm.mapping, &has_type);
if (ptr && has_type != type) {
* allows it to avoid the cost of retrieving a page (either swapin
* or clearing-before-use) before it is overwritten.
*/
- if (READ_ONCE(obj->mm.pages))
+ if (i915_gem_object_has_pages(obj))
return -ENODEV;
/* Before the pages are instantiated the object is treated as being
if (err)
goto out;
- if (obj->mm.pages &&
+ if (i915_gem_object_has_pages(obj) &&
i915_gem_object_is_tiled(obj) &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (obj->mm.madv == I915_MADV_WILLNEED) {
obj->mm.madv = args->madv;
/* if the object is no longer attached, discard its backing storage */
- if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
+ if (obj->mm.madv == I915_MADV_DONTNEED &&
+ !i915_gem_object_has_pages(obj))
i915_gem_object_truncate(obj);
args->retained = obj->mm.madv != __I915_MADV_PURGED;
if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
atomic_set(&obj->mm.pages_pin_count, 0);
__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
- GEM_BUG_ON(obj->mm.pages);
+ GEM_BUG_ON(i915_gem_object_has_pages(obj));
if (obj->base.import_attach)
drm_prime_gem_destroy(&obj->base, NULL);
static void __i915_do_clflush(struct drm_i915_gem_object *obj)
{
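+ /* drm_clflush_sg() dereferences obj->mm.pages, so they must be present */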
+ GEM_BUG_ON(!i915_gem_object_has_pages(obj));
drm_clflush_sg(obj->mm.pages);
intel_fb_obj_flush(obj, ORIGIN_CPU);
}
return 0;
/* Recreate the page after shrinking */
- if (!so->vma->obj->mm.pages)
+ if (!i915_gem_object_has_pages(so->vma->obj))
so->batch_offset = -1;
ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
- if (!obj->mm.pages)
+ if (!i915_gem_object_has_pages(obj))
return false;
 /* Consider only shrinkable objects. */
{
if (i915_gem_object_unbind(obj) == 0)
__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
- return !READ_ONCE(obj->mm.pages);
+ return !i915_gem_object_has_pages(obj);
}
/**
/* May arrive from get_pages on another bo */
mutex_lock_nested(&obj->mm.lock,
I915_MM_SHRINKER);
- if (!obj->mm.pages) {
+ if (!i915_gem_object_has_pages(obj)) {
__i915_gem_object_invalidate(obj);
list_del_init(&obj->global_link);
count += obj->base.size >> PAGE_SHIFT;
*/
unbound = bound = unevictable = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
- if (!obj->mm.pages)
+ if (!i915_gem_object_has_pages(obj))
continue;
if (!can_release_pages(obj))
unbound += obj->base.size >> PAGE_SHIFT;
}
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
- if (!obj->mm.pages)
+ if (!i915_gem_object_has_pages(obj))
continue;
if (!can_release_pages(obj))
* due to the change in swizzling.
*/
mutex_lock(&obj->mm.lock);
- if (obj->mm.pages &&
+ if (i915_gem_object_has_pages(obj) &&
obj->mm.madv == I915_MADV_WILLNEED &&
i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (tiling == I915_TILING_NONE) {
/* We are inside a kthread context and can't be interrupted */
if (i915_gem_object_unbind(obj) == 0)
__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
- WARN_ONCE(obj->mm.pages,
+ WARN_ONCE(i915_gem_object_has_pages(obj),
"Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n",
obj->bind_count,
atomic_read(&obj->mm.pages_pin_count),