git.baikalelectronics.ru Git - kernel.git/commitdiff
drm/i915: Refactor testing obj->mm.pages
authorChris Wilson <chris@chris-wilson.co.uk>
Fri, 13 Oct 2017 20:26:13 +0000 (21:26 +0100)
committerChris Wilson <chris@chris-wilson.co.uk>
Mon, 16 Oct 2017 19:44:19 +0000 (20:44 +0100)
Since we occasionally stuff an error pointer into obj->mm.pages for a
semi-permanent or even permanent failure, we have to be more careful and
not just test against NULL when deciding if the object has a complete
set of its concurrent pages.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171013202621.7276-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_clflush.c
drivers/gpu/drm/i915/i915_gem_render_state.c
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_gem_userptr.c

index d61936106e3609925bbcea2dd8636165f00a9e0f..a9bc979e0cbd504646d9990c3b1c0fd74a826d45 100644 (file)
@@ -3566,10 +3566,16 @@ i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
        return __i915_gem_object_get_pages(obj);
 }
 
+static inline bool
+i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
+{
+       return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
+}
+
 static inline void
 __i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
-       GEM_BUG_ON(!obj->mm.pages);
+       GEM_BUG_ON(!i915_gem_object_has_pages(obj));
 
        atomic_inc(&obj->mm.pages_pin_count);
 }
@@ -3583,8 +3589,8 @@ i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
 static inline void
 __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 {
+       GEM_BUG_ON(!i915_gem_object_has_pages(obj));
        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-       GEM_BUG_ON(!obj->mm.pages);
 
        atomic_dec(&obj->mm.pages_pin_count);
 }
index d9d39b309ce8689e917a3c38fca48a7848d2223d..05907171505337f6bb662155bf588592c32da5da 100644 (file)
@@ -2196,7 +2196,7 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
        struct address_space *mapping;
 
        lockdep_assert_held(&obj->mm.lock);
-       GEM_BUG_ON(obj->mm.pages);
+       GEM_BUG_ON(i915_gem_object_has_pages(obj));
 
        switch (obj->mm.madv) {
        case I915_MADV_DONTNEED:
@@ -2259,7 +2259,7 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
                return;
 
        GEM_BUG_ON(obj->bind_count);
-       if (!READ_ONCE(obj->mm.pages))
+       if (!i915_gem_object_has_pages(obj))
                return;
 
        /* May be called by shrinker from within get_pages() (on another bo) */
@@ -2563,7 +2563,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
        if (err)
                return err;
 
-       if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
+       if (unlikely(!i915_gem_object_has_pages(obj))) {
                GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
 
                err = ____i915_gem_object_get_pages(obj);
@@ -2648,7 +2648,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
        type &= ~I915_MAP_OVERRIDE;
 
        if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
-               if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
+               if (unlikely(!i915_gem_object_has_pages(obj))) {
                        GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
 
                        ret = ____i915_gem_object_get_pages(obj);
@@ -2660,7 +2660,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
                atomic_inc(&obj->mm.pages_pin_count);
                pinned = false;
        }
-       GEM_BUG_ON(!obj->mm.pages);
+       GEM_BUG_ON(!i915_gem_object_has_pages(obj));
 
        ptr = page_unpack_bits(obj->mm.mapping, &has_type);
        if (ptr && has_type != type) {
@@ -2715,7 +2715,7 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
         * allows it to avoid the cost of retrieving a page (either swapin
         * or clearing-before-use) before it is overwritten.
         */
-       if (READ_ONCE(obj->mm.pages))
+       if (i915_gem_object_has_pages(obj))
                return -ENODEV;
 
        /* Before the pages are instantiated the object is treated as being
@@ -4278,7 +4278,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
        if (err)
                goto out;
 
-       if (obj->mm.pages &&
+       if (i915_gem_object_has_pages(obj) &&
            i915_gem_object_is_tiled(obj) &&
            dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
                if (obj->mm.madv == I915_MADV_WILLNEED) {
@@ -4297,7 +4297,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                obj->mm.madv = args->madv;
 
        /* if the object is no longer attached, discard its backing storage */
-       if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
+       if (obj->mm.madv == I915_MADV_DONTNEED &&
+           !i915_gem_object_has_pages(obj))
                i915_gem_object_truncate(obj);
 
        args->retained = obj->mm.madv != __I915_MADV_PURGED;
@@ -4514,7 +4515,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
                if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
                        atomic_set(&obj->mm.pages_pin_count, 0);
                __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
-               GEM_BUG_ON(obj->mm.pages);
+               GEM_BUG_ON(i915_gem_object_has_pages(obj));
 
                if (obj->base.import_attach)
                        drm_prime_gem_destroy(&obj->base, NULL);
index 8a04d33055be578fca6b2063fbc5b4e3cbfe60f0..f663cd9197954e0e24590b7374391566df2eb899 100644 (file)
@@ -70,6 +70,7 @@ static const struct dma_fence_ops i915_clflush_ops = {
 
 static void __i915_do_clflush(struct drm_i915_gem_object *obj)
 {
+       GEM_BUG_ON(!i915_gem_object_has_pages(obj));
        drm_clflush_sg(obj->mm.pages);
        intel_fb_obj_flush(obj, ORIGIN_CPU);
 }
index 4dd4c2159a92e26d91fd98e560c587a91b964e48..3703dc91eedab90e1241bc8e37550d1ac95d2470 100644 (file)
@@ -229,7 +229,7 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
                return 0;
 
        /* Recreate the page after shrinking */
-       if (!so->vma->obj->mm.pages)
+       if (!i915_gem_object_has_pages(so->vma->obj))
                so->batch_offset = -1;
 
        ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
index 74002b2d1b6f82640c7e1cf54b8791f01a049327..b5c87d89777b2684e2220915cd63cdc18b396d9b 100644 (file)
@@ -97,7 +97,7 @@ static bool swap_available(void)
 
 static bool can_release_pages(struct drm_i915_gem_object *obj)
 {
-       if (!obj->mm.pages)
+       if (!i915_gem_object_has_pages(obj))
                return false;
 
        /* Consider only shrinkable ojects. */
@@ -129,7 +129,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
 {
        if (i915_gem_object_unbind(obj) == 0)
                __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
-       return !READ_ONCE(obj->mm.pages);
+       return !i915_gem_object_has_pages(obj);
 }
 
 /**
@@ -247,7 +247,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
                                /* May arrive from get_pages on another bo */
                                mutex_lock_nested(&obj->mm.lock,
                                                  I915_MM_SHRINKER);
-                               if (!obj->mm.pages) {
+                               if (!i915_gem_object_has_pages(obj)) {
                                        __i915_gem_object_invalidate(obj);
                                        list_del_init(&obj->global_link);
                                        count += obj->base.size >> PAGE_SHIFT;
@@ -413,7 +413,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
         */
        unbound = bound = unevictable = 0;
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
-               if (!obj->mm.pages)
+               if (!i915_gem_object_has_pages(obj))
                        continue;
 
                if (!can_release_pages(obj))
@@ -422,7 +422,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
                        unbound += obj->base.size >> PAGE_SHIFT;
        }
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
-               if (!obj->mm.pages)
+               if (!i915_gem_object_has_pages(obj))
                        continue;
 
                if (!can_release_pages(obj))
index fb5231f98c0d620f1ccf03a9872607b1373dc0e2..1294cf695df0588d8e3912b67566816680cc1243 100644 (file)
@@ -269,7 +269,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
         * due to the change in swizzling.
         */
        mutex_lock(&obj->mm.lock);
-       if (obj->mm.pages &&
+       if (i915_gem_object_has_pages(obj) &&
            obj->mm.madv == I915_MADV_WILLNEED &&
            i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
                if (tiling == I915_TILING_NONE) {
index 4d712a4db63b4f50f87f6d8fc304e43450e64a0d..9afeb68e2d448edfbf5e2d7610309380163f8546 100644 (file)
@@ -82,7 +82,7 @@ static void cancel_userptr(struct work_struct *work)
        /* We are inside a kthread context and can't be interrupted */
        if (i915_gem_object_unbind(obj) == 0)
                __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
-       WARN_ONCE(obj->mm.pages,
+       WARN_ONCE(i915_gem_object_has_pages(obj),
                  "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n",
                  obj->bind_count,
                  atomic_read(&obj->mm.pages_pin_count),