git.baikalelectronics.ru Git - kernel.git/commitdiff
drm/i915: Skip shrinking already freed pages
authorChris Wilson <chris@chris-wilson.co.uk>
Tue, 18 Jun 2019 07:41:29 +0000 (08:41 +0100)
committerChris Wilson <chris@chris-wilson.co.uk>
Tue, 18 Jun 2019 17:09:08 +0000 (18:09 +0100)
Previously, we wanted to shrink the pages of freed objects before they
were finally RCU collected. However, by removing the struct_mutex
serialisation around the active reference, we need to acquire an extra
reference around the wait. Unfortunately this means that we have to skip
objects that are awaiting RCU collection.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=110937
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190618074153.16055-2-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gem/i915_gem_object.c
drivers/gpu/drm/i915/gem/i915_gem_shrinker.c

index a8c77f5713c66794ab3eab3e67d1527c311d034f..be6caccce0c5e0fd31a441e34bc89833c290dd21 100644 (file)
@@ -146,33 +146,6 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
        }
 }
 
-static bool discard_backing_storage(struct drm_i915_gem_object *obj)
-{
-       /*
-        * If we are the last user of the backing storage (be it shmemfs
-        * pages or stolen etc), we know that the pages are going to be
-        * immediately released. In this case, we can then skip copying
-        * back the contents from the GPU.
-        */
-       if (!i915_gem_object_is_shrinkable(obj))
-               return false;
-
-       if (obj->mm.madv != I915_MADV_WILLNEED)
-               return false;
-
-       if (!obj->base.filp)
-               return true;
-
-       /* At first glance, this looks racy, but then again so would be
-        * userspace racing mmap against close. However, the first external
-        * reference to the filp can only be obtained through the
-        * i915_gem_mmap_ioctl() which safeguards us against the user
-        * acquiring such a reference whilst we are in the middle of
-        * freeing the object.
-        */
-       return file_count(obj->base.filp) == 1;
-}
-
 static void __i915_gem_free_objects(struct drm_i915_private *i915,
                                    struct llist_node *freed)
 {
@@ -222,8 +195,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
                if (obj->ops->release)
                        obj->ops->release(obj);
 
-               if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
-                       atomic_set(&obj->mm.pages_pin_count, 0);
+               atomic_set(&obj->mm.pages_pin_count, 0);
                __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
                GEM_BUG_ON(i915_gem_object_has_pages(obj));
 
@@ -320,23 +292,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 {
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 
-       if (obj->mm.quirked)
-               __i915_gem_object_unpin_pages(obj);
-
-       if (discard_backing_storage(obj)) {
-               struct drm_i915_private *i915 = to_i915(obj->base.dev);
-
-               obj->mm.madv = I915_MADV_DONTNEED;
-
-               if (i915_gem_object_has_pages(obj)) {
-                       unsigned long flags;
-
-                       spin_lock_irqsave(&i915->mm.obj_lock, flags);
-                       list_move_tail(&obj->mm.link, &i915->mm.purge_list);
-                       spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
-               }
-       }
-
        /*
         * Before we free the object, make sure any pure RCU-only
         * read-side critical sections are complete, e.g.
index c851c4029597fef3a844007feddb310306926e45..3a926a8755c6cfb690a50f359f69dca90a6524c6 100644 (file)
@@ -241,6 +241,9 @@ i915_gem_shrink(struct drm_i915_private *i915,
                        if (!can_release_pages(obj))
                                continue;
 
+                       if (!kref_get_unless_zero(&obj->base.refcount))
+                               continue;
+
                        spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 
                        if (unsafe_drop_pages(obj)) {
@@ -253,7 +256,9 @@ i915_gem_shrink(struct drm_i915_private *i915,
                                }
                                mutex_unlock(&obj->mm.lock);
                        }
+
                        scanned += obj->base.size >> PAGE_SHIFT;
+                       i915_gem_object_put(obj);
 
                        spin_lock_irqsave(&i915->mm.obj_lock, flags);
                }