git.baikalelectronics.ru Git - kernel.git/commitdiff
drm/i915/blt: Remove recursive vma->lock
authorChris Wilson <chris@chris-wilson.co.uk>
Fri, 21 Jun 2019 21:57:33 +0000 (22:57 +0100)
committerChris Wilson <chris@chris-wilson.co.uk>
Fri, 21 Jun 2019 23:52:25 +0000 (00:52 +0100)
As we have already plugged the w->dma into the reservation_object, and
have set ourselves up to automatically signal the request and w->dma on
completion, we do not need to export the rq->fence directly and just use
the w->dma fence.

This avoids having to take the reservation_lock inside the worker which
cross-release lockdep would complain about. :)

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190621215733.12070-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c

index 1fdab0767a4785831d44a0cbeec5075abf0315d3..9b01c3b5b31d08375c5afb57914944c0551e7d23 100644 (file)
@@ -72,7 +72,6 @@ static struct i915_sleeve *create_sleeve(struct i915_address_space *vm,
        vma->ops = &proxy_vma_ops;
 
        sleeve->vma = vma;
-       sleeve->obj = i915_gem_object_get(obj);
        sleeve->pages = pages;
        sleeve->page_sizes = *page_sizes;
 
@@ -85,7 +84,6 @@ err_free:
 
 static void destroy_sleeve(struct i915_sleeve *sleeve)
 {
-       i915_gem_object_put(sleeve->obj);
        kfree(sleeve);
 }
 
@@ -155,7 +153,7 @@ static void clear_pages_worker(struct work_struct *work)
 {
        struct clear_pages_work *w = container_of(work, typeof(*w), work);
        struct drm_i915_private *i915 = w->ce->gem_context->i915;
-       struct drm_i915_gem_object *obj = w->sleeve->obj;
+       struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
        struct i915_vma *vma = w->sleeve->vma;
        struct i915_request *rq;
        int err = w->dma.error;
@@ -193,10 +191,12 @@ static void clear_pages_worker(struct work_struct *work)
                        goto out_request;
        }
 
-       /* XXX: more feverish nightmares await */
-       i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-       i915_vma_unlock(vma);
+       /*
+        * w->dma is already exported via (vma|obj)->resv we need only
+        * keep track of the GPU activity within this vma/request, and
+        * propagate the signal from the request to w->dma.
+        */
+       err = i915_active_ref(&vma->active, rq->fence.context, rq);
        if (err)
                goto out_request;
 
index f3a5eb807c1cad3b0e62aec3eecff42b45d64f32..855481252bdacda7487f44853a60f0b1870edbb4 100644 (file)
@@ -63,17 +63,6 @@ static int igt_client_fill(void *arg)
                if (err)
                        goto err_unpin;
 
-               /*
-                * XXX: For now do the wait without the object resv lock to
-                * ensure we don't deadlock.
-                */
-               err = i915_gem_object_wait(obj,
-                                          I915_WAIT_INTERRUPTIBLE |
-                                          I915_WAIT_ALL,
-                                          MAX_SCHEDULE_TIMEOUT);
-               if (err)
-                       goto err_unpin;
-
                i915_gem_object_lock(obj);
                err = i915_gem_object_set_to_cpu_domain(obj, false);
                i915_gem_object_unlock(obj);