git.baikalelectronics.ru Git - kernel.git/commitdiff
drm/i915/guc: Release submit fence from an irq_work
author Matthew Brost <matthew.brost@intel.com>
Thu, 9 Sep 2021 16:47:36 +0000 (09:47 -0700)
committer John Harrison <John.C.Harrison@Intel.com>
Mon, 13 Sep 2021 18:30:44 +0000 (11:30 -0700)
A subsequent patch will flip the locking hierarchy from
ce->guc_state.lock -> sched_engine->lock to sched_engine->lock ->
ce->guc_state.lock. As such we need to release the submit fence for a
request from an irq_work to break a lock inversion - i.e. the fence must
be released while holding ce->guc_state.lock, and releasing the fence can
acquire sched_engine->lock.
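
For reference, the shape of the fix is roughly the sketch below. It is
illustrative only: my_ctx, my_obj, lock_a, lock_b, pending and
complete_obj() are hypothetical stand-ins (lock_a for ce->guc_state.lock,
lock_b for sched_engine->lock), not the actual i915 structures.

#include <linux/irq_work.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct my_obj {
	struct irq_work work;
	struct list_head link;
};

struct my_ctx {
	spinlock_t lock_a;		/* stand-in for ce->guc_state.lock */
	struct list_head pending;
};

static DEFINE_SPINLOCK(lock_b);		/* stand-in for sched_engine->lock */

static void complete_obj(struct my_obj *obj)
{
	unsigned long flags;

	/* Safe here: the irq_work callback runs with lock_a not held. */
	spin_lock_irqsave(&lock_b, flags);
	/* ... signal waiters, kick the submission tasklet, etc. ... */
	spin_unlock_irqrestore(&lock_b, flags);
}

static void completion_cb(struct irq_work *wrk)
{
	struct my_obj *obj = container_of(wrk, typeof(*obj), work);

	complete_obj(obj);
}

static void signal_all(struct my_ctx *ctx)
{
	struct my_obj *obj, *on;
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock_a, flags);
	list_for_each_entry_safe(obj, on, &ctx->pending, link) {
		list_del(&obj->link);		/* unlink before queuing */
		init_irq_work(&obj->work, completion_cb);
		irq_work_queue(&obj->work);	/* completion_cb runs later */
	}
	spin_unlock_irqrestore(&ctx->lock_a, flags);
}

irq_work runs the callback from a self-IPI shortly after queuing, so the
completion escapes the lock_a critical section with far less latency than
punting to a workqueue.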

v2:
 (Daniele)
  - Delete request from list before calling irq_work_queue

Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210909164744.31249-16-matthew.brost@intel.com
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
drivers/gpu/drm/i915/i915_request.h

index dcd7a09f8559fecb149971a66ea1e9dc348c58c1..4b7ccf9730a3177c27d0980a041380db0b6acee0 100644 (file)
@@ -2049,17 +2049,32 @@ static const struct intel_context_ops guc_context_ops = {
        .create_virtual = guc_create_virtual,
 };
 
+static void submit_work_cb(struct irq_work *wrk)
+{
+       struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work);
+
+       might_lock(&rq->engine->sched_engine->lock);
+       i915_sw_fence_complete(&rq->submit);
+}
+
 static void __guc_signal_context_fence(struct intel_context *ce)
 {
-       struct i915_request *rq;
+       struct i915_request *rq, *rn;
 
        lockdep_assert_held(&ce->guc_state.lock);
 
        if (!list_empty(&ce->guc_state.fences))
                trace_intel_context_fence_release(ce);
 
-       list_for_each_entry(rq, &ce->guc_state.fences, guc_fence_link)
-               i915_sw_fence_complete(&rq->submit);
+       /*
+        * Use an IRQ to ensure locking order of sched_engine->lock ->
+        * ce->guc_state.lock is preserved.
+        */
+       list_for_each_entry_safe(rq, rn, &ce->guc_state.fences,
+                                guc_fence_link) {
+               list_del(&rq->guc_fence_link);
+               irq_work_queue(&rq->submit_work);
+       }
 
        INIT_LIST_HEAD(&ce->guc_state.fences);
 }
@@ -2169,6 +2184,7 @@ out:
        spin_lock_irqsave(&ce->guc_state.lock, flags);
        if (context_wait_for_deregister_to_register(ce) ||
            context_pending_disable(ce)) {
+               init_irq_work(&rq->submit_work, submit_work_cb);
                i915_sw_fence_await(&rq->submit);
 
                list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
index 1bc1349ba3c255e2b1f95822a7fd6c3b3a246759..d818cfbfc41d2b92a2ff1932f3abcb56843c804c 100644 (file)
@@ -218,6 +218,11 @@ struct i915_request {
        };
        struct llist_head execute_cb;
        struct i915_sw_fence semaphore;
+       /**
+        * @submit_work: complete submit fence from an IRQ if needed for
+        * locking hierarchy reasons.
+        */
+       struct irq_work submit_work;
 
        /*
         * A list of everyone we wait upon, and everyone who waits upon us.
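
A note on the might_lock() annotation in submit_work_cb() above: it tells
lockdep that the callback may acquire sched_engine->lock, so the new
sched_engine->lock -> ce->guc_state.lock ordering is checked on every run,
even when the completion path does not actually take the lock. A minimal
sketch of the idiom, with some_lock as a hypothetical stand-in:

static void cb(struct irq_work *wrk)
{
	/* Record a would-be acquisition for lockdep, without locking. */
	might_lock(&some_lock);
	/* ... work that only sometimes takes some_lock ... */
}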