drm/i915/guc: Extend deregistration fence to schedule disable
author    Matthew Brost <matthew.brost@intel.com>
          Wed, 21 Jul 2021 21:50:53 +0000 (14:50 -0700)
committer John Harrison <John.C.Harrison@Intel.com>
          Thu, 22 Jul 2021 17:07:17 +0000 (10:07 -0700)
Extend the deregistration context fence to also fence when a GuC context
has a schedule disable pending (see the illustrative sketch below).

v2:
 (John H)
  - Update comment explaining why the pin count is checked within the spin lock

Cc: John Harrison <john.c.harrison@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: John Harrison <john.c.harrison@intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210721215101.139794-11-matthew.brost@intel.com
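
As a rough illustration of the fencing scheme this patch extends, the standalone
C sketch below models a context that parks new requests on a fence list while a
schedule-disable (or deregistration) G2H acknowledgement is outstanding, and
releases them once the ack arrives. Everything here (toy_context, toy_request,
the pthread mutex) is invented for the sketch; the driver itself uses
ce->guc_state.lock, i915_sw_fence_await() and __guc_signal_context_fence().

/* Toy model of the per-context G2H fence; illustrative only, not i915 code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_request {
	int id;
	bool blocked;			/* stands in for i915_sw_fence_await() */
	struct toy_request *next;	/* stands in for rq->guc_fence_link */
};

struct toy_context {
	pthread_mutex_t lock;		/* stands in for ce->guc_state.lock */
	bool pending_deregister;	/* "deregister to register" G2H outstanding */
	bool pending_disable;		/* "schedule disable" G2H outstanding */
	struct toy_request *fences;	/* stands in for ce->guc_state.fences */
};

/* Mirrors the guc_request_alloc() tail: park the request if a G2H is pending. */
static void toy_request_alloc(struct toy_context *ce, struct toy_request *rq)
{
	/* Lockless fast path: the flags only ever go from set to clear here. */
	if (!ce->pending_deregister && !ce->pending_disable)
		return;

	pthread_mutex_lock(&ce->lock);
	if (ce->pending_deregister || ce->pending_disable) {
		rq->blocked = true;	/* i915_sw_fence_await(&rq->submit) */
		rq->next = ce->fences;	/* link onto the context's fence list */
		ce->fences = rq;
	}
	pthread_mutex_unlock(&ce->lock);
}

/* Mirrors the sched-done G2H handler: clear the flag, then release the fences. */
static void toy_sched_disable_done(struct toy_context *ce)
{
	pthread_mutex_lock(&ce->lock);
	ce->pending_disable = false;
	for (struct toy_request *rq = ce->fences; rq; rq = rq->next)
		rq->blocked = false;	/* __guc_signal_context_fence() */
	ce->fences = NULL;
	pthread_mutex_unlock(&ce->lock);
}

int main(void)
{
	struct toy_context ce = { .pending_disable = true };	/* ack in flight */
	struct toy_request rq = { .id = 1 };

	pthread_mutex_init(&ce.lock, NULL);

	toy_request_alloc(&ce, &rq);
	printf("before ack: blocked=%d\n", rq.blocked);	/* 1 */
	toy_sched_disable_done(&ce);
	printf("after ack:  blocked=%d\n", rq.blocked);	/* 0 */
	return 0;
}
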
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c

index 2f393d9dba0d0d55773c9b50d1553c27468f71c6..fc0b36ab1e683ea633563c7cedc7c6a411a95ae8 100644 (file)
@@ -930,7 +930,22 @@ static void guc_context_sched_disable(struct intel_context *ce)
                goto unpin;
 
        spin_lock_irqsave(&ce->guc_state.lock, flags);
+
+       /*
+        * We have to check if the context has been pinned again as another pin
+        * operation is allowed to pass this function. Checking the pin count,
+        * within ce->guc_state.lock, synchronizes this function with
+        * guc_request_alloc ensuring a request doesn't slip through the
+        * 'context_pending_disable' fence. Checking within the spin lock (can't
+        * sleep) ensures another process doesn't pin this context and generate
+        * a request before we set the 'context_pending_disable' flag here.
+        */
+       if (unlikely(atomic_add_unless(&ce->pin_count, -2, 2))) {
+               spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+               return;
+       }
        guc_id = prep_context_pending_disable(ce);
+
        spin_unlock_irqrestore(&ce->guc_state.lock, flags);
 
        with_intel_runtime_pm(runtime_pm, wakeref)
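
For reference, atomic_add_unless(v, a, u) atomically adds a to v unless v equals
u, and returns true only when the add was performed. The check added above thus
subtracts 2 from the pin count unless it is exactly 2; if the subtraction goes
through, the context was pinned again after it appeared idle and the schedule
disable is abandoned. Below is a small userspace model of that semantics using
C11 atomics rather than the kernel's atomic_t (toy code, not the driver):

/* Userspace model of atomic_add_unless(); illustrative only. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool toy_add_unless(atomic_int *v, int a, int u)
{
	int old = atomic_load(v);

	do {
		if (old == u)
			return false;	/* no change: proceed with the disable */
	} while (!atomic_compare_exchange_weak(v, &old, old + a));

	return true;			/* change applied: re-pinned, bail out */
}

int main(void)
{
	atomic_int idle = 2;	/* only the disable path's own references remain */
	atomic_int busy = 4;	/* another pin raced in before the lock was taken */
	bool repinned;

	repinned = toy_add_unless(&idle, -2, 2);
	printf("idle: repinned=%d, count=%d\n", repinned, atomic_load(&idle)); /* 0, 2 */

	repinned = toy_add_unless(&busy, -2, 2);
	printf("busy: repinned=%d, count=%d\n", repinned, atomic_load(&busy)); /* 1, 2 */
	return 0;
}
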
@@ -1135,19 +1150,22 @@ static int guc_request_alloc(struct i915_request *rq)
 out:
        /*
         * We block all requests on this context if a G2H is pending for a
-        * context deregistration as the GuC will fail a context registration
-        * while this G2H is pending. Once a G2H returns, the fence is released
-        * that is blocking these requests (see guc_signal_context_fence).
+        * schedule disable or context deregistration as the GuC will fail a
+        * schedule enable or context registration if either G2H is pending
+        * respectively. Once a G2H returns, the fence is released that is
+        * blocking these requests (see guc_signal_context_fence).
         *
-        * We can safely check the below field outside of the lock as it isn't
-        * possible for this field to transition from being clear to set but
+        * We can safely check the below fields outside of the lock as it isn't
+        * possible for these fields to transition from being clear to set but
         * converse is possible, hence the need for the check within the lock.
         */
-       if (likely(!context_wait_for_deregister_to_register(ce)))
+       if (likely(!context_wait_for_deregister_to_register(ce) &&
+                  !context_pending_disable(ce)))
                return 0;
 
        spin_lock_irqsave(&ce->guc_state.lock, flags);
-       if (context_wait_for_deregister_to_register(ce)) {
+       if (context_wait_for_deregister_to_register(ce) ||
+           context_pending_disable(ce)) {
                i915_sw_fence_await(&rq->submit);
 
                list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
@@ -1491,10 +1509,18 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc,
        if (context_pending_enable(ce)) {
                clr_context_pending_enable(ce);
        } else if (context_pending_disable(ce)) {
+               /*
+                * Unpin must be done before __guc_signal_context_fence,
+                * otherwise a race exists between the requests getting
+                * submitted + retired before this unpin completes resulting in
+                * the pin_count going to zero and the context still being
+                * enabled.
+                */
                intel_context_sched_disable_unpin(ce);
 
                spin_lock_irqsave(&ce->guc_state.lock, flags);
                clr_context_pending_disable(ce);
+               __guc_signal_context_fence(ce);
                spin_unlock_irqrestore(&ce->guc_state.lock, flags);
        }