drm/i915: Hold irq-off for the entire fake lock period
author    Chris Wilson <chris@chris-wilson.co.uk>
          Fri, 23 Aug 2019 13:26:46 +0000 (14:26 +0100)
committer Chris Wilson <chris@chris-wilson.co.uk>
          Fri, 23 Aug 2019 18:44:21 +0000 (19:44 +0100)
Sadly lockdep records when the irqs are re-enabled and then marks up the
fake lock as being irq-unsafe. Our hand is forced and so we must mark up
the entire fake lock critical section as irq-off.

Hopefully this is the last tweak required!
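
The resulting shape of the lockdep annotation is roughly the following. This
is a minimal sketch, assuming lockdep is enabled: the helper names
fake_lock()/fake_unlock() and the bare struct mutex parameter are illustrative
stand-ins for the __timeline_mark_lock()/__timeline_mark_unlock() helpers
changed below.

#include <linux/irqflags.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>

/*
 * Keep interrupts disabled for the whole lockdep-annotated section and
 * hand the saved flags to the unlock side, so irqs are only re-enabled
 * after the fake lock has been released.
 */
static inline unsigned long fake_lock(struct mutex *m)
{
	unsigned long flags;

	local_irq_save(flags);                        /* irq-off for the entire fake lock */
	mutex_acquire(&m->dep_map, 2, 0, _THIS_IP_);  /* tell lockdep we "hold" m */

	return flags;
}

static inline void fake_unlock(struct mutex *m, unsigned long flags)
{
	mutex_release(&m->dep_map, 0, _THIS_IP_);     /* drop the fake hold... */
	local_irq_restore(flags);                     /* ...before irqs come back on */
}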

v2: Not quite, we need to mark the timeline spinlock as irqsafe. That
was a genuine bug being hidden by the earlier lockdep splat.
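
Concretely, the v2 part of the change is every taker of gt->timelines.lock
moving to the irqsave variants, since intel_timeline_enter() can now be
reached with interrupts already disabled. A sketch of the pattern follows;
the helper name is illustrative, and the struct intel_gt_timelines /
struct intel_timeline types are the driver's own (the real hunks are in
intel_timeline.c, intel_reset.c, i915_gem.c and i915_request.c below).

#include <linux/list.h>
#include <linux/spinlock.h>

/*
 * spin_lock_irqsave() rather than spin_lock_irq(): the caller may or may
 * not already have interrupts disabled (e.g. under the fake lock above),
 * so the previous irq state must be saved and restored, not forced back on.
 */
static void timelines_add_active(struct intel_gt_timelines *timelines,
				 struct intel_timeline *tl)
{
	unsigned long flags;

	spin_lock_irqsave(&timelines->lock, flags);
	list_add(&tl->link, &timelines->active_list);
	spin_unlock_irqrestore(&timelines->lock, flags);
}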

Fixes: d67739268cf0 ("drm/i915/gt: Mark up the nested engine-pm timeline lock as irqsafe")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190823132700.25286-2-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gt/intel_engine_pm.c
drivers/gpu/drm/i915/gt/intel_reset.c
drivers/gpu/drm/i915/gt/intel_timeline.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_request.c

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index a372d4ea9370ad4159dc8887485ed0ddf5458a3b..65b5ca74b3947ae5e12382fed6157fcdea3a7241 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -39,27 +39,32 @@ static int __engine_unpark(struct intel_wakeref *wf)
 
 #if IS_ENABLED(CONFIG_LOCKDEP)
 
-static inline void __timeline_mark_lock(struct intel_context *ce)
+static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
 {
        unsigned long flags;
 
        local_irq_save(flags);
        mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);
-       local_irq_restore(flags);
+
+       return flags;
 }
 
-static inline void __timeline_mark_unlock(struct intel_context *ce)
+static inline void __timeline_mark_unlock(struct intel_context *ce,
+                                         unsigned long flags)
 {
        mutex_release(&ce->timeline->mutex.dep_map, 0, _THIS_IP_);
+       local_irq_restore(flags);
 }
 
 #else
 
-static inline void __timeline_mark_lock(struct intel_context *ce)
+static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
 {
+       return 0;
 }
 
-static inline void __timeline_mark_unlock(struct intel_context *ce)
+static inline void __timeline_mark_unlock(struct intel_context *ce,
+                                         unsigned long flags)
 {
 }
 
@@ -68,6 +73,8 @@ static inline void __timeline_mark_unlock(struct intel_context *ce)
 static bool switch_to_kernel_context(struct intel_engine_cs *engine)
 {
        struct i915_request *rq;
+       unsigned long flags;
+       bool result = true;
 
        /* Already inside the kernel context, safe to power down. */
        if (engine->wakeref_serial == engine->serial)
@@ -89,12 +96,12 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
         * retiring the last request, thus all rings should be empty and
         * all timelines idle.
         */
-       __timeline_mark_lock(engine->kernel_context);
+       flags = __timeline_mark_lock(engine->kernel_context);
 
        rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
        if (IS_ERR(rq))
                /* Context switch failed, hope for the best! Maybe reset? */
-               return true;
+               goto out_unlock;
 
        intel_timeline_enter(rq->timeline);
 
@@ -110,9 +117,10 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
        __intel_wakeref_defer_park(&engine->wakeref);
        __i915_request_queue(rq, NULL);
 
-       __timeline_mark_unlock(engine->kernel_context);
-
-       return false;
+       result = false;
+out_unlock:
+       __timeline_mark_unlock(engine->kernel_context, flags);
+       return result;
 }
 
 static int __engine_park(struct intel_wakeref *wf)
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 077716442c90a4832c7acdde3c4eaaf1c05c8987..b9d84d52e98642b691ede4cc0107574e866e6a7a 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -792,6 +792,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 {
        struct intel_gt_timelines *timelines = &gt->timelines;
        struct intel_timeline *tl;
+       unsigned long flags;
 
        if (!test_bit(I915_WEDGED, &gt->reset.flags))
                return true;
@@ -811,7 +812,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
         *
         * No more can be submitted until we reset the wedged bit.
         */
-       spin_lock(&timelines->lock);
+       spin_lock_irqsave(&timelines->lock, flags);
        list_for_each_entry(tl, &timelines->active_list, link) {
                struct i915_request *rq;
 
@@ -819,7 +820,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
                if (!rq)
                        continue;
 
-               spin_unlock(&timelines->lock);
+               spin_unlock_irqrestore(&timelines->lock, flags);
 
                /*
                 * All internal dependencies (i915_requests) will have
@@ -832,10 +833,10 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
                i915_request_put(rq);
 
                /* Restart iteration after dropping lock */
-               spin_lock(&timelines->lock);
+               spin_lock_irqsave(&timelines->lock, flags);
                tl = list_entry(&timelines->active_list, typeof(*tl), link);
        }
-       spin_unlock(&timelines->lock);
+       spin_unlock_irqrestore(&timelines->lock, flags);
 
        intel_gt_sanitize(gt, false);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 02fbe11b671be36507b2e1b8f480729c51f55c79..9cb01d9828f1dbc189793c297b9cf42d7088bf00 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -337,6 +337,7 @@ int intel_timeline_pin(struct intel_timeline *tl)
 void intel_timeline_enter(struct intel_timeline *tl)
 {
        struct intel_gt_timelines *timelines = &tl->gt->timelines;
+       unsigned long flags;
 
        lockdep_assert_held(&tl->mutex);
 
@@ -345,14 +346,15 @@ void intel_timeline_enter(struct intel_timeline *tl)
                return;
        GEM_BUG_ON(!tl->active_count); /* overflow? */
 
-       spin_lock(&timelines->lock);
+       spin_lock_irqsave(&timelines->lock, flags);
        list_add(&tl->link, &timelines->active_list);
-       spin_unlock(&timelines->lock);
+       spin_unlock_irqrestore(&timelines->lock, flags);
 }
 
 void intel_timeline_exit(struct intel_timeline *tl)
 {
        struct intel_gt_timelines *timelines = &tl->gt->timelines;
+       unsigned long flags;
 
        lockdep_assert_held(&tl->mutex);
 
@@ -360,9 +362,9 @@ void intel_timeline_exit(struct intel_timeline *tl)
        if (--tl->active_count)
                return;
 
-       spin_lock(&timelines->lock);
+       spin_lock_irqsave(&timelines->lock, flags);
        list_del(&tl->link);
-       spin_unlock(&timelines->lock);
+       spin_unlock_irqrestore(&timelines->lock, flags);
 
        /*
         * Since this timeline is idle, all barriers upon which we were waiting
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index eb31b69a316a933f34e7d2ec5d0c43d890de59bb..ec9a46c276de41e6222acc769098c43d60ed8a1a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -889,12 +889,13 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
 
 static long
 wait_for_timelines(struct drm_i915_private *i915,
-                  unsigned int flags, long timeout)
+                  unsigned int wait, long timeout)
 {
        struct intel_gt_timelines *timelines = &i915->gt.timelines;
        struct intel_timeline *tl;
+       unsigned long flags;
 
-       spin_lock(&timelines->lock);
+       spin_lock_irqsave(&timelines->lock, flags);
        list_for_each_entry(tl, &timelines->active_list, link) {
                struct i915_request *rq;
 
@@ -902,7 +903,7 @@ wait_for_timelines(struct drm_i915_private *i915,
                if (!rq)
                        continue;
 
-               spin_unlock(&timelines->lock);
+               spin_unlock_irqrestore(&timelines->lock, flags);
 
                /*
                 * "Race-to-idle".
@@ -913,19 +914,19 @@ wait_for_timelines(struct drm_i915_private *i915,
                 * want to complete as quickly as possible to avoid prolonged
                 * stalls, so allow the gpu to boost to maximum clocks.
                 */
-               if (flags & I915_WAIT_FOR_IDLE_BOOST)
+               if (wait & I915_WAIT_FOR_IDLE_BOOST)
                        gen6_rps_boost(rq);
 
-               timeout = i915_request_wait(rq, flags, timeout);
+               timeout = i915_request_wait(rq, wait, timeout);
                i915_request_put(rq);
                if (timeout < 0)
                        return timeout;
 
                /* restart after reacquiring the lock */
-               spin_lock(&timelines->lock);
+               spin_lock_irqsave(&timelines->lock, flags);
                tl = list_entry(&timelines->active_list, typeof(*tl), link);
        }
-       spin_unlock(&timelines->lock);
+       spin_unlock_irqrestore(&timelines->lock, flags);
 
        return timeout;
 }
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index f1a0a57fc6fc8b895cb52722527829abb90e3e19..a53777dd371c37ab7ea69c1e958a9a045a983972 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1465,9 +1465,10 @@ bool i915_retire_requests(struct drm_i915_private *i915)
 {
        struct intel_gt_timelines *timelines = &i915->gt.timelines;
        struct intel_timeline *tl, *tn;
+       unsigned long flags;
        LIST_HEAD(free);
 
-       spin_lock(&timelines->lock);
+       spin_lock_irqsave(&timelines->lock, flags);
        list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
                if (!mutex_trylock(&tl->mutex))
                        continue;
@@ -1475,11 +1476,11 @@ bool i915_retire_requests(struct drm_i915_private *i915)
                intel_timeline_get(tl);
                GEM_BUG_ON(!tl->active_count);
                tl->active_count++; /* pin the list element */
-               spin_unlock(&timelines->lock);
+               spin_unlock_irqrestore(&timelines->lock, flags);
 
                retire_requests(tl);
 
-               spin_lock(&timelines->lock);
+               spin_lock_irqsave(&timelines->lock, flags);
 
                /* Resume iteration after dropping lock */
                list_safe_reset_next(tl, tn, link);
@@ -1494,7 +1495,7 @@ bool i915_retire_requests(struct drm_i915_private *i915)
                        list_add(&tl->link, &free);
                }
        }
-       spin_unlock(&timelines->lock);
+       spin_unlock_irqrestore(&timelines->lock, flags);
 
        list_for_each_entry_safe(tl, tn, &free, link)
                __intel_timeline_free(&tl->kref);