drm/i915/gt: execlists->active is serialised by the tasklet
author     Chris Wilson <chris@chris-wilson.co.uk>
           Wed, 9 Oct 2019 16:09:06 +0000 (17:09 +0100)
committer  Chris Wilson <chris@chris-wilson.co.uk>
           Wed, 9 Oct 2019 18:54:46 +0000 (19:54 +0100)
The execlists active/pending ports are no longer protected by
engine->active.lock but are serialised by the tasklet instead. Update
the locking around the debug and stats code to follow suit.

v2: local_bh_disable() to prevent recursing into the tasklet in case we
trigger a softirq (Tvrtko)

Fixes: 4a24537b5914 ("drm/i915/execlists: Lift process_csb() out of the irq-off spinlock")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191009160906.16195-1-chris@chris-wilson.co.uk
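
For context, a minimal sketch of the usage pattern the new helpers establish:
any reader of execlists->active (debugfs dumps, engine stats) takes the tasklet
lock with softirqs disabled instead of engine->active.lock. The function name
dump_active_ports() is invented for illustration only; the real callers are
intel_engine_print_registers() and intel_enable_engine_stats() in the hunks
below.

/*
 * Illustrative sketch, not part of the patch; assumes the usual i915
 * headers ("intel_engine.h", "i915_request.h", <drm/drm_print.h>).
 */
static void dump_active_ports(struct intel_engine_cs *engine,
			      struct drm_printer *m)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request * const *port;
	struct i915_request *rq;

	/*
	 * Block the submission tasklet: local_bh_disable() stops it running
	 * on this CPU, and spinning on tasklet_trylock() stops it running
	 * anywhere else, so execlists->active cannot change while we walk it.
	 */
	execlists_active_lock_bh(execlists);

	for (port = execlists->active; (rq = *port); port++)
		drm_printf(m, "\tactive: fence %llx:%lld\n",
			   rq->fence.context, rq->fence.seqno);

	/* Drop the tasklet lock, then let any pending softirq (re)run. */
	execlists_active_unlock_bh(execlists);
}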
drivers/gpu/drm/i915/gt/intel_engine.h
drivers/gpu/drm/i915/gt/intel_engine_cs.c
drivers/gpu/drm/i915/i915_gem.h

drivers/gpu/drm/i915/gt/intel_engine.h
index d624752f2a921054657c9184898580d7d9649314..93ea367fe624682c3275fc933eb3cac11cec58c3 100644
@@ -136,6 +136,20 @@ execlists_active(const struct intel_engine_execlists *execlists)
        return READ_ONCE(*execlists->active);
 }
 
+static inline void
+execlists_active_lock_bh(struct intel_engine_execlists *execlists)
+{
+       local_bh_disable(); /* prevent local softirq and lock recursion */
+       tasklet_lock(&execlists->tasklet);
+}
+
+static inline void
+execlists_active_unlock_bh(struct intel_engine_execlists *execlists)
+{
+       tasklet_unlock(&execlists->tasklet);
+       local_bh_enable(); /* restore softirq, and kick ksoftirqd! */
+}
+
 struct i915_request *
 execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
 
drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 5aa1371f6a0fde94e407cd2ba88460d81d5995d3..c9d639c6becb8aca8443b6cce83fcf9e61e011d5 100644
@@ -1245,9 +1245,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
                                         struct drm_printer *m)
 {
        struct drm_i915_private *dev_priv = engine->i915;
-       const struct intel_engine_execlists * const execlists =
-               &engine->execlists;
-       unsigned long flags;
+       struct intel_engine_execlists * const execlists = &engine->execlists;
        u64 addr;
 
        if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
@@ -1329,7 +1327,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
                                   idx, hws[idx * 2], hws[idx * 2 + 1]);
                }
 
-               spin_lock_irqsave(&engine->active.lock, flags);
+               execlists_active_lock_bh(execlists);
                for (port = execlists->active; (rq = *port); port++) {
                        char hdr[80];
                        int len;
@@ -1367,7 +1365,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
                        if (tl)
                                intel_timeline_put(tl);
                }
-               spin_unlock_irqrestore(&engine->active.lock, flags);
+               execlists_active_unlock_bh(execlists);
        } else if (INTEL_GEN(dev_priv) > 6) {
                drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
                           ENGINE_READ(engine, RING_PP_DIR_BASE));
@@ -1509,8 +1507,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
        if (!intel_engine_supports_stats(engine))
                return -ENODEV;
 
-       spin_lock_irqsave(&engine->active.lock, flags);
-       write_seqlock(&engine->stats.lock);
+       execlists_active_lock_bh(execlists);
+       write_seqlock_irqsave(&engine->stats.lock, flags);
 
        if (unlikely(engine->stats.enabled == ~0)) {
                err = -EBUSY;
@@ -1538,8 +1536,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
        }
 
 unlock:
-       write_sequnlock(&engine->stats.lock);
-       spin_unlock_irqrestore(&engine->active.lock, flags);
+       write_sequnlock_irqrestore(&engine->stats.lock, flags);
+       execlists_active_unlock_bh(execlists);
 
        return err;
 }
drivers/gpu/drm/i915/i915_gem.h
index 167a7b56ed5b5e47c8a502d55cf84db127bec51d..6795f1daa3d500d9076d5c7e387bb38bec15caed 100644
@@ -77,6 +77,12 @@ struct drm_i915_private;
 
 #define I915_GEM_IDLE_TIMEOUT (HZ / 5)
 
+static inline void tasklet_lock(struct tasklet_struct *t)
+{
+       while (!tasklet_trylock(t))
+               cpu_relax();
+}
+
 static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
 {
        if (!atomic_fetch_inc(&t->count))