git.baikalelectronics.ru Git - kernel.git/commitdiff
drm/i915: Pull seqno started checks together
author: Chris Wilson <chris@chris-wilson.co.uk>
Mon, 6 Aug 2018 11:26:05 +0000 (12:26 +0100)
committer: Chris Wilson <chris@chris-wilson.co.uk>
Tue, 7 Aug 2018 11:43:00 +0000 (12:43 +0100)
We have a few instances of checking seqno-1 to see if the HW has started
the request. Pull those together under a helper.

v2: Pull the !seqno assertion higher, as given seqno==1 we may indeed
check to see if we have started using seqno==0.

Suggested-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180806112605.20725-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/i915_request.h
drivers/gpu/drm/i915/intel_breadcrumbs.c
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_hangcheck.c
drivers/gpu/drm/i915/intel_ringbuffer.h

index 5c2c93cbab12f8ebff29507a00953a24a9a877c6..09ed48833b5496a9c5b6501ed128cf4bfd504167 100644 (file)
@@ -527,7 +527,7 @@ void __i915_request_submit(struct i915_request *request)
 
        seqno = timeline_get_seqno(&engine->timeline);
        GEM_BUG_ON(!seqno);
-       GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));
+       GEM_BUG_ON(intel_engine_signaled(engine, seqno));
 
        /* We may be recursing from the signal callback of another i915 fence */
        spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
@@ -579,8 +579,7 @@ void __i915_request_unsubmit(struct i915_request *request)
         */
        GEM_BUG_ON(!request->global_seqno);
        GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
-       GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine),
-                                    request->global_seqno));
+       GEM_BUG_ON(intel_engine_has_completed(engine, request->global_seqno));
        engine->timeline.seqno--;
 
        /* We may be recursing from the signal callback of another i915 fence */
@@ -1205,7 +1204,7 @@ static bool __i915_spin_request(const struct i915_request *rq,
         * it is a fair assumption that it will not complete within our
         * relatively short timeout.
         */
-       if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1))
+       if (!intel_engine_has_started(engine, seqno))
                return false;
 
        /*
@@ -1222,7 +1221,7 @@ static bool __i915_spin_request(const struct i915_request *rq,
        irq = READ_ONCE(engine->breadcrumbs.irq_count);
        timeout_us += local_clock_us(&cpu);
        do {
-               if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
+               if (intel_engine_has_completed(engine, seqno))
                        return seqno == i915_request_global_seqno(rq);
 
                /*
index e1c9365dfefb1ef9ddf80183ecea07185b6d7ed4..9898301ab7ef5e3dc096e13cc0d24ddcef35cb41 100644 (file)
@@ -272,7 +272,10 @@ long i915_request_wait(struct i915_request *rq,
 #define I915_WAIT_ALL          BIT(2) /* used by i915_gem_object_wait() */
 #define I915_WAIT_FOR_IDLE_BOOST BIT(3)
 
-static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
+static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
+                                           u32 seqno);
+static inline bool intel_engine_has_completed(struct intel_engine_cs *engine,
+                                             u32 seqno);
 
 /**
  * Returns true if seq1 is later than seq2.
@@ -282,11 +285,31 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
        return (s32)(seq1 - seq2) >= 0;
 }
 
+/**
+ * i915_request_started - check if the request has begun being executed
+ * @rq: the request
+ *
+ * Returns true if the request has been submitted to hardware, and the hardware
+ * has advanced passed the end of the previous request and so should be either
+ * currently processing the request (though it may be preempted and so
+ * not necessarily the next request to complete) or have completed the request.
+ */
+static inline bool i915_request_started(const struct i915_request *rq)
+{
+       u32 seqno;
+
+       seqno = i915_request_global_seqno(rq);
+       if (!seqno) /* not yet submitted to HW */
+               return false;
+
+       return intel_engine_has_started(rq->engine, seqno);
+}
+
 static inline bool
 __i915_request_completed(const struct i915_request *rq, u32 seqno)
 {
        GEM_BUG_ON(!seqno);
-       return i915_seqno_passed(intel_engine_get_seqno(rq->engine), seqno) &&
+       return intel_engine_has_completed(rq->engine, seqno) &&
                seqno == i915_request_global_seqno(rq);
 }
 
@@ -301,18 +324,6 @@ static inline bool i915_request_completed(const struct i915_request *rq)
        return __i915_request_completed(rq, seqno);
 }
 
-static inline bool i915_request_started(const struct i915_request *rq)
-{
-       u32 seqno;
-
-       seqno = i915_request_global_seqno(rq);
-       if (!seqno)
-               return false;
-
-       return i915_seqno_passed(intel_engine_get_seqno(rq->engine),
-                                seqno - 1);
-}
-
 static inline bool i915_sched_node_signaled(const struct i915_sched_node *node)
 {
        const struct i915_request *rq =
index 1db6ba7d926ee3b27b0c00e9bf82f956173e8b59..84bf8d827136dc28515216df4bec41528024d9e5 100644 (file)
@@ -256,8 +256,7 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
        spin_unlock(&b->irq_lock);
 
        rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
-               GEM_BUG_ON(!i915_seqno_passed(intel_engine_get_seqno(engine),
-                                             wait->seqno));
+               GEM_BUG_ON(!intel_engine_signaled(engine, wait->seqno));
                RB_CLEAR_NODE(&wait->node);
                wake_up_process(wait->tsk);
        }
@@ -508,8 +507,7 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
                return armed;
 
        /* Make the caller recheck if its request has already started. */
-       return i915_seqno_passed(intel_engine_get_seqno(engine),
-                                wait->seqno - 1);
+       return intel_engine_has_started(engine, wait->seqno);
 }
 
 static inline bool chain_wakeup(struct rb_node *rb, int priority)
index 67c4fc5d737cc0e68aee5403bc409d0b6c60db86..99d5a24219c13041c83cfa226ca958a7b51a98d9 100644 (file)
@@ -968,8 +968,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
                return true;
 
        /* Any inflight/incomplete requests? */
-       if (!i915_seqno_passed(intel_engine_get_seqno(engine),
-                              intel_engine_last_submit(engine)))
+       if (!intel_engine_signaled(engine, intel_engine_last_submit(engine)))
                return false;
 
        if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))
index 2fc7a0dd0df9b2bc7a88814f75d98334bfd4d4e9..e26d05a46451fe7f604474aa044e8f1307022177 100644 (file)
@@ -142,7 +142,7 @@ static int semaphore_passed(struct intel_engine_cs *engine)
        if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
                return -1;
 
-       if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
+       if (intel_engine_signaled(signaller, seqno))
                return 1;
 
        /* cursory check for an unkickable deadlock */
index 8837079cb8b3df7d5b32334a06dbdb6587c891f1..9090885d57dea5bf224b52d66f8fec936a794cbf 100644 (file)
@@ -910,14 +910,10 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine);
 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
 u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
 
-static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
-{
-       return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
-}
-
 static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
 {
-       /* We are only peeking at the tail of the submit queue (and not the
+       /*
+        * We are only peeking at the tail of the submit queue (and not the
         * queue itself) in order to gain a hint as to the current active
         * state of the engine. Callers are not expected to be taking
         * engine->timeline->lock, nor are they expected to be concerned
@@ -927,6 +923,31 @@ static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
        return READ_ONCE(engine->timeline.seqno);
 }
 
+static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
+{
+       return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
+}
+
+static inline bool intel_engine_signaled(struct intel_engine_cs *engine,
+                                        u32 seqno)
+{
+       return i915_seqno_passed(intel_engine_get_seqno(engine), seqno);
+}
+
+static inline bool intel_engine_has_completed(struct intel_engine_cs *engine,
+                                             u32 seqno)
+{
+       GEM_BUG_ON(!seqno);
+       return intel_engine_signaled(engine, seqno);
+}
+
+static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
+                                           u32 seqno)
+{
+       GEM_BUG_ON(!seqno);
+       return intel_engine_signaled(engine, seqno - 1);
+}
+
 void intel_engine_get_instdone(struct intel_engine_cs *engine,
                               struct intel_instdone *instdone);