drm/i915: Disable semaphore busywaits on saturated systems
author     Chris Wilson <chris@chris-wilson.co.uk>
           Sat, 4 May 2019 07:07:07 +0000 (08:07 +0100)
committer  Chris Wilson <chris@chris-wilson.co.uk>
           Sat, 4 May 2019 08:18:02 +0000 (09:18 +0100)
Asking the GPU to busywait on a memory address, perhaps not unexpectedly
in hindsight for a shared system, leads to bus contention that affects
CPU programs trying to concurrently access memory. This can manifest as
a drop in transcode throughput on highly over-saturated workloads.

The only clue offered by perf is that bus-cycles (perf stat -e
bus-cycles) jumped by 50% when semaphores were enabled. This
corresponds with extra CPU active cycles being attributed to
intel_idle's mwait.
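
For reference, the counter can be sampled system-wide with an
invocation along these lines (illustrative, not taken from the
original report):

    # count memory-bus cycles across all CPUs for 10 seconds
    perf stat -e bus-cycles -a sleep 10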

This patch introduces a heuristic to detect when more than one client
is submitting to the GPU, pushing it into an oversaturated state. As
we already keep track of when the semaphores are signaled, we can
inspect their state on submitting the busywait batch: if we had
planned to use a semaphore but were too late, we conclude that the
GPU is overloaded and stop using semaphores in future requests. In
practice, this means we optimistically try semaphores for the first
frame of a transcode job split over multiple engines, fail if there
are multiple clients active, and continue without semaphores for the
subsequent frames in the sequence. Periodically, whenever the client
waits to catch up with the transcode results, we optimistically
switch semaphores back on.
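
For illustration, the heuristic condenses into a couple of helpers
(a minimal sketch with simplified, hypothetical names; the actual
kernel implementation is in the diff below):

    #include <stdbool.h>

    /* Sketch only: hypothetical, simplified types and helpers. */
    typedef unsigned int engine_mask_t;

    struct context {
            engine_mask_t saturated; /* engines we busywaited on too late */
    };

    /*
     * At submit: if the signaler has already completed, the semaphore
     * would only have busywaited on a value long since written, so
     * record those engines as saturated.
     */
    static void note_saturation(struct context *ctx,
                                engine_mask_t semaphores,
                                bool signaler_completed)
    {
            if (semaphores && signaler_completed)
                    ctx->saturated |= semaphores;
    }

    /*
     * Before emitting a new busywait: skip the semaphore if one is
     * already in use for this engine, or if the engine was marked as
     * saturated. The mask is cleared when the context idles, so the
     * next burst of work optimistically retries semaphores.
     */
    static bool may_busywait(const struct context *ctx,
                             engine_mask_t in_use,
                             engine_mask_t engine_mask)
    {
            return !((in_use | ctx->saturated) & engine_mask);
    }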

With 1 client on Broxton J3455, relative fps normalized by %cpu:

x no semaphores
+ drm-tip
* patched
+------------------------------------------------------------------------+
|                                                    *                   |
|                                                    *+                  |
|                                                    **+                 |
|                                                    **+  x              |
|                                x               *  +**+  x              |
|                                x  x       *    *  +***x xx             |
|                                x  x       *    * *+***x *x             |
|                                x  x*   +  *    * *****x *x x           |
|                         +    x xx+x*   + ***   * ********* x   *       |
|                         +    x xx+x*   * *** +** ********* xx  *       |
|    *   +         ++++*  +    x*x****+*+* ***+*************+x*  *       |
|*+ +** *+ + +* + *++****** *xxx**********x***+*****************+*++    *|
|                                   |__________A_____M_____|             |
|                           |_______________A____M_________|             |
|                                 |____________A___M________|            |
+------------------------------------------------------------------------+
    N           Min           Max        Median           Avg        Stddev
x 120       2.60475       3.50941       3.31123     3.2143953    0.21117399
+ 120        2.3826       3.57077       3.25101     3.1414161    0.28146407
Difference at 95.0% confidence
-0.0729792 +/- 0.0629585
-2.27039% +/- 1.95864%
(Student's t, pooled s = 0.248814)
* 120       2.35536       3.66713        3.2849     3.2059917    0.24618565
No difference proven at 95.0% confidence

With 10 clients over-saturating the pipeline:

x no semaphores
+ drm-tip
* patched
+------------------------------------------------------------------------+
|                     ++                                        **       |
|                     ++                                        **       |
|                     ++                                        **       |
|                     ++                                        **       |
|                     ++                                    xx ***       |
|                     ++                                    xx ***       |
|                     ++                                    xxx***       |
|                     ++                                    xxx***       |
|                    +++                                    xxx***       |
|                    +++                                    xx****       |
|                    +++                                    xx****       |
|                    +++                                    xx****       |
|                    +++                                    xx****       |
|                    ++++                                   xx****       |
|                   +++++                                   xx****       |
|                   +++++                                 x x******      |
|                  ++++++                                 xxx*******     |
|                  ++++++                                 xxx*******     |
|                  ++++++                                 xxx*******     |
|                  ++++++                                 xx********     |
|                  ++++++                               xxxx********     |
|                  ++++++                               xxxx********     |
|                ++++++++                             xxxxx*********     |
|+ +  +        + ++++++++                           xxx*xx**********x*  *|
|                                                         |__A__|        |
|                 |__AM__|                                               |
|                                                            |__A_|      |
+------------------------------------------------------------------------+
    N           Min           Max        Median           Avg        Stddev
x 120       2.47855        2.8972       2.72376     2.7193402   0.074604933
+ 120       1.17367       1.77459       1.71977     1.6966782   0.085850697
Difference at 95.0% confidence
-1.02266 +/- 0.0203502
-37.607% +/- 0.748352%
(Student's t, pooled s = 0.0804246)
* 120       2.57868       3.00821       2.80142     2.7923878   0.058646477
Difference at 95.0% confidence
0.0730476 +/- 0.0169791
2.68622% +/- 0.624383%
(Student's t, pooled s = 0.0671018)

This indicates that we have recovered the regression from enabling
semaphores on this saturated setup, with a hint towards an overall
improvement.

Very similar results, though of smaller magnitude, are observed on
both Skylake (gt2) and Kabylake (gt4). This may be due to the reduced
impact of bus-cycles: where we see a 50% hit on Broxton, it is only
10% on the big core in this particular test.

One observation to make here is that for a greedy client trying to
maximise its own throughput, using semaphores is the right choice. It
is only from the holistic system-wide view, where the semaphores of
one client impact another and reduce the overall throughput, that we
would choose to disable semaphores.

The most noticeable negative impact this has is on the no-op
microbenchmarks, which are also notable for having no CPU bus load.
In particular, this increases the runtime and energy consumption of
gem_exec_whisper.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Dmitry Rogozhkin <dmitry.v.rogozhkin@intel.com>
Cc: Dmitry Ermilov <dmitry.ermilov@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190504070707.30902-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gt/intel_context.c
drivers/gpu/drm/i915/gt/intel_context_types.h
drivers/gpu/drm/i915/i915_request.c

index 1f1761fc6597d09ab6446c277fded7aceae96215..5b31e1e05ddd6423d708390adc338583ae1e82a0 100644 (file)
@@ -116,6 +116,7 @@ intel_context_init(struct intel_context *ce,
        ce->engine = engine;
        ce->ops = engine->cops;
        ce->sseu = engine->sseu;
+       ce->saturated = 0;
 
        INIT_LIST_HEAD(&ce->signal_link);
        INIT_LIST_HEAD(&ce->signals);
@@ -158,6 +159,7 @@ void intel_context_enter_engine(struct intel_context *ce)
 
 void intel_context_exit_engine(struct intel_context *ce)
 {
+       ce->saturated = 0;
        intel_engine_pm_put(ce->engine);
 }
 
index d5a7dbd0daeeff553b2cfaeab2c0541c4c796964..963a312430e6d37a8a3a627afab750d77df8c550 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/types.h>
 
 #include "i915_active_types.h"
+#include "intel_engine_types.h"
 #include "intel_sseu.h"
 
 struct i915_gem_context;
@@ -52,6 +53,8 @@ struct intel_context {
        atomic_t pin_count;
        struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
 
+       intel_engine_mask_t saturated; /* submitting semaphores too late? */
+
        /**
         * active_tracker: Active tracker for the external rq activity
         * on this intel_context object.
index d06c45305b03d9af0b7f9ea11e338ed087e6f073..ae9ce46e1b15055e886a4cfce0b661b8332032b5 100644 (file)
@@ -410,6 +410,26 @@ void __i915_request_submit(struct i915_request *request)
        if (i915_gem_context_is_banned(request->gem_context))
                i915_request_skip(request, -EIO);
 
+       /*
+        * Are we using semaphores when the gpu is already saturated?
+        *
+        * Using semaphores incurs a cost in having the GPU poll a
+        * memory location, busywaiting for it to change. The continual
+        * memory reads can have a noticeable impact on the rest of the
+        * system with the extra bus traffic, stalling the cpu as it too
+        * tries to access memory across the bus (perf stat -e bus-cycles).
+        *
+        * If we installed a semaphore on this request and we only submit
+        * the request after the signaler completed, that indicates the
+        * system is overloaded and using semaphores at this time only
+        * increases the amount of work we are doing. If so, we disable
+        * further use of semaphores until we are idle again, whence we
+        * optimistically try again.
+        */
+       if (request->sched.semaphores &&
+           i915_sw_fence_signaled(&request->semaphore))
+               request->hw_context->saturated |= request->sched.semaphores;
+
        /* We may be recursing from the signal callback of another i915 fence */
        spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
 
@@ -785,6 +805,24 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
                                             I915_FENCE_GFP);
 }
 
+static intel_engine_mask_t
+already_busywaiting(struct i915_request *rq)
+{
+       /*
+        * Polling a semaphore causes bus traffic, delaying other users of
+        * both the GPU and CPU. We want to limit the impact on others,
+        * while taking advantage of early submission to reduce GPU
+        * latency. Therefore we restrict ourselves to not using more
+        * than one semaphore from each source, and not using a semaphore
+        * if we have detected the engine is saturated (i.e. would not be
+        * submitted early and cause bus traffic reading an already passed
+        * semaphore).
+        *
+        * See the are-we-too-late? check in __i915_request_submit().
+        */
+       return rq->sched.semaphores | rq->hw_context->saturated;
+}
+
 static int
 emit_semaphore_wait(struct i915_request *to,
                    struct i915_request *from,
@@ -798,7 +836,7 @@ emit_semaphore_wait(struct i915_request *to,
        GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
 
        /* Just emit the first semaphore we see as request space is limited. */
-       if (to->sched.semaphores & from->engine->mask)
+       if (already_busywaiting(to) & from->engine->mask)
                return i915_sw_fence_await_dma_fence(&to->submit,
                                                     &from->fence, 0,
                                                     I915_FENCE_GFP);