git.baikalelectronics.ru Git - kernel.git/commitdiff
drm/i915/gt: Prefer soft-rc6 over RPS DOWN_TIMEOUT
author: Chris Wilson <chris@chris-wilson.co.uk>
Wed, 22 Apr 2020 00:17:01 +0000 (01:17 +0100)
committer: Chris Wilson <chris@chris-wilson.co.uk>
Fri, 24 Apr 2020 16:20:58 +0000 (17:20 +0100)
The RPS DOWN_TIMEOUT interrupt is signaled after a period of rc6, and
upon receipt of that interrupt we reprogram the GPU clocks down to the
next idle notch [to help conserve power during rc6]. However, on
execlists, we benefit from soft-rc6 immediately parking the GPU and
setting idle frequencies upon idling [within a jiffie], and here the
interrupt prevents us from restarting from our last frequency.

In the process, we can simply opt for a static pm_events mask and rely
on the enable/disable interrupts to flush the worker on parking.

This will reduce the amount of oscillation observed during steady
workloads with microsleeps, as each time the rc6 timeout occurs we
immediately follow with a waitboost for a dropped frame.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200422001703.1697-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gt/intel_rps.c

index 4dcfae16a7cea7c9f016c4cfad56905ede4ba725..785cd58fba76bf3f6387f02a683e2545b0e4c2d0 100644 (file)
@@ -57,7 +57,7 @@ static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
        if (val < rps->max_freq_softlimit)
                mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
 
-       mask &= READ_ONCE(rps->pm_events);
+       mask &= rps->pm_events;
 
        return rps_pm_sanitize_mask(rps, ~mask);
 }
@@ -70,19 +70,9 @@ static void rps_reset_ei(struct intel_rps *rps)
 static void rps_enable_interrupts(struct intel_rps *rps)
 {
        struct intel_gt *gt = rps_to_gt(rps);
-       u32 events;
 
        rps_reset_ei(rps);
 
-       if (IS_VALLEYVIEW(gt->i915))
-               /* WaGsvRC0ResidencyMethod:vlv */
-               events = GEN6_PM_RP_UP_EI_EXPIRED;
-       else
-               events = (GEN6_PM_RP_UP_THRESHOLD |
-                         GEN6_PM_RP_DOWN_THRESHOLD |
-                         GEN6_PM_RP_DOWN_TIMEOUT);
-       WRITE_ONCE(rps->pm_events, events);
-
        spin_lock_irq(&gt->irq_lock);
        gen6_gt_pm_enable_irq(gt, rps->pm_events);
        spin_unlock_irq(&gt->irq_lock);
@@ -120,8 +110,6 @@ static void rps_disable_interrupts(struct intel_rps *rps)
 {
        struct intel_gt *gt = rps_to_gt(rps);
 
-       WRITE_ONCE(rps->pm_events, 0);
-
        intel_uncore_write(gt->uncore,
                           GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
 
@@ -919,12 +907,10 @@ static bool gen9_rps_enable(struct intel_rps *rps)
                intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
                                      GEN9_FREQUENCY(rps->rp1_freq));
 
-       /* 1 second timeout */
-       intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT,
-                             GT_INTERVAL_FROM_US(i915, 1000000));
-
        intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa);
 
+       rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;
+
        return rps_reset(rps);
 }
 
@@ -935,12 +921,10 @@ static bool gen8_rps_enable(struct intel_rps *rps)
        intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
                              HSW_FREQUENCY(rps->rp1_freq));
 
-       /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
-       intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT,
-                             100000000 / 128); /* 1 second timeout */
-
        intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
 
+       rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;
+
        return rps_reset(rps);
 }
 
@@ -952,6 +936,10 @@ static bool gen6_rps_enable(struct intel_rps *rps)
        intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000);
        intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
 
+       rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
+                         GEN6_PM_RP_DOWN_THRESHOLD |
+                         GEN6_PM_RP_DOWN_TIMEOUT);
+
        return rps_reset(rps);
 }
 
@@ -1037,6 +1025,10 @@ static bool chv_rps_enable(struct intel_rps *rps)
                              GEN6_RP_UP_BUSY_AVG |
                              GEN6_RP_DOWN_IDLE_AVG);
 
+       rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
+                         GEN6_PM_RP_DOWN_THRESHOLD |
+                         GEN6_PM_RP_DOWN_TIMEOUT);
+
        /* Setting Fixed Bias */
        vlv_punit_get(i915);
 
@@ -1135,6 +1127,9 @@ static bool vlv_rps_enable(struct intel_rps *rps)
                              GEN6_RP_UP_BUSY_AVG |
                              GEN6_RP_DOWN_IDLE_CONT);
 
+       /* WaGsvRC0ResidencyMethod:vlv */
+       rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;
+
        vlv_punit_get(i915);
 
        /* Setting Fixed Bias */
@@ -1469,7 +1464,7 @@ static void rps_work(struct work_struct *work)
        u32 pm_iir = 0;
 
        spin_lock_irq(&gt->irq_lock);
-       pm_iir = fetch_and_zero(&rps->pm_iir) & READ_ONCE(rps->pm_events);
+       pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events;
        client_boost = atomic_read(&rps->num_waiters);
        spin_unlock_irq(&gt->irq_lock);
 
@@ -1572,7 +1567,7 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
        struct intel_gt *gt = rps_to_gt(rps);
        u32 events;
 
-       events = pm_iir & READ_ONCE(rps->pm_events);
+       events = pm_iir & rps->pm_events;
        if (events) {
                spin_lock(&gt->irq_lock);