struct drm_i915_private *i915 = gt->i915;
struct intel_rps *rps = &gt->rps;
- seq_printf(m, "RPS enabled? %d\n", rps->enabled);
+ seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
+ seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
seq_printf(m, "GPU busy? %s\n", yesno(gt->awake));
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
- if (INTEL_GEN(i915) >= 6 && rps->enabled && gt->awake) {
+ if (INTEL_GEN(i915) >= 6 && intel_rps_is_active(rps)) {
struct intel_uncore *uncore = gt->uncore;
u32 rpup, rpupei;
u32 rpdown, rpdownei;
mutex_lock(&rps->power.mutex);
if (interactive) {
- if (!rps->power.interactive++ && READ_ONCE(rps->active))
+ if (!rps->power.interactive++ && intel_rps_is_active(rps))
rps_set_power(rps, HIGH_POWER);
} else {
GEM_BUG_ON(!rps->power.interactive);
void intel_rps_unpark(struct intel_rps *rps)
{
- if (!rps->enabled)
+ if (!intel_rps_is_enabled(rps))
return;
GT_TRACE(rps_to_gt(rps), "unpark:%x\n", rps->cur_freq);
*/
mutex_lock(&rps->lock);
- WRITE_ONCE(rps->active, true);
-
+ intel_rps_set_active(rps);
intel_rps_set(rps,
clamp(rps->cur_freq,
rps->min_freq_softlimit,
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- if (!rps->enabled)
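+ /* test_and_clear_bit() reports the old ACTIVE state, so the park
+  * sequence below runs exactly once, on the falling edge.
+  */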
+ if (!intel_rps_clear_active(rps))
return;
if (INTEL_GEN(i915) >= 6)
rps_disable_interrupts(rps);
- WRITE_ONCE(rps->active, false);
if (rps->last_freq <= rps->idle_freq)
return;
struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
unsigned long flags;
- if (i915_request_signaled(rq) || !READ_ONCE(rps->active))
+ if (i915_request_signaled(rq) || !intel_rps_is_active(rps))
return;
/* Serializes with i915_request_retire() */
GEM_BUG_ON(val > rps->max_freq);
GEM_BUG_ON(val < rps->min_freq);
- if (rps->active) {
+ if (intel_rps_is_active(rps)) {
err = rps_set(rps, val, true);
if (err)
return err;
{
struct drm_i915_private *i915 = rps_to_i915(rps);
struct intel_uncore *uncore = rps_to_uncore(rps);
+ bool enabled = false;
if (!HAS_RPS(i915))
return;
intel_gt_check_clock_frequency(rps_to_gt(rps));
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
- if (IS_CHERRYVIEW(i915))
- rps->enabled = chv_rps_enable(rps);
+ if (rps->max_freq <= rps->min_freq)
+ /* leave disabled, no room for dynamic reclocking */;
+ else if (IS_CHERRYVIEW(i915))
+ enabled = chv_rps_enable(rps);
else if (IS_VALLEYVIEW(i915))
- rps->enabled = vlv_rps_enable(rps);
+ enabled = vlv_rps_enable(rps);
else if (INTEL_GEN(i915) >= 9)
- rps->enabled = gen9_rps_enable(rps);
+ enabled = gen9_rps_enable(rps);
else if (INTEL_GEN(i915) >= 8)
- rps->enabled = gen8_rps_enable(rps);
+ enabled = gen8_rps_enable(rps);
else if (INTEL_GEN(i915) >= 6)
- rps->enabled = gen6_rps_enable(rps);
+ enabled = gen6_rps_enable(rps);
else if (IS_IRONLAKE_M(i915))
- rps->enabled = gen5_rps_enable(rps);
+ enabled = gen5_rps_enable(rps);
+ else
+ MISSING_CASE(INTEL_GEN(i915));
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
- if (!rps->enabled)
+ if (!enabled)
return;
GT_TRACE(rps_to_gt(rps),
GEM_BUG_ON(rps->efficient_freq < rps->min_freq);
GEM_BUG_ON(rps->efficient_freq > rps->max_freq);
+
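+ /* Only publish ENABLED once platform setup has fully succeeded */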
+ intel_rps_set_enabled(rps);
}
static void gen6_rps_disable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- rps->enabled = false;
+ intel_rps_clear_enabled(rps);
if (INTEL_GEN(i915) >= 6)
gen6_rps_disable(rps);
goto out;
mutex_lock(&rps->lock);
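+ /* The worker can run concurrently with parking; bail if deactivated */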
+ if (!intel_rps_is_active(rps)) {
+ mutex_unlock(&rps->lock);
+ return;
+ }
pm_iir |= vlv_wa_c0_ei(rps, pm_iir);
void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
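+
+/*
+ * RPS state is tracked as two atomic bits in rps->flags: ENABLED is
+ * set by intel_rps_enable() when the platform supports reclocking,
+ * and ACTIVE follows GT unpark/park. intel_rps_clear_active() uses
+ * test_and_clear_bit() so the park path acts only on the falling edge.
+ */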
+static inline bool intel_rps_is_enabled(const struct intel_rps *rps)
+{
+ return test_bit(INTEL_RPS_ENABLED, &rps->flags);
+}
+
+static inline void intel_rps_set_enabled(struct intel_rps *rps)
+{
+ set_bit(INTEL_RPS_ENABLED, &rps->flags);
+}
+
+static inline void intel_rps_clear_enabled(struct intel_rps *rps)
+{
+ clear_bit(INTEL_RPS_ENABLED, &rps->flags);
+}
+
+static inline bool intel_rps_is_active(const struct intel_rps *rps)
+{
+ return test_bit(INTEL_RPS_ACTIVE, &rps->flags);
+}
+
+static inline void intel_rps_set_active(struct intel_rps *rps)
+{
+ set_bit(INTEL_RPS_ACTIVE, &rps->flags);
+}
+
+static inline bool intel_rps_clear_active(struct intel_rps *rps)
+{
+ return test_and_clear_bit(INTEL_RPS_ACTIVE, &rps->flags);
+}
+
#endif /* INTEL_RPS_H */
u32 media_c0;
};
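+/* Bit indices into intel_rps.flags */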
+enum {
+ INTEL_RPS_ENABLED = 0,
+ INTEL_RPS_ACTIVE,
+};
+
struct intel_rps {
struct mutex lock; /* protects enabling and the worker */
* dev_priv->irq_lock
*/
struct work_struct work;
- bool enabled;
- bool active;
+ unsigned long flags;
u32 pm_iir;
/* PM interrupt bits that should never be masked */
static u8 rps_set_check(struct intel_rps *rps, u8 freq)
{
mutex_lock(&rps->lock);
- GEM_BUG_ON(!rps->active);
+ GEM_BUG_ON(!intel_rps_is_active(rps));
intel_rps_set(rps, freq);
GEM_BUG_ON(rps->last_freq != freq);
mutex_unlock(&rps->lock);
struct igt_spinner spin;
int err = 0;
- if (!rps->enabled)
+ if (!intel_rps_is_enabled(rps))
return 0;
if (igt_spinner_init(&spin, gt))
* will be lower than requested.
*/
- if (!rps->enabled || rps->max_freq <= rps->min_freq)
+ if (!intel_rps_is_enabled(rps))
return 0;
if (IS_CHERRYVIEW(gt->i915)) /* XXX fragile PCU */
* frequency, the actual frequency, and the observed clock rate.
*/
- if (!rps->enabled || rps->max_freq <= rps->min_freq)
+ if (!intel_rps_is_enabled(rps))
return 0;
if (INTEL_GEN(gt->i915) < 8) /* for CS simplicity */
* frequency, the actual frequency, and the observed clock rate.
*/
- if (!rps->enabled || rps->max_freq <= rps->min_freq)
+ if (!intel_rps_is_enabled(rps))
return 0;
if (INTEL_GEN(gt->i915) < 8) /* for CS simplicity */
return -EIO;
}
- if (!rps->active) {
+ if (!intel_rps_is_active(rps)) {
pr_err("%s: RPS not enabled on starting spinner\n",
engine->name);
igt_spinner_end(spin);
* First, let's check whether or not we are receiving interrupts.
*/
- if (!rps->enabled || rps->max_freq <= rps->min_freq)
+ if (!intel_rps_is_enabled(rps))
return 0;
intel_gt_pm_get(gt);
unsigned long saved_heartbeat;
intel_gt_pm_wait_for_idle(engine->gt);
- GEM_BUG_ON(rps->active);
+ GEM_BUG_ON(intel_rps_is_active(rps));
saved_heartbeat = engine_heartbeat_disable(engine);
* that theory.
*/
- if (!rps->enabled || rps->max_freq <= rps->min_freq)
+ if (!intel_rps_is_enabled(rps))
return 0;
if (!librapl_energy_uJ())
* moving parts into dynamic reclocking based on load.
*/
- if (!rps->enabled || rps->max_freq <= rps->min_freq)
+ if (!intel_rps_is_enabled(rps))
return 0;
if (igt_spinner_init(&spin, gt))
continue;
intel_gt_pm_wait_for_idle(gt);
- GEM_BUG_ON(rps->active);
+ GEM_BUG_ON(intel_rps_is_active(rps));
rps->cur_freq = rps->min_freq;
intel_engine_pm_get(engine);
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_rps *rps = &dev_priv->gt.rps;
- seq_printf(m, "RPS enabled? %d\n", rps->enabled);
+ seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
+ seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
- if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
+ if (INTEL_GEN(dev_priv) >= 6 && intel_rps_is_active(rps)) {
u32 rpup, rpupei;
u32 rpdown, rpdownei;