}
err = 0;
- with_intel_runtime_pm(&ce->engine->i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(ce->engine->uncore->rpm, wakeref)
err = ce->ops->pin(ce);
if (err)
goto err;
spin_unlock_irqrestore(&engine->active.lock, flags);
drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
- wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm);
+ wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
if (wakeref) {
intel_engine_print_registers(engine, m);
- intel_runtime_pm_put(&engine->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(engine->uncore->rpm, wakeref);
} else {
drm_printf(m, "\tDevice is asleep; skipping register dump\n");
}
void intel_engine_init__pm(struct intel_engine_cs *engine)
{
- struct intel_runtime_pm *rpm = &engine->i915->runtime_pm;
+ struct intel_runtime_pm *rpm = engine->uncore->rpm;
intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
}
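For context: this conversion works because uncore carries a pointer to the same intel_runtime_pm instance that was previously reached through i915, so only the way the pointer is obtained changes. A minimal sketch of the assumed wiring follows; the function name here is illustrative (modelled on the early uncore init), not taken from this patch:

/* Sketch (assumption): during early uncore init the rpm pointer is made to
 * alias the device-level runtime_pm, so engine->uncore->rpm, gt->uncore->rpm
 * and &i915->runtime_pm all name the same object.
 */
static void example_uncore_init_early(struct intel_uncore *uncore,
				      struct drm_i915_private *i915)
{
	uncore->rpm = &i915->runtime_pm;
	/* ... remaining uncore setup elided ... */
}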
void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
{
- struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
intel_wakeref_t wakeref;
/*
wmb();
- if (INTEL_INFO(i915)->has_coherent_ggtt)
+ if (INTEL_INFO(gt->i915)->has_coherent_ggtt)
return;
intel_gt_chipset_flush(gt);
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- struct intel_uncore *uncore = gt->uncore;
+ with_intel_runtime_pm(uncore->rpm, wakeref) {
unsigned long flags;
spin_lock_irqsave(&uncore->lock, flags);
void intel_gt_pm_init_early(struct intel_gt *gt)
{
- intel_wakeref_init(&gt->wakeref, &gt->i915->runtime_pm, &wf_ops);
+ intel_wakeref_init(&gt->wakeref, gt->uncore->rpm, &wf_ops);
BLOCKING_INIT_NOTIFIER_HEAD(&gt->pm_notifications);
}
/* We expect to be idle already; but also want to be independent */
wait_for_idle(gt);
- with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
intel_rc6_disable(&gt->rc6);
}
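Each with_intel_runtime_pm() call site in these hunks changes only its first argument: the helper is parameterised on the intel_runtime_pm pointer and acquires/releases a wakeref around the statement that follows it. Roughly (simplified sketch, assumption: approximates the in-tree macro rather than quoting it):

/* Simplified sketch (assumption): take a wakeref before the body and drop
 * it afterwards, given only the intel_runtime_pm pointer.
 */
#define with_intel_runtime_pm(rpm, wf) \
	for ((wf) = intel_runtime_pm_get(rpm); (wf); \
	     intel_runtime_pm_put((rpm), (wf)), (wf) = 0)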
if (intel_gt_is_wedged(gt))
return;
- wakeref = intel_runtime_pm_get_if_in_use(&gt->i915->runtime_pm);
+ wakeref = intel_runtime_pm_get_if_in_use(gt->uncore->rpm);
if (!wakeref)
return;
if (hung)
hangcheck_declare_hang(gt, hung, stuck);
- intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
/* Reset timer in case GPU hangs without another request being added */
intel_gt_queue_hangcheck(gt);
intel_wakeref_t wakeref;
mutex_lock(&gt->reset.mutex);
- with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
__intel_gt_set_wedged(gt);
mutex_unlock(&gt->reset.mutex);
}
* isn't the case at least when we get here by doing a
* simulated reset via debugfs, so get an RPM reference.
*/
- wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
engine_mask &= INTEL_INFO(gt->i915)->engine_mask;
wake_up_all(&gt->reset.queue);
out:
- intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
if (intel_gt_is_wedged(gt))
return -EIO; /* we're long past hope of a successful reset */
- wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
drain_delayed_work(&gt->hangcheck.work); /* flush param */
err = intel_gt_live_subtests(tests, gt);
i915_modparams.enable_hangcheck = saved_hangcheck;
- intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
return err;
}
/* Check that we can issue a global GPU reset */
igt_global_reset_lock(gt);
- wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
reset_count = i915_reset_count(&gt->i915->gpu_error);
err = -EINVAL;
}
- intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
igt_global_reset_unlock(gt);
if (intel_gt_is_wedged(gt))
/* Check that we can recover a wedged device with a GPU reset */
igt_global_reset_lock(gt);
- wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
intel_gt_set_wedged(gt);
GEM_BUG_ON(!intel_gt_is_wedged(gt));
intel_gt_reset(gt, ALL_ENGINES, NULL);
- intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
igt_global_reset_unlock(gt);
return intel_gt_is_wedged(gt) ? -EIO : 0;
GEM_BUG_ON(IS_ERR(ce));
rq = ERR_PTR(-ENODEV);
- with_intel_runtime_pm(&engine->i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(engine->uncore->rpm, wakeref)
rq = igt_spinner_create_request(spin, ce, MI_NOOP);
intel_context_put(ce);
if (err)
goto out_spin;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(engine->uncore->rpm, wakeref)
err = reset(engine);
igt_spinner_end(&spin);
void intel_guc_log_relay_flush(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
intel_wakeref_t wakeref;
/*
*/
flush_work(&log->relay.flush_work);
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(guc_to_gt(guc)->uncore->rpm, wakeref)
guc_action_flush_log(guc);
/* GuC would have updated log buffer by now, so capture it */
if (!intel_huc_is_supported(huc))
return -ENODEV;
- with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
status = intel_uncore_read(gt->uncore, huc->status.reg);
return (status & huc->status.mask) == huc->status.value;
if (!intel_guc_is_running(guc))
return;
- with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(uc_to_gt(uc)->uncore->rpm, wakeref)
intel_uc_runtime_suspend(uc);
}
{
struct intel_gt *gt = guc_to_gt(guc);
- assert_rpm_wakelock_held(&gt->i915->runtime_pm);
+ assert_rpm_wakelock_held(gt->uncore->rpm);
spin_lock_irq(&gt->irq_lock);
gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
{
struct intel_gt *gt = guc_to_gt(guc);
- assert_rpm_wakelock_held(&gt->i915->runtime_pm);
+ assert_rpm_wakelock_held(gt->uncore->rpm);
spin_lock_irq(&gt->irq_lock);
if (!guc->interrupts.enabled) {
{
struct intel_gt *gt = guc_to_gt(guc);
- assert_rpm_wakelock_held(&gt->i915->runtime_pm);
+ assert_rpm_wakelock_held(gt->uncore->rpm);
spin_lock_irq(&gt->irq_lock);
guc->interrupts.enabled = false;