struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *i915 = to_i915(dev);
+ struct intel_runtime_pm *rpm = &i915->runtime_pm;
struct i915_ggtt *ggtt = &i915->ggtt;
bool write = area->vm_flags & VM_WRITE;
intel_wakeref_t wakeref;
if (ret)
goto err;
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(rpm);
srcu = i915_reset_trylock(i915);
if (srcu < 0) {
goto err_fence;
/* Mark as being mmapped into userspace for later revocation */
- assert_rpm_wakelock_held(&i915->runtime_pm);
+ assert_rpm_wakelock_held(rpm);
if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
err_reset:
i915_reset_unlock(i915, srcu);
err_rpm:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(rpm, wakeref);
i915_gem_object_unpin_pages(obj);
err:
switch (ret) {
* wakeref.
*/
lockdep_assert_held(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (!obj->userfault_count)
goto out;
wmb();
out:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
static int create_mmap_offset(struct drm_i915_gem_object *obj)
struct drm_i915_gem_object *obj, *on;
intel_wakeref_t wakeref;
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
llist_for_each_entry_safe(obj, on, freed, freed) {
struct i915_vma *vma, *vn;
cond_resched();
}
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
void i915_gem_flush_free_objects(struct drm_i915_private *i915)
* we will force the wake during oom-notifier.
*/
if (shrink & I915_SHRINK_BOUND) {
- wakeref = intel_runtime_pm_get_if_in_use(i915);
+ wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
if (!wakeref)
shrink &= ~I915_SHRINK_BOUND;
}
}
if (shrink & I915_SHRINK_BOUND)
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
i915_retire_requests(i915);
return PTR_ERR(file);
mutex_lock(&dev_priv->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
ctx = live_context(dev_priv, file);
if (IS_ERR(ctx)) {
err = i915_subtests(tests, ctx);
out_unlock:
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
mutex_unlock(&dev_priv->drm.struct_mutex);
mock_file_free(dev_priv, file);
values = offsets + ncachelines;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for (over = igt_coherency_mode; over->name; over++) {
if (!over->set)
continue;
}
}
unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
kfree(offsets);
return err;
return PTR_ERR(file);
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
}
out_unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
return err;
goto out_unlock;
}
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
ce = i915_gem_context_get_engine(ctx, RCS0);
if (IS_ERR(ce)) {
out_context:
intel_context_put(ce);
out_rpm:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
i915_gem_object_put(obj);
out_unlock:
GEM_BUG_ON(ctx_b->vm->total != vm_total);
vm_total -= I915_GTT_PAGE_SIZE;
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
count = 0;
for_each_engine(engine, i915, id) {
count, RUNTIME_INFO(i915)->num_engines);
out_rpm:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
out_unlock:
if (igt_live_test_end(&t))
err = -EIO;
}
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (1) {
IGT_TIMEOUT(end);
}
out_unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_object_unpin_pages(obj);
out:
return true;
/* If the whole device is asleep, the engine must be idle */
- wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+ wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
if (!wakeref)
return true;
!(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
idle = false;
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return idle;
}
rcu_read_unlock();
- wakeref = intel_runtime_pm_get_if_in_use(engine->i915);
+ wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm);
if (wakeref) {
intel_engine_print_registers(engine, m);
- intel_runtime_pm_put(engine->i915, wakeref);
+ intel_runtime_pm_put(&engine->i915->runtime_pm, wakeref);
} else {
drm_printf(m, "\tDevice is asleep; skipping register dump\n");
}
if (i915_terminally_wedged(dev_priv))
return;
- wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+ wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
if (!wakeref)
return;
if (hung)
hangcheck_declare_hang(dev_priv, hung, stuck);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
/* Reset timer in case GPU hangs without another request being added */
i915_queue_hangcheck(dev_priv);
* isn't the case at least when we get here by doing a
* simulated reset via debugfs, so get an RPM reference.
*/
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
engine_mask &= INTEL_INFO(i915)->engine_mask;
wake_up_all(&error->reset_queue);
out:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
int i915_reset_trylock(struct drm_i915_private *i915)
}
i915_gem_context_clear_bannable(ctx);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
reset_count = i915_reset_count(&i915->gpu_error);
count = 0;
do {
err = igt_flush_test(i915, I915_WAIT_LOCKED);
mutex_unlock(&i915->drm.struct_mutex);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
out:
mock_file_free(i915, file);
}
i915_gem_context_clear_bannable(ctx);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for_each_engine(engine, i915, id) {
unsigned int reset_count, reset_engine_count;
unsigned int count;
err = igt_flush_test(i915, I915_WAIT_LOCKED);
mutex_unlock(&i915->drm.struct_mutex);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
out:
mock_file_free(i915, file);
if (i915_reset_failed(i915))
if (i915_terminally_wedged(i915))
return -EIO; /* we're long past hope of a successful reset */
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
drain_delayed_work(&i915->gpu_error.hangcheck_work); /* flush param */
mutex_unlock(&i915->drm.struct_mutex);
i915_modparams.enable_hangcheck = saved_hangcheck;
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return err;
}
return 0;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (igt_spinner_init(&spin, i915))
goto err_unlock;
igt_spinner_fini(&spin);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
*/
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
ctx_hi = kernel_context(i915);
if (!ctx_hi)
err_unlock:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
pr_err("Logical preemption supported, but not exposed\n");
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
return 0;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
return 0; /* presume black box */
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (preempt_client_init(i915, &a))
goto err_unlock;
err_unlock:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
return 0;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
goto err_unlock;
err_unlock:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
return 0;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (preempt_client_init(i915, &hi))
goto err_unlock;
err_unlock:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
return 0;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
return -ENOMEM;
mutex_lock(&smoke.i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(smoke.i915);
+ wakeref = intel_runtime_pm_get(&smoke.i915->runtime_pm);
smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
if (IS_ERR(smoke.batch)) {
err_batch:
i915_gem_object_put(smoke.batch);
err_unlock:
- intel_runtime_pm_put(smoke.i915, wakeref);
+ intel_runtime_pm_put(&smoke.i915->runtime_pm, wakeref);
mutex_unlock(&smoke.i915->drm.struct_mutex);
kfree(smoke.contexts);
/* Check that we can recover a wedged device with a GPU reset */
igt_global_reset_lock(i915);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
i915_gem_set_wedged(i915);
GEM_BUG_ON(!i915_reset_failed(i915));
i915_reset(i915, ALL_ENGINES, NULL);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
igt_global_reset_unlock(i915);
return i915_reset_failed(i915) ? -EIO : 0;
if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
return 0;
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
mutex_unlock(&i915->drm.struct_mutex);
file = mock_file(i915);
mock_file_free(i915, file);
mutex_lock(&i915->drm.struct_mutex);
out_rpm:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return err;
}
pr_info("Verifying after GPU reset...\n");
igt_global_reset_lock(i915);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
reference_lists_init(i915, &lists);
out:
kernel_context_close(ctx);
reference_lists_fini(i915, &lists);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
igt_global_reset_unlock(i915);
return ok ? 0 : -ESRCH;
return PTR_ERR(ctx);
igt_global_reset_lock(i915);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
reference_lists_init(i915, &lists);
err:
reference_lists_fini(i915, &lists);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
igt_global_reset_unlock(i915);
kernel_context_close(ctx);
if (WARN_ON(!vgpu_fence_sz(vgpu)))
return;
- intel_runtime_pm_get(dev_priv);
+ intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&dev_priv->drm.struct_mutex);
_clear_vgpu_fence(vgpu);
}
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put_unchecked(dev_priv);
+ intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
struct i915_fence_reg *reg;
int i;
- intel_runtime_pm_get(dev_priv);
+ intel_runtime_pm_get(rpm);
/* Request fences from host */
mutex_lock(&dev_priv->drm.struct_mutex);
_clear_vgpu_fence(vgpu);
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put_unchecked(dev_priv);
+ intel_runtime_pm_put_unchecked(rpm);
return 0;
out_free_fence:
gvt_vgpu_err("Failed to alloc fences\n");
vgpu->fence.regs[i] = NULL;
}
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put_unchecked(dev_priv);
+ intel_runtime_pm_put_unchecked(rpm);
return -ENOSPC;
}
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- intel_runtime_pm_get(dev_priv);
+ intel_runtime_pm_get(&dev_priv->runtime_pm);
_clear_vgpu_fence(vgpu);
- intel_runtime_pm_put_unchecked(dev_priv);
+ intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
/**
static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
{
- intel_runtime_pm_get(dev_priv);
+ intel_runtime_pm_get(&dev_priv->runtime_pm);
}
static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
{
- intel_runtime_pm_put_unchecked(dev_priv);
+ intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
/**
scheduler->current_vgpu = NULL;
}
- intel_runtime_pm_get(dev_priv);
+ intel_runtime_pm_get(&dev_priv->runtime_pm);
spin_lock_bh(&scheduler->mmio_context_lock);
for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
if (scheduler->engine_owner[ring_id] == vgpu) {
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
- intel_runtime_pm_put_unchecked(dev_priv);
+ intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
mutex_unlock(&vgpu->gvt->sched_lock);
}
* as there is only one pre-allocated buf-obj for shadow.
*/
if (list_empty(workload_q_head(vgpu, ring_id))) {
- intel_runtime_pm_get(dev_priv);
+ intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&dev_priv->drm.struct_mutex);
ret = intel_gvt_scan_and_shadow_workload(workload);
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put_unchecked(dev_priv);
+ intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
if (ret) {
intel_wakeref_t wakeref;
int i, pipe;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
if (IS_CHERRYVIEW(dev_priv)) {
intel_wakeref_t pref;
}
}
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
intel_wakeref_t wakeref;
int ret = 0;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
if (IS_GEN(dev_priv, 5)) {
u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return ret;
}
if (!HAS_FBC(dev_priv))
return -ENODEV;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&fbc->lock);
if (intel_fbc_is_active(dev_priv))
}
mutex_unlock(&fbc->lock);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
if (!HAS_IPS(dev_priv))
return -ENODEV;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
seq_printf(m, "Enabled by kernel parameter: %s\n",
yesno(i915_modparams.enable_ips));
seq_puts(m, "Currently: disabled\n");
}
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
ia_freq = gpu_freq;
sandybridge_pcode_read(dev_priv,
((ia_freq >> 0) & 0xff) * 100,
((ia_freq >> 8) & 0xff) * 100);
}
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
struct intel_uncore *uncore = &dev_priv->uncore;
intel_wakeref_t wakeref;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
swizzle_string(dev_priv->mm.bit_6_swizzle_x));
if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
seq_puts(m, "L-shaped memory detected\n");
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
if (!psr->sink_support)
return 0;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&psr->lock);
if (psr->enabled)
unlock:
mutex_unlock(&psr->lock);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
ret = intel_psr_debug_set(dev_priv, val);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return ret;
}
csr = &dev_priv->csr;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
seq_printf(m, "path: %s\n", csr->fw_path);
seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
struct drm_connector_list_iter conn_iter;
intel_wakeref_t wakeref;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
seq_printf(m, "CRTC info\n");
seq_printf(m, "---------\n");
drm_connector_list_iter_end(&conn_iter);
mutex_unlock(&dev->mode_config.mutex);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
enum intel_engine_id id;
struct drm_printer p;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
seq_printf(m, "GT awake? %s [%d]\n",
yesno(dev_priv->gt.awake),
for_each_engine(engine, dev_priv, id)
intel_engine_dump(engine, &p, "%s\n", engine->name);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
if (INTEL_GEN(i915) < 6)
return 0;
- file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
+ file->private_data =
+ (void *)(uintptr_t)intel_runtime_pm_get(&i915->runtime_pm);
intel_uncore_forcewake_user_get(&i915->uncore);
return 0;
return 0;
intel_uncore_forcewake_user_put(&i915->uncore);
- intel_runtime_pm_put(i915,
+ intel_runtime_pm_put(&i915->runtime_pm,
(intel_wakeref_t)(uintptr_t)file->private_data);
return 0;
if (ret)
return ret;
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE |
PIN_NONFAULT |
i915_vma_unpin(vma);
}
out_unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return ret;
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = &i915->ggtt;
+ struct intel_runtime_pm *rpm = &i915->runtime_pm;
intel_wakeref_t wakeref;
struct drm_mm_node node;
struct dma_fence *fence;
* This easily dwarfs any performance advantage from
* using the cache bypass of indirect GGTT access.
*/
- wakeref = intel_runtime_pm_get_if_in_use(i915);
+ wakeref = intel_runtime_pm_get_if_in_use(rpm);
if (!wakeref) {
ret = -EFAULT;
goto out_unlock;
}
} else {
/* No backing pages, no fallback, we must force GGTT access */
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(rpm);
}
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
i915_vma_unpin(vma);
}
out_rpm:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(rpm, wakeref);
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
return ret;
GEM_TRACE("\n");
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
/*
intel_gt_sanitize(i915, false);
intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_lock(&i915->drm.struct_mutex);
i915_gem_contexts_lost(i915);
* the objects as well, see i915_gem_freeze()
*/
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
i915_gem_shrink(i915, -1UL, NULL, ~0);
i915_gem_drain_freed_objects(i915);
i915_gem_object_unlock(obj);
}
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return 0;
}
* be cleared before we can use any other fences to ensure that
* the new fences do not overlap the elided clears, confusing HW.
*/
- wakeref = intel_runtime_pm_get_if_in_use(fence->i915);
+ wakeref = intel_runtime_pm_get_if_in_use(&fence->i915->runtime_pm);
if (!wakeref) {
GEM_BUG_ON(vma);
return 0;
list_move_tail(&fence->link, &fence->i915->ggtt.fence_list);
}
- intel_runtime_pm_put(fence->i915, wakeref);
+ intel_runtime_pm_put(&fence->i915->runtime_pm, wakeref);
return 0;
}
unsigned int pde;
bool flush = false;
- wakeref = intel_runtime_pm_get(vm->i915);
+ wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
spin_lock(&ppgtt->base.pd.lock);
gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) {
gen6_ggtt_invalidate(vm->i915);
}
- intel_runtime_pm_put(vm->i915, wakeref);
+ intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
return 0;
unwind_out:
- intel_runtime_pm_put(vm->i915, wakeref);
+ intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
gen6_ppgtt_clear_range(vm, from, start - from);
return -ENOMEM;
}
free_oa_buffer(dev_priv);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
- intel_runtime_pm_put(dev_priv, stream->wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref);
if (stream->ctx)
oa_put_render_ctx_id(stream);
* In our case we are expecting that taking pm + FORCEWAKE
* references will effectively disable RC6.
*/
- stream->wakeref = intel_runtime_pm_get(dev_priv);
+ stream->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
ret = alloc_oa_buffer(dev_priv);
put_oa_config(dev_priv, stream->oa_config);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
- intel_runtime_pm_put(dev_priv, stream->wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref);
err_config:
if (stream->ctx)
wakeref = 0;
if (READ_ONCE(dev_priv->gt.awake))
- wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+ wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
if (!wakeref)
return;
}
spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
static void
static u64 get_rc6(struct drm_i915_private *i915)
{
#if IS_ENABLED(CONFIG_PM)
+ struct intel_runtime_pm *rpm = &i915->runtime_pm;
intel_wakeref_t wakeref;
unsigned long flags;
u64 val;
- wakeref = intel_runtime_pm_get_if_in_use(i915);
+ wakeref = intel_runtime_pm_get_if_in_use(rpm);
if (wakeref) {
val = __get_rc6(i915);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(rpm, wakeref);
/*
* If we are coming back from being runtime suspended we must
spin_unlock_irqrestore(&i915->pmu.lock, flags);
} else {
- struct pci_dev *pdev = i915->drm.pdev;
- struct device *kdev = &pdev->dev;
+ struct device *kdev = rpm->kdev;
/*
* We are runtime suspended.
intel_wakeref_t wakeref;
u32 freq;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_punit_get(dev_priv);
freq = intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1));
}
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(dev_priv, freq));
}
if (ret)
return ret;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&rps->lock);
val = intel_freq_opcode(dev_priv, val);
unlock:
mutex_unlock(&rps->lock);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return ret ?: count;
}
if (ret)
return ret;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&rps->lock);
val = intel_freq_opcode(dev_priv, val);
unlock:
mutex_unlock(&rps->lock);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return ret ?: count;
}
* intel_runtime_pm_put(), so it is correct to wrap only the
* pin/unpin/fence and not more.
*/
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
i915_gem_object_lock(obj);
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
i915_gem_object_unlock(obj);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return vma;
}
intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
}
- intel_runtime_pm_put(dev_priv, intel_state->wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
/*
* Defer the cleanup of the old state to a separate worker to not
struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
- intel_state->wakeref = intel_runtime_pm_get(dev_priv);
+ intel_state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
drm_atomic_state_get(state);
i915_sw_fence_init(&intel_state->commit_ready,
if (ret) {
DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
i915_sw_fence_commit(&intel_state->commit_ready);
- intel_runtime_pm_put(dev_priv, intel_state->wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
return ret;
}
i915_sw_fence_commit(&intel_state->commit_ready);
drm_atomic_helper_cleanup_planes(dev, state);
- intel_runtime_pm_put(dev_priv, intel_state->wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
return ret;
}
dev_priv->wm.distrust_bios_wm = false;
goto out_verify;
cancel_delayed_work(&power_domains->async_put_work);
- intel_runtime_pm_put_raw(dev_priv,
+ intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
verify_async_put_domains_state(power_domains);
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
- intel_wakeref_t wakeref = intel_runtime_pm_get(dev_priv);
+ intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&power_domains->lock);
__intel_display_power_get_domain(dev_priv, domain);
intel_wakeref_t wakeref;
bool is_enabled;
- wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+ wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
if (!wakeref)
return false;
mutex_unlock(&power_domains->lock);
if (!is_enabled) {
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
wakeref = 0;
}
enum intel_display_power_domain domain)
{
__intel_display_power_put(dev_priv, domain);
- intel_runtime_pm_put_unchecked(dev_priv);
+ intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
static void
struct drm_i915_private *dev_priv =
container_of(power_domains, struct drm_i915_private,
power_domains);
+ struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
enum intel_display_power_domain domain;
intel_wakeref_t wakeref;
* wakeref to make the state checker happy about the HW access during
* power well disabling.
*/
- assert_rpm_raw_wakeref_held(&dev_priv->runtime_pm);
- wakeref = intel_runtime_pm_get(dev_priv);
+ assert_rpm_raw_wakeref_held(rpm);
+ wakeref = intel_runtime_pm_get(rpm);
for_each_power_domain(domain, mask) {
/* Clear before put, so put's sanity check is happy. */
__intel_display_power_put_domain(dev_priv, domain);
}
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(rpm, wakeref);
}
static void
container_of(work, struct drm_i915_private,
power_domains.async_put_work.work);
struct i915_power_domains *power_domains = &dev_priv->power_domains;
- intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(dev_priv);
+ struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
intel_wakeref_t old_work_wakeref = 0;
mutex_lock(&power_domains->lock);
mutex_unlock(&power_domains->lock);
if (old_work_wakeref)
- intel_runtime_pm_put_raw(dev_priv, old_work_wakeref);
+ intel_runtime_pm_put_raw(rpm, old_work_wakeref);
if (new_work_wakeref)
- intel_runtime_pm_put_raw(dev_priv, new_work_wakeref);
+ intel_runtime_pm_put_raw(rpm, new_work_wakeref);
}
/**
intel_wakeref_t wakeref)
{
struct i915_power_domains *power_domains = &i915->power_domains;
- intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(i915);
+ struct intel_runtime_pm *rpm = &i915->runtime_pm;
+ intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
mutex_lock(&power_domains->lock);
mutex_unlock(&power_domains->lock);
if (work_wakeref)
- intel_runtime_pm_put_raw(i915, work_wakeref);
+ intel_runtime_pm_put_raw(rpm, work_wakeref);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(rpm, wakeref);
}
/**
mutex_unlock(&power_domains->lock);
if (work_wakeref)
- intel_runtime_pm_put_raw(i915, work_wakeref);
+ intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
}
/**
intel_wakeref_t wakeref)
{
__intel_display_power_put(dev_priv, domain);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
#endif
intel_power_domains_verify_state(i915);
/* Keep the power well enabled, but cancel its rpm wakeref. */
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
/**
}
mutex_lock(&dev->struct_mutex);
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
/* Pin the GGTT vma for our access via info->screen_base.
* This also validates that any existing fb inherited from the
ifbdev->vma = vma;
ifbdev->vma_flags = flags;
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(pdev, info);
return 0;
out_unpin:
intel_unpin_fb_vma(vma, flags);
out_unlock:
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
mutex_unlock(&dev->struct_mutex);
return ret;
}
intel_wakeref_t wakeref;
enum hpd_pin pin;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
spin_lock_irq(&dev_priv->irq_lock);
for_each_hpd_pin(pin) {
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
bool intel_encoder_hotplug(struct intel_encoder *encoder,
/**
* intel_runtime_pm_get_raw - grab a raw runtime pm reference
- * @i915: i915 device instance
+ * @rpm: the intel_runtime_pm structure
*
* This is the unlocked version of intel_display_power_is_enabled() and should
* only be used from error capture and recovery code where deadlocks are
* Returns: the wakeref cookie to pass to intel_runtime_pm_put_raw(), evaluates
* as True if the wakeref was acquired, or False otherwise.
*/
-
-intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915)
+intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm)
{
- return __intel_runtime_pm_get(&i915->runtime_pm, false);
+ return __intel_runtime_pm_get(rpm, false);
}
/**
* intel_runtime_pm_get - grab a runtime pm reference
- * @i915: i915 device instance
+ * @rpm: the intel_runtime_pm structure
*
* This function grabs a device-level runtime pm reference (mostly used for GEM
* code to ensure the GTT or GT is on) and ensures that it is powered up.
*
* Returns: the wakeref cookie to pass to intel_runtime_pm_put()
*/
-intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
+intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
{
- return __intel_runtime_pm_get(&i915->runtime_pm, true);
+ return __intel_runtime_pm_get(rpm, true);
}
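/*
 * Illustrative sketch, not part of the patch: with the new signature a
 * caller passes the intel_runtime_pm pointer directly rather than the
 * drm_i915_private pointer. example_hold_wakeref() is a hypothetical
 * helper; it only shows the acquire/release pairing against
 * &i915->runtime_pm.
 */
static void example_hold_wakeref(struct drm_i915_private *i915)
{
        struct intel_runtime_pm *rpm = &i915->runtime_pm;
        intel_wakeref_t wakeref;

        wakeref = intel_runtime_pm_get(rpm);    /* device is awake from here */
        /* ... hardware access while the wakeref is held ... */
        intel_runtime_pm_put(rpm, wakeref);     /* release with the same cookie */
}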
/**
* intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
- * @i915: i915 device instance
+ * @rpm: the intel_runtime_pm structure
*
* This function grabs a device-level runtime pm reference if the device is
* already in use and ensures that it is powered up. It is illegal to try
* Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
* as True if the wakeref was acquired, or False otherwise.
*/
-intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
+intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
{
- struct intel_runtime_pm *rpm = &i915->runtime_pm;
-
if (IS_ENABLED(CONFIG_PM)) {
/*
* In cases runtime PM is disabled by the RPM core and we get
/**
* intel_runtime_pm_get_noresume - grab a runtime pm reference
- * @i915: i915 device instance
+ * @rpm: the intel_runtime_pm structure
*
* This function grabs a device-level runtime pm reference (mostly used for GEM
* code to ensure the GTT or GT is on).
*
* Returns: the wakeref cookie to pass to intel_runtime_pm_put()
*/
-intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
+intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm)
{
- struct intel_runtime_pm *rpm = &i915->runtime_pm;
-
assert_rpm_wakelock_held(rpm);
pm_runtime_get_noresume(rpm->kdev);
/**
* intel_runtime_pm_put_raw - release a raw runtime pm reference
- * @i915: i915 device instance
+ * @rpm: the intel_runtime_pm structure
* @wref: wakeref acquired for the reference that is being released
*
* This function drops the device-level runtime pm reference obtained by
* hardware block right away if this is the last reference.
*/
void
-intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref)
+intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
- __intel_runtime_pm_put(&i915->runtime_pm, wref, false);
+ __intel_runtime_pm_put(rpm, wref, false);
}
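/*
 * Illustrative sketch, not part of the patch: raw references pair
 * intel_runtime_pm_get_raw() with intel_runtime_pm_put_raw() and keep the
 * device awake without counting towards the regular wakelock asserts,
 * which is how the display power async-put worker above holds its
 * reference. example_raw_ref() is a hypothetical helper.
 */
static void example_raw_ref(struct intel_runtime_pm *rpm)
{
        intel_wakeref_t wakeref = intel_runtime_pm_get_raw(rpm);

        /* ... internal bookkeeping that must keep the device powered ... */
        intel_runtime_pm_put_raw(rpm, wakeref);
}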
/**
* intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference
- * @i915: i915 device instance
+ * @rpm: the intel_runtime_pm structure
*
* This function drops the device-level runtime pm reference obtained by
* intel_runtime_pm_get() and might power down the corresponding
* new code, as the correctness of its use cannot be checked. Always use
* intel_runtime_pm_put() instead.
*/
-void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915)
+void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm)
{
- __intel_runtime_pm_put(&i915->runtime_pm, -1, true);
+ __intel_runtime_pm_put(rpm, -1, true);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
* intel_runtime_pm_put - release a runtime pm reference
- * @i915: i915 device instance
+ * @rpm: the intel_runtime_pm structure
* @wref: wakeref acquired for the reference that is being released
*
* This function drops the device-level runtime pm reference obtained by
* intel_runtime_pm_get() and might power down the corresponding
* hardware block right away if this is the last reference.
*/
-void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
+void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
- __intel_runtime_pm_put(&i915->runtime_pm, wref, true);
+ __intel_runtime_pm_put(rpm, wref, true);
}
#endif
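/*
 * Illustrative sketch, not part of the patch: the conditional variant, as
 * used by the hangcheck and PMU call sites converted above, only returns a
 * wakeref when the device is already awake; a zero cookie means the access
 * should be skipped. example_poll_hw() is a hypothetical helper.
 */
static void example_poll_hw(struct drm_i915_private *i915)
{
        struct intel_runtime_pm *rpm = &i915->runtime_pm;
        intel_wakeref_t wakeref;

        wakeref = intel_runtime_pm_get_if_in_use(rpm);
        if (!wakeref)
                return; /* device asleep, nothing to sample */

        /* ... read registers ... */
        intel_runtime_pm_put(rpm, wakeref);
}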
void intel_runtime_pm_disable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_cleanup(struct intel_runtime_pm *rpm);
-intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915);
-intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915);
-intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915);
-intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915);
+intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
+intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
+intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm);
+intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);
#define with_intel_runtime_pm(i915, wf) \
- for ((wf) = intel_runtime_pm_get(i915); (wf); \
- intel_runtime_pm_put((i915), (wf)), (wf) = 0)
+ for ((wf) = intel_runtime_pm_get(&(i915)->runtime_pm); (wf); \
+ intel_runtime_pm_put(&(i915)->runtime_pm, (wf)), (wf) = 0)
#define with_intel_runtime_pm_if_in_use(i915, wf) \
- for ((wf) = intel_runtime_pm_get_if_in_use(i915); (wf); \
- intel_runtime_pm_put((i915), (wf)), (wf) = 0)
+ for ((wf) = intel_runtime_pm_get_if_in_use(&(i915)->runtime_pm); (wf); \
+ intel_runtime_pm_put(&(i915)->runtime_pm, (wf)), (wf) = 0)
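/*
 * Illustrative sketch, not part of the patch: the convenience macros still
 * take the drm_i915_private pointer and resolve &(i915)->runtime_pm
 * themselves, so existing macro users need no changes.
 * example_scoped_access() is a hypothetical helper.
 */
static inline void example_scoped_access(struct drm_i915_private *i915)
{
        intel_wakeref_t wakeref;

        with_intel_runtime_pm(i915, wakeref) {
                /* device guaranteed awake for the body of this block */
        }
}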
-void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915);
+void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref);
+void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
#else
static inline void
-intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
+intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
- intel_runtime_pm_put_unchecked(i915);
+ intel_runtime_pm_put_unchecked(rpm);
}
#endif
-void intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref);
+void intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
static void rpm_get(struct drm_i915_private *i915, struct intel_wakeref *wf)
{
- wf->wakeref = intel_runtime_pm_get(i915);
+ wf->wakeref = intel_runtime_pm_get(&i915->runtime_pm);
}
static void rpm_put(struct drm_i915_private *i915, struct intel_wakeref *wf)
{
intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
GEM_BUG_ON(!wakeref);
}
wakeref = fetch_and_zero(&wf->wakeref);
spin_unlock_irqrestore(&wf->lock, flags);
- intel_runtime_pm_put(wf->i915, wakeref);
+ intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref);
}
void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
spin_lock_irqsave(&wf->lock, flags);
if (!refcount_inc_not_zero(&wf->count)) {
GEM_BUG_ON(wf->wakeref);
- wf->wakeref = intel_runtime_pm_get_if_in_use(wf->i915);
+ wf->wakeref = intel_runtime_pm_get_if_in_use(&wf->i915->runtime_pm);
refcount_set(&wf->count, 1);
}
spin_unlock_irqrestore(&wf->lock, flags);
/* Check that we get a callback when requests retire upon waiting */
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
err = __live_active_setup(i915, &active);
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
/* Check that we get a callback when requests are indirectly retired */
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
err = __live_active_setup(i915, &active);
}
i915_active_fini(&active.base);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
{
intel_wakeref_t wakeref;
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
/*
* As a final sting in the tail, invalidate stolen. Under a real S4,
*/
trash_stolen(i915);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
static int pm_prepare(struct drm_i915_private *i915)
return 0;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
/* Reserve a block so that we know we have enough to fit a few rq */
memset(&hole, 0, sizeof(hole));
}
if (drm_mm_node_allocated(&hole))
drm_mm_remove_node(&hole);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
mock_vma.node.size = BIT_ULL(size);
mock_vma.node.start = addr;
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
count = n;
if (err)
goto out_unpin;
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for (n = 0; n < count; n++) {
u64 offset = tmp.start + n * PAGE_SIZE;
kfree(order);
out_remove:
ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
drm_mm_remove_node(&tmp);
out_unpin:
i915_gem_object_unpin_pages(obj);
*/
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for_each_engine(engine, i915, id) {
struct i915_request *request = NULL;
}
out_unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
*/
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
batch = empty_batch(i915);
if (IS_ERR(batch)) {
i915_vma_unpin(batch);
i915_vma_put(batch);
out_unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
*/
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
i915_vma_unpin(batch);
i915_vma_put(batch);
out_unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
*/
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
i915_request_put(request[id]);
}
out_unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
* On real hardware this time.
*/
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
file = mock_file(i915);
if (IS_ERR(file)) {
out_file:
mock_file_free(i915, file);
out_rpm:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return ret;
}
return -ENOMEM;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
count = 0;
for_each_engine(engine, i915, id) {
i915_timeline_put(tl);
}
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
kvfree(timelines);
return -ENOMEM;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
count = 0;
for (n = 0; n < NUM_TIMELINES; n++) {
i915_timeline_put(tl);
}
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
kvfree(timelines);
*/
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
tl = i915_timeline_create(i915, NULL);
if (IS_ERR(tl)) {
out_free:
i915_timeline_put(tl);
out_rpm:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
*/
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
count = 0;
for_each_engine(engine, i915, id) {
out:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for (t = types; *t; t++) {
for (p = planes; p->width; p++) {
}
out:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_object_put(obj);
GEM_BUG_ON(!HAS_GUC(dev_priv));
mutex_lock(&dev_priv->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
guc = &dev_priv->guc;
if (!guc) {
guc_clients_create(guc);
guc_clients_enable(guc);
unlock:
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
}
GEM_BUG_ON(!HAS_GUC(dev_priv));
mutex_lock(&dev_priv->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
guc = &dev_priv->guc;
if (!guc) {
guc_client_free(clients[i]);
}
unlock:
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
}
return 0;
}
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for_each_fw_domain(domain, uncore, tmp) {
smp_store_mb(domain->active, false);
}
out_rpm:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return err;
}