/*
 * NOTE(review): this is the tail of a function whose signature sits above
 * this hunk -- from the call sites below it appears to be
 * __update_guc_busyness_stats(); confirm against the full file.
 */
struct intel_gt *gt = guc_to_gt(guc);
struct intel_engine_cs *engine;
enum intel_engine_id id;
/* '+' lines: this patch moves the timestamp lock into this helper. */
+ unsigned long flags;
ktime_t unused;
/*
 * Serialize the per-engine timestamp/clock refresh below under
 * guc->timestamp.lock; the callers no longer take the lock themselves
 * (see the matching '-' removals in the hunks further down).
 */
+ spin_lock_irqsave(&guc->timestamp.lock, flags);
for_each_engine(engine, gt, id) {
guc_update_pm_timestamp(guc, engine, &unused);
guc_update_engine_gt_clks(engine);
}
+ spin_unlock_irqrestore(&guc->timestamp.lock, flags);
}
/*
 * Delayed worker that periodically refreshes the GuC busyness stats and
 * re-arms itself via mod_delayed_work().
 *
 * Patch context: the spin_lock_irqsave/irqrestore pair wrapped around
 * __update_guc_busyness_stats() is removed here because the helper now
 * takes guc->timestamp.lock internally (see the '+' lines in the hunk
 * above); the now-unused 'flags' local is dropped as well.
 *
 * NOTE(review): this hunk is elided -- the opening brace, the declaration
 * of 'guc', the body of the L19 comment, and the reset-lock acquisition
 * are not visible here.
 */
static void guc_timestamp_ping(struct work_struct *wrk)
struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
struct intel_gt *gt = guc_to_gt(guc);
intel_wakeref_t wakeref;
- unsigned long flags;
int srcu, ret;
/*
if (ret)
return;
- spin_lock_irqsave(&guc->timestamp.lock, flags);
-
/* Fixed mangled "&gt" (corrupted to ">" by an HTML-entity decode). */
with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
__update_guc_busyness_stats(guc);
- spin_unlock_irqrestore(&guc->timestamp.lock, flags);
-
intel_gt_reset_unlock(gt, srcu);
mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
/*
 * Flush the GuC busyness stats one last time when the GT is parked.
 *
 * Patch context: the lock/unlock pair around
 * __update_guc_busyness_stats() is removed here (the helper now takes
 * guc->timestamp.lock itself), together with the now-unused 'flags'
 * local and the surrounding blank lines.
 */
void intel_guc_busyness_park(struct intel_gt *gt)
{
/* Fixed mangled "&gt" (corrupted to ">" by an HTML-entity decode). */
struct intel_guc *guc = &gt->uc.guc;
- unsigned long flags;
/* Nothing to flush if GuC submission was never set up on this GT. */
if (!guc_submission_initialized(guc))
return;
/* Stop the periodic ping worker; cancel_delayed_work() does not wait
 * for an in-flight instance -- presumably safe here, TODO confirm. */
cancel_delayed_work(&guc->timestamp.work);
-
- spin_lock_irqsave(&guc->timestamp.lock, flags);
__update_guc_busyness_stats(guc);
- spin_unlock_irqrestore(&guc->timestamp.lock, flags);
}
void intel_guc_busyness_unpark(struct intel_gt *gt)