if (!HAS_EXECLISTS(i915))
return -ENODEV;
- if (USES_GUC_SUBMISSION(i915))
+ if (intel_uc_uses_guc_submission(&i915->gt.uc))
return -ENODEV; /* not implemented yet */
if (get_user(idx, &ext->engine_index))
if (!intel_has_reset_engine(gt))
return 0;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
igt_global_reset_lock(gt);
if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0; /* presume black box */
if (intel_vgpu_active(gt->i915))
unsigned int class, inst;
int err;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
for_each_engine(engine, gt, id) {
unsigned int class, inst;
int err;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
* are preserved.
*/
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
/* As we use CS_GPR we cannot run before they existed on all engines. */
unsigned int class, inst;
int err;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
* forgotten.
*/
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
if (!intel_has_reset_engine(gt))
if (!intel_has_reset_engine(gt))
return 0;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
intel_gt_pm_get(gt);
ce->vm = i915_vm_get(&ppgtt->vm);
intel_context_set_single_submission(ce);
- if (!USES_GUC_SUBMISSION(i915)) { /* Max ring buffer size */
+ /* Max ring buffer size */
+ if (!intel_uc_uses_guc_submission(&engine->gt->uc)) {
const unsigned int ring_size = 512 * SZ_4K;
ce->ring = __intel_context_ring_size(ring_size);
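For context, __intel_context_ring_size() does not allocate anything at this point: it encodes the requested size into the ce->ring pointer, and the backing ring buffer is only allocated later when the context is pinned. A minimal sketch of the helper's shape, with the body assumed for illustration (the real definition lives in intel_context.h):

/*
 * Sketch of the size-encoding helper (see intel_context.h); the body
 * here is an assumption. The size rides in ce->ring until the context
 * is pinned and a real struct intel_ring is allocated in its place.
 */
static inline struct intel_ring *
__intel_context_ring_size(u64 sz)
{
	return (struct intel_ring *)(uintptr_t)sz;
}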
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const struct intel_guc *guc = &dev_priv->gt.uc.guc;
- struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
+ struct intel_uc *uc = &dev_priv->gt.uc;
+ struct guc_stage_desc *desc = uc->guc.stage_desc_pool_vaddr;
int index;
- if (!USES_GUC_SUBMISSION(dev_priv))
+ if (!intel_uc_uses_guc_submission(uc))
return -ENODEV;
for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
#define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc)
-/* Having GuC is not the same as using GuC */
-#define USES_GUC_SUBMISSION(dev_priv) intel_uc_uses_guc_submission(&(dev_priv)->gt.uc)
-
#define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu)
#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs)
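The removed macro was a thin wrapper, so call sites now reach the helper directly. A minimal sketch of the replacement's likely shape: only the name and signature are taken from the call sites above; the body (and the intel_guc_is_submission_supported() helper it calls) is an assumption:

/*
 * Sketch of the replacement helper (declared in intel_uc.h); body
 * assumed for illustration. Having GuC is not the same as using GuC
 * for submission.
 */
static inline bool intel_uc_uses_guc_submission(struct intel_uc *uc)
{
	return intel_guc_is_submission_supported(&uc->guc);
}

Taking a struct intel_uc * rather than the whole struct drm_i915_private keeps the check usable from GT-centric code, which is why the selftest call sites above can pass &gt->uc directly instead of going through gt->i915.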
return 0;
}
- if (USES_GUC_SUBMISSION(dev_priv)) {
+ if (intel_uc_uses_guc_submission(&dev_priv->gt.uc)) {
drm_err(&dev_priv->drm,
"i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
return -EIO;