#define GUC_CLIENT_PRIORITY_NORMAL 3
#define GUC_CLIENT_PRIORITY_NUM 4
-#define GUC_MAX_STAGE_DESCRIPTORS 1024
-#define GUC_INVALID_STAGE_ID GUC_MAX_STAGE_DESCRIPTORS
-
+#define GUC_MAX_LRC_DESCRIPTORS 65535
+#define GUC_INVALID_LRC_ID GUC_MAX_LRC_DESCRIPTORS
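The sizing is presumably chosen so that both a live descriptor index and the invalid sentinel fit in a 16-bit id field: indices 0..65534 address real descriptors, while 65535 means "no id assigned". A minimal sketch of that convention; lrc_id_is_valid is a hypothetical helper, not part of this patch:

/* Hypothetical helper (illustration only): an id is usable iff it is
 * below GUC_MAX_LRC_DESCRIPTORS, i.e. not the GUC_INVALID_LRC_ID sentinel. */
static inline bool lrc_id_is_valid(u32 id)
{
	return id < GUC_MAX_LRC_DESCRIPTORS;
}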
u32 reserved[30];
} __packed;
-/* engine id and context id is packed into guc_execlist_context.context_id*/
-#define GUC_ELC_CTXID_OFFSET 0
-#define GUC_ELC_ENGINE_OFFSET 29
-
-/* The execlist context including software and HW information */
-struct guc_execlist_context {
- u32 context_desc;
- u32 context_id;
- u32 ring_status;
- u32 ring_lrca;
- u32 ring_begin;
- u32 ring_end;
- u32 ring_next_free_location;
- u32 ring_current_tail_pointer_value;
- u8 engine_state_submit_value;
- u8 engine_state_wait_value;
- u16 pagefault_count;
- u16 engine_submit_queue_count;
-} __packed;
-
-/*
- * This structure describes a stage set arranged for a particular communication
- * between uKernel (GuC) and Driver (KMD). Technically, this is known as a
- * "GuC Context descriptor" in the specs, but we use the term "stage descriptor"
- * to avoid confusion with all the other things already named "context" in the
- * driver. A static pool of these descriptors are stored inside a GEM object
- * (stage_desc_pool) which is held for the entire lifetime of our interaction
- * with the GuC, being allocated before the GuC is loaded with its firmware.
- */
-struct guc_stage_desc {
- u32 sched_common_area;
- u32 stage_id;
- u32 pas_id;
- u8 engines_used;
- u64 db_trigger_cpu;
- u32 db_trigger_uk;
- u64 db_trigger_phy;
- u16 db_id;
-
- struct guc_execlist_context lrc[GUC_MAX_ENGINES_NUM];
-
- u8 attribute;
-
- u32 priority;
-
- u32 wq_sampled_tail_offset;
- u32 wq_total_submit_enqueues;
-
- u32 process_desc;
- u32 wq_addr;
- u32 wq_size;
-
- u32 engine_presence;
-
- u8 engine_suspended;
-
- u8 reserved0[3];
- u64 reserved1[1];
-
- u64 desc_private;
-} __packed;
-
+#define CONTEXT_REGISTRATION_FLAG_KMD BIT(0)
+#define CONTEXT_POLICY_DEFAULT_EXECUTION_QUANTUM_US 1000000
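For scale: assuming the _US suffix means the GuC interprets this policy in microseconds, the default execution quantum of 1,000,000 us gives each context nominally one full second of execution before the scheduler intervenes.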
return rb_entry(rb, struct i915_priolist, node);
}
-static struct guc_stage_desc *__get_stage_desc(struct intel_guc *guc, u32 id)
+/* Future patches will use this function */
+__maybe_unused
+static struct guc_lrc_desc *__get_lrc_desc(struct intel_guc *guc, u32 index)
{
- struct guc_stage_desc *base = guc->stage_desc_pool_vaddr;
+ struct guc_lrc_desc *base = guc->lrc_desc_pool_vaddr;
-	return &base[id];
-}
-
-static int guc_stage_desc_pool_create(struct intel_guc *guc)
-{
-	u32 size = PAGE_ALIGN(sizeof(struct guc_stage_desc) *
-			      GUC_MAX_STAGE_DESCRIPTORS);
-
-	return intel_guc_allocate_and_map_vma(guc, size, &guc->stage_desc_pool,
-					      &guc->stage_desc_pool_vaddr);
+	GEM_BUG_ON(index >= GUC_MAX_LRC_DESCRIPTORS);
+
+	return &base[index];
}
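Since __get_lrc_desc is only wired up by later patches (hence the __maybe_unused annotation), here is a rough sketch of the intended use. The descriptor field names (context_flags, priority) are assumptions about the new guc_lrc_desc layout, which is not shown in this hunk; only the two #defines used are from this patch:

/* Hypothetical sketch (not in this patch): how a later registration
 * path might fill a descriptor slot before handing it to the GuC. */
static void __maybe_unused example_init_lrc_desc(struct intel_guc *guc, u32 index)
{
	struct guc_lrc_desc *desc = __get_lrc_desc(guc, index);

	/* Clear the slot, then mark it as a kernel-mode (KMD) context
	 * at the default priority. Field names are assumed. */
	memset(desc, 0, sizeof(*desc));
	desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
	desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;
}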
-static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
-{
- i915_vma_unpin_and_release(&guc->stage_desc_pool, I915_VMA_RELEASE_MAP);
-}
-
-/*
- * Initialise/clear the stage descriptor shared with the GuC firmware.
- *
- * This descriptor tells the GuC where (in GGTT space) to find the important
- * data structures related to work submission (process descriptor, write queue,
- * etc).
- */
-static void guc_stage_desc_init(struct intel_guc *guc)
+static int guc_lrc_desc_pool_create(struct intel_guc *guc)
{
-	struct guc_stage_desc *desc;
-
-	/* we only use 1 stage desc, so hardcode it to 0 */
-	desc = __get_stage_desc(guc, 0);
-	memset(desc, 0, sizeof(*desc));
-
-	desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE |
-			  GUC_STAGE_DESC_ATTR_KERNEL;
-	desc->stage_id = 0;
-	desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;
-
-	desc->wq_size = GUC_WQ_SIZE;
+	u32 size;
+	int ret;
+
+	size = PAGE_ALIGN(sizeof(struct guc_lrc_desc) *
+			  GUC_MAX_LRC_DESCRIPTORS);
+	ret = intel_guc_allocate_and_map_vma(guc, size, &guc->lrc_desc_pool,
+					     (void **)&guc->lrc_desc_pool_vaddr);
+	if (ret)
+		return ret;
+
+	return 0;
}
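To get a feel for the allocation: if sizeof(struct guc_lrc_desc) were 128 bytes (illustrative; the struct definition is not shown in this excerpt), the pool would come to PAGE_ALIGN(128 * 65535) = 8 MiB with 4 KiB pages, mapped once up front and released only in intel_guc_submission_fini().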
-static void guc_stage_desc_fini(struct intel_guc *guc)
+static void guc_lrc_desc_pool_destroy(struct intel_guc *guc)
{
- struct guc_stage_desc *desc;
-
- desc = __get_stage_desc(guc, 0);
- memset(desc, 0, sizeof(*desc));
+ i915_vma_unpin_and_release(&guc->lrc_desc_pool, I915_VMA_RELEASE_MAP);
}
int intel_guc_submission_init(struct intel_guc *guc)
{
	int ret;

- if (guc->stage_desc_pool)
+ if (guc->lrc_desc_pool)
return 0;
- ret = guc_stage_desc_pool_create(guc);
+ ret = guc_lrc_desc_pool_create(guc);
if (ret)
return ret;
/*
* Keep static analysers happy, let them know that we allocated the
* vma after testing that it didn't exist earlier.
*/
- GEM_BUG_ON(!guc->stage_desc_pool);
+ GEM_BUG_ON(!guc->lrc_desc_pool);
return 0;
}
void intel_guc_submission_fini(struct intel_guc *guc)
{
- if (guc->stage_desc_pool) {
- guc_stage_desc_pool_destroy(guc);
- }
+ if (guc->lrc_desc_pool)
+ guc_lrc_desc_pool_destroy(guc);
}
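Both functions guard on guc->lrc_desc_pool, so init is idempotent and fini is a no-op when the pool was never created. A hypothetical caller sketch (example_guc_probe is illustrative, not from this patch):

/* Hypothetical probe-path sketch: a failed later step may call fini
 * unconditionally, because fini no-ops without a pool. */
static int example_guc_probe(struct intel_guc *guc)
{
	int err = intel_guc_submission_init(guc);

	if (err)
		return err;
	/* ...subsequent setup; on failure: intel_guc_submission_fini(guc); */
	return 0;
}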
void intel_guc_submission_enable(struct intel_guc *guc)
{
- guc_stage_desc_init(guc);
}
void intel_guc_submission_disable(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	GEM_BUG_ON(gt->awake); /* GT should be parked first */

/* Note: By the time we're here, GuC may have already been reset */
-
- guc_stage_desc_fini(guc);
}
static bool __guc_submission_selected(struct intel_guc *guc)