drm/i915/gvt: Introduce intel_vgpu_submission
author     Zhi Wang <zhi.a.wang@intel.com>
           Sun, 10 Sep 2017 13:15:18 +0000 (21:15 +0800)
committer  Zhenyu Wang <zhenyuw@linux.intel.com>
           Thu, 16 Nov 2017 03:46:42 +0000 (11:46 +0800)
Introduce struct intel_vgpu_submission to hold all the submission-related
members that previously lived directly in struct intel_vgpu.

Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
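
For quick orientation before the hunks: a minimal, hypothetical sketch of the
pattern this patch applies (placeholder names and types, not the real i915/gvt
definitions). Submission state is grouped into one container struct, and call
sites take a local "s" pointer instead of dereferencing the vgpu members
directly.

/* Hypothetical, simplified illustration -- not the real i915/gvt types. */
#include <stdio.h>

#define NUM_ENGINES 4

struct submission {                     /* stands in for intel_vgpu_submission */
        int execlist[NUM_ENGINES];      /* placeholder for intel_vgpu_execlist */
};

struct vgpu {                           /* stands in for intel_vgpu */
        struct submission submission;   /* grouped submission state */
};

/* Before: vgpu->execlist[ring_id]; after: a local pointer into ->submission. */
static int peek_execlist(struct vgpu *vgpu, int ring_id)
{
        struct submission *s = &vgpu->submission;

        return s->execlist[ring_id];
}

int main(void)
{
        struct vgpu v = { .submission = { .execlist = { 1, 2, 3, 4 } } };

        printf("%d\n", peek_execlist(&v, 2));
        return 0;
}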
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/render.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/gvt/scheduler.h
drivers/gpu/drm/i915/gvt/vgpu.c

diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 0ee40a5ee1923aaf48db5b458d766513f436737e..f118454d2eabbc5118e6b854ffd0cd7a8a731d02 100644
@@ -362,7 +362,7 @@ static void free_workload(struct intel_vgpu_workload *workload)
 {
        intel_vgpu_unpin_mm(workload->shadow_mm);
        intel_gvt_mm_unreference(workload->shadow_mm);
-       kmem_cache_free(workload->vgpu->workloads, workload);
+       kmem_cache_free(workload->vgpu->submission.workloads, workload);
 }
 
 #define get_desc_from_elsp_dwords(ed, i) \
@@ -401,7 +401,8 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
                                        struct intel_vgpu_workload,
                                        wa_ctx);
        int ring_id = workload->ring_id;
-       struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+       struct intel_vgpu_submission *s = &workload->vgpu->submission;
+       struct i915_gem_context *shadow_ctx = s->shadow_ctx;
        struct drm_i915_gem_object *ctx_obj =
                shadow_ctx->engine[ring_id].state->obj;
        struct execlist_ring_context *shadow_ring_context;
@@ -474,6 +475,7 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
 {
        struct intel_vgpu *vgpu = workload->vgpu;
+       struct intel_vgpu_submission *s = &vgpu->submission;
        struct execlist_ctx_descriptor_format ctx[2];
        int ring_id = workload->ring_id;
        int ret;
@@ -514,7 +516,7 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
        ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
        ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
 
-       ret = emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
+       ret = emulate_execlist_schedule_in(&s->execlist[ring_id], ctx);
        if (!ret)
                goto out;
        else
@@ -533,7 +535,8 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 {
        struct intel_vgpu *vgpu = workload->vgpu;
        int ring_id = workload->ring_id;
-       struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
+       struct intel_vgpu_submission *s = &vgpu->submission;
+       struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
        struct intel_vgpu_workload *next_workload;
        struct list_head *next = workload_q_head(vgpu, ring_id)->next;
        bool lite_restore = false;
@@ -652,6 +655,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
                struct execlist_ctx_descriptor_format *desc,
                bool emulate_schedule_in)
 {
+       struct intel_vgpu_submission *s = &vgpu->submission;
        struct list_head *q = workload_q_head(vgpu, ring_id);
        struct intel_vgpu_workload *last_workload = get_last_workload(q);
        struct intel_vgpu_workload *workload = NULL;
@@ -689,7 +693,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 
        gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
 
-       workload = kmem_cache_zalloc(vgpu->workloads, GFP_KERNEL);
+       workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
        if (!workload)
                return -ENOMEM;
 
@@ -738,7 +742,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
        }
 
        if (emulate_schedule_in)
-               workload->elsp_dwords = vgpu->execlist[ring_id].elsp_dwords;
+               workload->elsp_dwords = s->execlist[ring_id].elsp_dwords;
 
        gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
                        workload, ring_id, head, tail, start, ctl);
@@ -748,7 +752,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 
        ret = prepare_mm(workload);
        if (ret) {
-               kmem_cache_free(vgpu->workloads, workload);
+               kmem_cache_free(s->workloads, workload);
                return ret;
        }
 
@@ -769,7 +773,8 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 
 int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
 {
-       struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
+       struct intel_vgpu_submission *s = &vgpu->submission;
+       struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
        struct execlist_ctx_descriptor_format *desc[2];
        int i, ret;
 
@@ -811,7 +816,8 @@ inv_desc:
 
 static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
 {
-       struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
+       struct intel_vgpu_submission *s = &vgpu->submission;
+       struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
        struct execlist_context_status_pointer_format ctx_status_ptr;
        u32 ctx_status_ptr_reg;
 
@@ -833,6 +839,7 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
 
 static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
 {
+       struct intel_vgpu_submission *s = &vgpu->submission;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine;
        struct intel_vgpu_workload *pos, *n;
@@ -841,12 +848,11 @@ static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
        /* free the unsubmited workloads in the queues. */
        for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
                list_for_each_entry_safe(pos, n,
-                       &vgpu->workload_q_head[engine->id], list) {
+                       &s->workload_q_head[engine->id], list) {
                        list_del_init(&pos->list);
                        free_workload(pos);
                }
-
-               clear_bit(engine->id, vgpu->shadow_ctx_desc_updated);
+               clear_bit(engine->id, s->shadow_ctx_desc_updated);
        }
 }
 
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 9c2e7c0aa38fb68670a26789d4efcf58aa42b607..c3f84f26090aa5877a9f71c6b83e2689275c6342 100644
@@ -142,6 +142,15 @@ struct vgpu_sched_ctl {
        int weight;
 };
 
+struct intel_vgpu_submission {
+       struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
+       struct list_head workload_q_head[I915_NUM_ENGINES];
+       struct kmem_cache *workloads;
+       atomic_t running_workload_num;
+       struct i915_gem_context *shadow_ctx;
+       DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
+};
+
 struct intel_vgpu {
        struct intel_gvt *gvt;
        int id;
@@ -161,16 +170,12 @@ struct intel_vgpu {
        struct intel_vgpu_gtt gtt;
        struct intel_vgpu_opregion opregion;
        struct intel_vgpu_display display;
-       struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
-       struct list_head workload_q_head[I915_NUM_ENGINES];
-       struct kmem_cache *workloads;
-       atomic_t running_workload_num;
+       struct intel_vgpu_submission submission;
        /* 1/2K for each reserve ring buffer */
        void *reserve_ring_buffer_va[I915_NUM_ENGINES];
        int reserve_ring_buffer_size[I915_NUM_ENGINES];
        DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
-       struct i915_gem_context *shadow_ctx;
-       DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
+
 
 #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
        struct {
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 820d2667054118e36eb214826f6071625b1d8899..00893532394ac0093ffb241c5cf4c185311bb54f 100644
@@ -1451,7 +1451,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
        if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1))
                return -EINVAL;
 
-       execlist = &vgpu->execlist[ring_id];
+       execlist = &vgpu->submission.execlist[ring_id];
 
        execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
        if (execlist->elsp_dwords.index == 3) {
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 96060920a6fea2d9eead2134f313dabd3a003b09..fc6878bb24968ea1d6197e470a9e22ec25749ffd 100644
@@ -1188,7 +1188,7 @@ hw_id_show(struct device *dev, struct device_attribute *attr,
                struct intel_vgpu *vgpu = (struct intel_vgpu *)
                        mdev_get_drvdata(mdev);
                return sprintf(buf, "%u\n",
-                              vgpu->shadow_ctx->hw_id);
+                              vgpu->submission.shadow_ctx->hw_id);
        }
        return sprintf(buf, "\n");
 }
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 6d066cf354789313aaf3b9d49d1a8cc9110d5ed8..db4d091be60b721dfdaaa1b944814396f29bc471 100644
@@ -261,14 +261,15 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
 static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
 {
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-       struct render_mmio *mmio;
-       u32 v;
-       int i, array_size;
-       u32 *reg_state = vgpu->shadow_ctx->engine[ring_id].lrc_reg_state;
+       struct intel_vgpu_submission *s = &vgpu->submission;
+       u32 *reg_state = s->shadow_ctx->engine[ring_id].lrc_reg_state;
        u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
        u32 inhibit_mask =
                _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
        i915_reg_t last_reg = _MMIO(0);
+       struct render_mmio *mmio;
+       u32 v;
+       int i, array_size;
 
        if (IS_SKYLAKE(vgpu->gvt->dev_priv)
                || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 81952139b00ccc22fb52682ba86640c722ff30f5..864a2bc06e45c5e2037f4b6fda986e49136b0e8e 100644
@@ -57,7 +57,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        int ring_id = workload->ring_id;
-       struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+       struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
        struct drm_i915_gem_object *ctx_obj =
                shadow_ctx->engine[ring_id].state->obj;
        struct execlist_ring_context *shadow_ring_context;
@@ -249,12 +249,13 @@ void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
  */
 int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 {
+       struct intel_vgpu *vgpu = workload->vgpu;
+       struct intel_vgpu_submission *s = &vgpu->submission;
+       struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        int ring_id = workload->ring_id;
-       struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
-       struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine = dev_priv->engine[ring_id];
        struct drm_i915_gem_request *rq;
-       struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_ring *ring;
        int ret;
 
@@ -267,7 +268,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
        shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
                                    GEN8_CTX_ADDRESSING_MODE_SHIFT;
 
-       if (!test_and_set_bit(ring_id, vgpu->shadow_ctx_desc_updated))
+       if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
                shadow_context_descriptor_update(shadow_ctx,
                                        dev_priv->engine[ring_id]);
 
@@ -326,9 +327,11 @@ err_scan:
 
 static int dispatch_workload(struct intel_vgpu_workload *workload)
 {
+       struct intel_vgpu *vgpu = workload->vgpu;
+       struct intel_vgpu_submission *s = &vgpu->submission;
+       struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        int ring_id = workload->ring_id;
-       struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
-       struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine = dev_priv->engine[ring_id];
        int ret = 0;
 
@@ -414,7 +417,7 @@ static struct intel_vgpu_workload *pick_next_workload(
 
        gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
 
-       atomic_inc(&workload->vgpu->running_workload_num);
+       atomic_inc(&workload->vgpu->submission.running_workload_num);
 out:
        mutex_unlock(&gvt->lock);
        return workload;
@@ -424,8 +427,9 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 {
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
+       struct intel_vgpu_submission *s = &vgpu->submission;
+       struct i915_gem_context *shadow_ctx = s->shadow_ctx;
        int ring_id = workload->ring_id;
-       struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_gem_object *ctx_obj =
                shadow_ctx->engine[ring_id].state->obj;
        struct execlist_ring_context *shadow_ring_context;
@@ -491,15 +495,14 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 {
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-       struct intel_vgpu_workload *workload;
-       struct intel_vgpu *vgpu;
+       struct intel_vgpu_workload *workload =
+               scheduler->current_workload[ring_id];
+       struct intel_vgpu *vgpu = workload->vgpu;
+       struct intel_vgpu_submission *s = &vgpu->submission;
        int event;
 
        mutex_lock(&gvt->lock);
 
-       workload = scheduler->current_workload[ring_id];
-       vgpu = workload->vgpu;
-
        /* For the workload w/ request, needs to wait for the context
         * switch to make sure request is completed.
         * For the workload w/o request, directly complete the workload.
@@ -536,7 +539,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
                }
                mutex_lock(&dev_priv->drm.struct_mutex);
                /* unpin shadow ctx as the shadow_ctx update is done */
-               engine->context_unpin(engine, workload->vgpu->shadow_ctx);
+               engine->context_unpin(engine, s->shadow_ctx);
                mutex_unlock(&dev_priv->drm.struct_mutex);
        }
 
@@ -548,7 +551,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
        list_del_init(&workload->list);
        workload->complete(workload);
 
-       atomic_dec(&vgpu->running_workload_num);
+       atomic_dec(&s->running_workload_num);
        wake_up(&scheduler->workload_complete_wq);
 
        if (gvt->scheduler.need_reschedule)
@@ -637,14 +640,15 @@ complete:
 
 void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
 {
+       struct intel_vgpu_submission *s = &vgpu->submission;
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 
-       if (atomic_read(&vgpu->running_workload_num)) {
+       if (atomic_read(&s->running_workload_num)) {
                gvt_dbg_sched("wait vgpu idle\n");
 
                wait_event(scheduler->workload_complete_wq,
-                               !atomic_read(&vgpu->running_workload_num));
+                               !atomic_read(&s->running_workload_num));
        }
 }
 
@@ -718,8 +722,10 @@ err:
  */
 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
 {
-       i915_gem_context_put(vgpu->shadow_ctx);
-       kmem_cache_destroy(vgpu->workloads);
+       struct intel_vgpu_submission *s = &vgpu->submission;
+
+       i915_gem_context_put(s->shadow_ctx);
+       kmem_cache_destroy(s->workloads);
 }
 
 /**
@@ -734,35 +740,36 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
  */
 int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 {
+       struct intel_vgpu_submission *s = &vgpu->submission;
        enum intel_engine_id i;
        struct intel_engine_cs *engine;
        int ret;
 
-       vgpu->shadow_ctx = i915_gem_context_create_gvt(
+       s->shadow_ctx = i915_gem_context_create_gvt(
                        &vgpu->gvt->dev_priv->drm);
-       if (IS_ERR(vgpu->shadow_ctx))
-               return PTR_ERR(vgpu->shadow_ctx);
+       if (IS_ERR(s->shadow_ctx))
+               return PTR_ERR(s->shadow_ctx);
 
-       bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);
+       bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
 
-       vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
+       s->workloads = kmem_cache_create("gvt-g_vgpu_workload",
                        sizeof(struct intel_vgpu_workload), 0,
                        SLAB_HWCACHE_ALIGN,
                        NULL);
 
-       if (!vgpu->workloads) {
+       if (!s->workloads) {
                ret = -ENOMEM;
                goto out_shadow_ctx;
        }
 
        for_each_engine(engine, vgpu->gvt->dev_priv, i)
-               INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
+               INIT_LIST_HEAD(&s->workload_q_head[i]);
 
-       atomic_set(&vgpu->running_workload_num, 0);
+       atomic_set(&s->running_workload_num, 0);
 
        return 0;
 
 out_shadow_ctx:
-       i915_gem_context_put(vgpu->shadow_ctx);
+       i915_gem_context_put(s->shadow_ctx);
        return ret;
 }
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index c216aefaa73e168a4101c4bc69f7a530c1c487a8..3ca0087f10b20dfdffdcfe361b89acdf04ee2714 100644
@@ -122,7 +122,7 @@ struct intel_shadow_bb_entry {
 };
 
 #define workload_q_head(vgpu, ring_id) \
-       (&(vgpu->workload_q_head[ring_id]))
+       (&(vgpu->submission.workload_q_head[ring_id]))
 
 #define queue_workload(workload) do { \
        list_add_tail(&workload->list, \
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 3d69871d28e9ef746c536880ca0116a523ff11ee..35a5ec20120628140240ecee670b2f818446d5de 100644
@@ -226,7 +226,7 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
 
        vgpu->active = false;
 
-       if (atomic_read(&vgpu->running_workload_num)) {
+       if (atomic_read(&vgpu->submission.running_workload_num)) {
                mutex_unlock(&gvt->lock);
                intel_gvt_wait_vgpu_idle(vgpu);
                mutex_lock(&gvt->lock);
@@ -293,7 +293,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
        vgpu->gvt = gvt;
 
        for (i = 0; i < I915_NUM_ENGINES; i++)
-               INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
+               INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);
 
        ret = intel_vgpu_init_sched_policy(vgpu);
        if (ret)