}
}

-#define RESERVE_RING_BUFFER_SIZE	((1 * PAGE_SIZE)/8)
int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
{
-	struct intel_vgpu_submission *s = &vgpu->submission;
	enum intel_engine_id i;
	struct intel_engine_cs *engine;

	for_each_engine(engine, vgpu->gvt->dev_priv, i)
		init_vgpu_execlist(vgpu, i);

-	/* each ring has a shadow ring buffer until vgpu destroyed */
-	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
-		s->ring_scan_buffer[i] =
-			kmalloc(RESERVE_RING_BUFFER_SIZE, GFP_KERNEL);
-		if (!s->ring_scan_buffer[i]) {
-			gvt_vgpu_err("fail to alloc ring scan buffer\n");
-			goto out;
-		}
-		s->ring_scan_buffer_size[i] = RESERVE_RING_BUFFER_SIZE;
-	}
	return 0;
-out:
-	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
-		if (s->ring_scan_buffer_size[i]) {
-			kfree(s->ring_scan_buffer[i]);
-			s->ring_scan_buffer[i] = NULL;
-			s->ring_scan_buffer_size[i] = 0;
-		}
-	}
-	return -ENOMEM;
}
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
			       unsigned long engine_mask)
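The hunk above drops the up-front reservation: RESERVE_RING_BUFFER_SIZE is PAGE_SIZE/8, i.e. 512 bytes per engine on 4 KiB pages, which is the "1/2K" the comment below refers to. With init no longer allocating, the scan buffer has to grow on demand when a workload's ring buffer is actually scanned. A minimal sketch of that pattern, assuming the scan path (e.g. the command parser) sees the required length as workload->rb_len and the engine index as ring_id; both names, and the surrounding error handling, are illustrative here:

	struct intel_vgpu_submission *s = &workload->vgpu->submission;

	/* grow the per-engine scan buffer only if this ring buffer won't fit */
	if (workload->rb_len > s->ring_scan_buffer_size[ring_id]) {
		void *p;

		/* krealloc(NULL, ...) behaves like kmalloc(), so an engine
		 * that has never been scanned starts from an empty buffer */
		p = krealloc(s->ring_scan_buffer[ring_id], workload->rb_len,
			     GFP_KERNEL);
		if (!p) {
			gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
			return -ENOMEM;
		}
		s->ring_scan_buffer[ring_id] = p;
		s->ring_scan_buffer_size[ring_id] = workload->rb_len;
	}

This trades a guaranteed 512 bytes per engine for allocations sized by the largest ring buffer actually submitted, and engines that never run a workload allocate nothing. The field declarations themselves survive in what appears to be struct intel_vgpu_submission (per the vgpu->submission reference removed above); only the sizing comment goes away: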
	struct i915_gem_context *shadow_ctx;
	DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
-	/* 1/2K for each engine */
	void *ring_scan_buffer[I915_NUM_ENGINES];
	int ring_scan_buffer_size[I915_NUM_ENGINES];
};
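Since init no longer preallocates, ownership of these buffers shifts entirely to the teardown path: whichever hook runs at vGPU destruction must free whatever krealloc() built up. A sketch under that assumption (clean_execlist is a hypothetical name for that hook):

static void clean_execlist(struct intel_vgpu *vgpu)	/* hypothetical name */
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_engine_cs *engine;
	enum intel_engine_id i;

	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
		/* kfree(NULL) is a no-op, so engines that never scanned
		 * a workload need no special-casing */
		kfree(s->ring_scan_buffer[i]);
		s->ring_scan_buffer[i] = NULL;
		s->ring_scan_buffer_size[i] = 0;
	}
}

Note that the size check from the removed error path becomes unnecessary here: because kfree(NULL) is a no-op, the pointers can be freed unconditionally.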