drm/i915: Emit await(batch) before MI_BB_START
author     Chris Wilson <chris@chris-wilson.co.uk>
           Sun, 10 May 2020 10:24:29 +0000 (11:24 +0100)
committer  Chris Wilson <chris@chris-wilson.co.uk>
           Mon, 11 May 2020 15:50:04 +0000 (16:50 +0100)
Be consistent and ensure that we always emit the asynchronous waits
prior to issuing instructions that use the address. This ensures that if
we do emit GPU commands to perform the await, they land before our use
of the address.

Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20200510102431.21959-1-chris@chris-wilson.co.uk
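
For reference, a minimal sketch of the ordering this patch enforces,
assembled from the helpers that appear in the diff below. The wrapper
name (emit_batch) and the surrounding request/vma setup are assumptions
for illustration, not code from the tree:

    static int emit_batch(struct i915_request *rq, struct i915_vma *batch)
    {
            int err;

            /* First queue the asynchronous waits and track the batch. */
            i915_vma_lock(batch);
            err = i915_request_await_object(rq, batch->obj, false);
            if (err == 0)
                    err = i915_vma_move_to_active(batch, rq, 0);
            i915_vma_unlock(batch);
            if (err)
                    return err;

            /* Only then emit GPU commands that use the batch address. */
            if (rq->engine->emit_init_breadcrumb) {
                    err = rq->engine->emit_init_breadcrumb(rq);
                    if (err)
                            return err;
            }

            return rq->engine->emit_bb_start(rq,
                                             batch->node.start,
                                             batch->node.size,
                                             0);
    }

Each call site in the diff follows this shape; igt_gpu_fill_dw
additionally lets an emit_bb_start() failure fall through into its
skip_request error handling.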
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
drivers/gpu/drm/i915/gt/intel_renderstate.c
drivers/gpu/drm/i915/selftests/i915_request.c

diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 87d264fe54b2a7165d4258291d59b7f16411aec9..b8197889064141b366de1accd2b387d773eb9bfa 100644
@@ -972,12 +972,6 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
                goto err_batch;
        }
 
-       err = rq->engine->emit_bb_start(rq,
-                                       batch->node.start, batch->node.size,
-                                       0);
-       if (err)
-               goto err_request;
-
        i915_vma_lock(batch);
        err = i915_request_await_object(rq, batch->obj, false);
        if (err == 0)
@@ -994,6 +988,18 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
        if (err)
                goto skip_request;
 
+       if (rq->engine->emit_init_breadcrumb) {
+               err = rq->engine->emit_init_breadcrumb(rq);
+               if (err)
+                       goto skip_request;
+       }
+
+       err = rq->engine->emit_bb_start(rq,
+                                       batch->node.start, batch->node.size,
+                                       0);
+       if (err)
+               goto skip_request;
+
        i915_vma_unpin_and_release(&batch, 0);
        i915_vma_unpin(vma);
 
@@ -1005,7 +1011,6 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
 
 skip_request:
        i915_request_set_error_once(rq, err);
-err_request:
        i915_request_add(rq);
 err_batch:
        i915_vma_unpin_and_release(&batch, 0);
@@ -1541,10 +1546,6 @@ static int write_to_scratch(struct i915_gem_context *ctx,
                goto err_unpin;
        }
 
-       err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
-       if (err)
-               goto err_request;
-
        i915_vma_lock(vma);
        err = i915_request_await_object(rq, vma->obj, false);
        if (err == 0)
@@ -1553,6 +1554,16 @@ static int write_to_scratch(struct i915_gem_context *ctx,
        if (err)
                goto skip_request;
 
+       if (rq->engine->emit_init_breadcrumb) {
+               err = rq->engine->emit_init_breadcrumb(rq);
+               if (err)
+                       goto skip_request;
+       }
+
+       err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
+       if (err)
+               goto skip_request;
+
        i915_vma_unpin(vma);
 
        i915_request_add(rq);
@@ -1560,7 +1571,6 @@ static int write_to_scratch(struct i915_gem_context *ctx,
        goto out_vm;
 skip_request:
        i915_request_set_error_once(rq, err);
-err_request:
        i915_request_add(rq);
 err_unpin:
        i915_vma_unpin(vma);
@@ -1674,10 +1684,6 @@ static int read_from_scratch(struct i915_gem_context *ctx,
                goto err_unpin;
        }
 
-       err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, flags);
-       if (err)
-               goto err_request;
-
        i915_vma_lock(vma);
        err = i915_request_await_object(rq, vma->obj, true);
        if (err == 0)
@@ -1686,6 +1692,16 @@ static int read_from_scratch(struct i915_gem_context *ctx,
        if (err)
                goto skip_request;
 
+       if (rq->engine->emit_init_breadcrumb) {
+               err = rq->engine->emit_init_breadcrumb(rq);
+               if (err)
+                       goto skip_request;
+       }
+
+       err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, flags);
+       if (err)
+               goto skip_request;
+
        i915_vma_unpin(vma);
 
        i915_request_add(rq);
@@ -1708,7 +1724,6 @@ static int read_from_scratch(struct i915_gem_context *ctx,
        goto out_vm;
 skip_request:
        i915_request_set_error_once(rq, err);
-err_request:
        i915_request_add(rq);
 err_unpin:
        i915_vma_unpin(vma);
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index 772d8cba7da9715e3efeaa07955d08cf13dc7011..8b3925b4036ffdc669f1542adbb990aced2232a2 100644
@@ -83,6 +83,7 @@ igt_emit_store_dw(struct i915_vma *vma,
                offset += PAGE_SIZE;
        }
        *cmd = MI_BATCH_BUFFER_END;
+
        i915_gem_object_unpin_map(obj);
 
        intel_gt_chipset_flush(vma->vm->gt);
@@ -126,16 +127,6 @@ int igt_gpu_fill_dw(struct intel_context *ce,
                goto err_batch;
        }
 
-       flags = 0;
-       if (INTEL_GEN(ce->vm->i915) <= 5)
-               flags |= I915_DISPATCH_SECURE;
-
-       err = rq->engine->emit_bb_start(rq,
-                                       batch->node.start, batch->node.size,
-                                       flags);
-       if (err)
-               goto err_request;
-
        i915_vma_lock(batch);
        err = i915_request_await_object(rq, batch->obj, false);
        if (err == 0)
@@ -152,15 +143,17 @@ int igt_gpu_fill_dw(struct intel_context *ce,
        if (err)
                goto skip_request;
 
-       i915_request_add(rq);
-
-       i915_vma_unpin_and_release(&batch, 0);
+       flags = 0;
+       if (INTEL_GEN(ce->vm->i915) <= 5)
+               flags |= I915_DISPATCH_SECURE;
 
-       return 0;
+       err = rq->engine->emit_bb_start(rq,
+                                       batch->node.start, batch->node.size,
+                                       flags);
 
 skip_request:
-       i915_request_set_error_once(rq, err);
-err_request:
+       if (err)
+               i915_request_set_error_once(rq, err);
        i915_request_add(rq);
 err_batch:
        i915_vma_unpin_and_release(&batch, 0);
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index 708cb78088655d91200273cfe2d02fe1ca4cfe28..f59e7875cc5ec9255252d6a0f0e7636a5aab0461 100644
@@ -219,6 +219,14 @@ int intel_renderstate_emit(struct intel_renderstate *so,
        if (!so->vma)
                return 0;
 
+       i915_vma_lock(so->vma);
+       err = i915_request_await_object(rq, so->vma->obj, false);
+       if (err == 0)
+               err = i915_vma_move_to_active(so->vma, rq, 0);
+       i915_vma_unlock(so->vma);
+       if (err)
+               return err;
+
        err = engine->emit_bb_start(rq,
                                    so->batch_offset, so->batch_size,
                                    I915_DISPATCH_SECURE);
@@ -233,13 +241,7 @@ int intel_renderstate_emit(struct intel_renderstate *so,
                        return err;
        }
 
-       i915_vma_lock(so->vma);
-       err = i915_request_await_object(rq, so->vma->obj, false);
-       if (err == 0)
-               err = i915_vma_move_to_active(so->vma, rq, 0);
-       i915_vma_unlock(so->vma);
-
-       return err;
+       return 0;
 }
 
 void intel_renderstate_fini(struct intel_renderstate *so)
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 15b1ca9f7a01c468eb4af3b1f1193ab67c8dacbe..ffdfcb3805b533b9b27190fe35b83ff588f72fa8 100644
@@ -865,13 +865,6 @@ static int live_all_engines(void *arg)
                        goto out_request;
                }
 
-               err = engine->emit_bb_start(request[idx],
-                                           batch->node.start,
-                                           batch->node.size,
-                                           0);
-               GEM_BUG_ON(err);
-               request[idx]->batch = batch;
-
                i915_vma_lock(batch);
                err = i915_request_await_object(request[idx], batch->obj, 0);
                if (err == 0)
@@ -879,6 +872,13 @@ static int live_all_engines(void *arg)
                i915_vma_unlock(batch);
                GEM_BUG_ON(err);
 
+               err = engine->emit_bb_start(request[idx],
+                                           batch->node.start,
+                                           batch->node.size,
+                                           0);
+               GEM_BUG_ON(err);
+               request[idx]->batch = batch;
+
                i915_request_get(request[idx]);
                i915_request_add(request[idx]);
                idx++;
@@ -993,13 +993,6 @@ static int live_sequential_engines(void *arg)
                        }
                }
 
-               err = engine->emit_bb_start(request[idx],
-                                           batch->node.start,
-                                           batch->node.size,
-                                           0);
-               GEM_BUG_ON(err);
-               request[idx]->batch = batch;
-
                i915_vma_lock(batch);
                err = i915_request_await_object(request[idx],
                                                batch->obj, false);
@@ -1008,6 +1001,13 @@ static int live_sequential_engines(void *arg)
                i915_vma_unlock(batch);
                GEM_BUG_ON(err);
 
+               err = engine->emit_bb_start(request[idx],
+                                           batch->node.start,
+                                           batch->node.size,
+                                           0);
+               GEM_BUG_ON(err);
+               request[idx]->batch = batch;
+
                i915_request_get(request[idx]);
                i915_request_add(request[idx]);