u32 value)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_gem_context *ctx = ce->gem_context;
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct clear_pages_work *work;
struct i915_sleeve *sleeve;
int err;
- sleeve = create_sleeve(vm, obj, pages, page_sizes);
+ sleeve = create_sleeve(ce->vm, obj, pages, page_sizes);
if (IS_ERR(sleeve))
return PTR_ERR(sleeve);
__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
{
struct i915_address_space *old = ctx->vm;
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
ctx->vm = i915_vm_get(vm);
ctx->desc_template = default_desc_template(ctx->i915, vm);
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ i915_vm_put(ce->vm);
+ ce->vm = i915_vm_get(vm);
+ }
+ i915_gem_context_unlock_engines(ctx);
+
return old;
}
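
The hunk above is the heart of the change: every intel_context now holds its own reference to the address space it runs in, so swapping the GEM context's vm has to rotate each per-engine reference as well, while the old top-level pointer is handed back for the caller to release once it is safe to do so. A minimal userspace sketch of that ownership rule follows; the toy_* names are made up for illustration and this is not i915 code.

#include <stdlib.h>

struct toy_vm { int refcount; };

static struct toy_vm *vm_get(struct toy_vm *vm)
{
	vm->refcount++;
	return vm;
}

static void vm_put(struct toy_vm *vm)
{
	if (--vm->refcount == 0)
		free(vm);
}

struct toy_engine_ctx { struct toy_vm *vm; };

struct toy_gem_ctx {
	struct toy_vm *vm;
	struct toy_engine_ctx engines[2];
};

/* Mirrors the shape of __set_ppgtt(): install @vm on the context and on
 * every engine context, dropping each engine's old reference, and hand
 * the old top-level pointer back to the caller.
 */
static struct toy_vm *set_vm(struct toy_gem_ctx *ctx, struct toy_vm *vm)
{
	struct toy_vm *old = ctx->vm;
	size_t i;

	ctx->vm = vm_get(vm);	/* taken first, so @vm cannot vanish in the loop */
	for (i = 0; i < 2; i++) {
		vm_put(ctx->engines[i].vm);
		ctx->engines[i].vm = vm_get(vm);
	}

	return old;	/* the caller decides when to vm_put(old) */
}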
static int emit_ppgtt_update(struct i915_request *rq, void *data)
{
- struct i915_address_space *vm = rq->gem_context->vm;
+ struct i915_address_space *vm = rq->hw_context->vm;
struct intel_engine_cs *engine = rq->engine;
u32 base = engine->mmio_base;
u32 *cs;
set_ppgtt_barrier,
old);
if (err) {
- ctx->vm = old;
- ctx->desc_template = default_desc_template(ctx->i915, old);
- i915_vm_put(vm);
+ i915_vm_put(__set_ppgtt(ctx, old));
+ i915_vm_put(old);
}
unlock:
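
Continuing the toy model above, the error path reads naturally once the accounting is spelled out: __set_ppgtt(ctx, old) re-installs the old address space (taking fresh references on it) and returns the vm that had just been installed, whose reference is then dropped; the second put releases the reference on old that the caller inherited from the first swap, which would otherwise have been released once the barrier completed. A sketch of that unwind, reusing the toy helpers from the previous example:

/* Unwind after a failed barrier: restore 'old' everywhere, then drop
 * the two references the failed attempt left in the caller's hands.
 */
static void unwind_on_error(struct toy_gem_ctx *ctx, struct toy_vm *old)
{
	/* set_vm() returns the vm we had just installed; drop that reference */
	vm_put(set_vm(ctx, old));
	/* drop the reference on 'old' inherited from the first swap */
	vm_put(old);
}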
struct intel_engine_cs *engine; /** engine to queue the request to */
struct intel_context *context; /* logical state for the request */
struct i915_gem_context *gem_context; /** caller's context */
- struct i915_address_space *vm; /** GTT and vma for the request */
struct i915_request *request; /** our request to build */
struct i915_vma *batch; /** identity of the batch obj/vma */
case 1:
/* Too fragmented, unbind everything and retry */
- err = i915_gem_evict_vm(eb->vm);
+ err = i915_gem_evict_vm(eb->context->vm);
if (err)
return err;
break;
return -ENOENT;
eb->gem_context = ctx;
- if (ctx->vm) {
- eb->vm = ctx->vm;
+ if (ctx->vm)
eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
- } else {
- eb->vm = &eb->i915->ggtt.vm;
- }
eb->context_flags = 0;
if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
goto err_vma;
}
- vma = i915_vma_instance(obj, eb->vm, NULL);
+ vma = i915_vma_instance(obj, eb->context->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
struct intel_context *ce,
u32 value)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_gem_context *ctx = ce->gem_context;
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct i915_request *rq;
struct i915_vma *vma;
int err;
- /* XXX: ce->vm please */
- vma = i915_vma_instance(obj, vm, NULL);
+ vma = i915_vma_instance(obj, ce->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
- vma = i915_vma_instance(obj, ce->gem_context->vm, NULL);
+ vma = i915_vma_instance(obj, ce->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
kref_init(&ce->ref);
ce->gem_context = ctx;
+ ce->vm = i915_vm_get(ctx->vm ?: &engine->gt->ggtt->vm);
+
ce->engine = engine;
ce->ops = engine->cops;
ce->sseu = engine->sseu;
void intel_context_fini(struct intel_context *ce)
{
+ i915_vm_put(ce->vm);
+
mutex_destroy(&ce->pin_mutex);
i915_active_fini(&ce->active);
}
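
These two hunks pin down the lifetime: the vm reference is taken when the intel_context is created, falling back to the GGTT when the GEM context has no full PPGTT (the 'ctx->vm ?: &engine->gt->ggtt->vm' expression), and is only dropped in intel_context_fini(), so ce->vm stays valid for the whole life of the context without consulting the GEM context again. A sketch of that pairing in the same toy model, again illustrative only:

static void toy_context_init(struct toy_engine_ctx *ce,
			     struct toy_gem_ctx *gem_ctx,
			     struct toy_vm *ggtt_vm)
{
	/* hold a reference for the whole life of the engine context;
	 * the global GTT stands in when no full PPGTT exists */
	ce->vm = vm_get(gem_ctx->vm ? gem_ctx->vm : ggtt_vm);
}

static void toy_context_fini(struct toy_engine_ctx *ce)
{
	vm_put(ce->vm);	/* release the reference taken at init */
}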
struct intel_context {
struct kref ref;
- struct i915_gem_context *gem_context;
struct intel_engine_cs *engine;
struct intel_engine_cs *inflight;
#define intel_context_inflight(ce) ptr_mask_bits((ce)->inflight, 2)
#define intel_context_inflight_inc(ce) ptr_count_inc(&(ce)->inflight)
#define intel_context_inflight_dec(ce) ptr_count_dec(&(ce)->inflight)
+ struct i915_address_space *vm;
+ struct i915_gem_context *gem_context;
+
struct list_head signal_link;
struct list_head signals;
void *vaddr;
int ret;
- GEM_BUG_ON(!ce->gem_context->vm);
-
ret = execlists_context_deferred_alloc(ce, engine);
if (ret)
goto err;
static int emit_pdps(struct i915_request *rq)
{
const struct intel_engine_cs * const engine = rq->engine;
- struct i915_ppgtt * const ppgtt =
- i915_vm_to_ppgtt(rq->gem_context->vm);
+ struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->hw_context->vm);
int err, i;
u32 *cs;
*/
/* Unconditionally invalidate GPU caches and TLBs. */
- if (i915_vm_is_4lvl(request->gem_context->vm))
+ if (i915_vm_is_4lvl(request->hw_context->vm))
ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
else
ret = emit_pdps(request);
struct intel_engine_cs *engine,
struct intel_ring *ring)
{
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->gem_context->vm);
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm);
bool rcs = engine->class == RENDER_CLASS;
u32 base = engine->mmio_base;
{
struct i915_address_space *vm;
- vm = ce->gem_context->vm;
- if (!vm)
- vm = &ce->engine->gt->ggtt->alias->vm;
+ vm = ce->vm;
+ if (i915_is_ggtt(vm))
+ vm = &i915_vm_to_ggtt(vm)->alias->vm;
return vm;
}
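
With ce->vm always set, "no full PPGTT" is no longer a NULL pointer but ce->vm pointing at the global GTT, so the aliasing-PPGTT substitution above keys off i915_is_ggtt() instead of a NULL check. The same idea in the toy model; toy_ggtt and its alias pointer are made-up stand-ins:

struct toy_ggtt {
	struct toy_vm vm;	/* the global GTT is itself an address space */
	struct toy_vm *alias;	/* aliasing PPGTT backing GGTT-only contexts */
};

static struct toy_vm *toy_vm_alias(struct toy_engine_ctx *ce,
				   struct toy_ggtt *ggtt)
{
	struct toy_vm *vm = ce->vm;

	/* "no full PPGTT" is now spelled "ce->vm is the global GTT" */
	if (vm == &ggtt->vm)
		vm = ggtt->alias;

	return vm;
}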
intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
- i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->gem_context->vm));
+ i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
for_each_engine(engine, vgpu->gvt->dev_priv, id)
intel_context_unpin(s->shadow[id]);