struct i915_request *request; /** our request to build */
struct i915_vma *batch; /** identity of the batch obj/vma */
+ struct i915_vma *trampoline; /** trampoline used for chaining */
/** actual size of execobj[] as we may extend it for the cmdparser */
unsigned int buffer_count;
};
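+/*
+ * Pin the shadow batch into the address space chosen by the caller:
+ * the context vm with PIN_USER, or the engine's GGTT with PIN_GLOBAL
+ * for the privileged cmdparser path.
+ */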
static struct i915_vma *
-shadow_batch_pin(struct i915_execbuffer *eb, struct drm_i915_gem_object *obj)
+shadow_batch_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ unsigned int flags)
{
- struct i915_address_space *vm;
struct i915_vma *vma;
- u64 flags;
int err;
- /*
- * PPGTT backed shadow buffers must be mapped RO, to prevent
- * post-scan tampering
- */
- if (CMDPARSER_USES_GGTT(eb->i915)) {
- vm = &eb->engine->gt->ggtt->vm;
- flags = PIN_GLOBAL;
- } else {
- vm = eb->context->vm;
- if (!vm->has_read_only) {
- DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
- return ERR_PTR(-EINVAL);
- }
-
- i915_gem_object_set_readonly(obj);
- flags = PIN_USER;
- }
-
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma))
return vma;
static int eb_parse(struct i915_execbuffer *eb)
{
struct intel_engine_pool_node *pool;
- struct i915_vma *vma;
+ struct i915_vma *shadow, *trampoline;
+ unsigned int len;
int err;
if (!eb_use_cmdparser(eb))
return 0;
- pool = intel_engine_get_pool(eb->engine, eb->batch_len);
+ len = eb->batch_len;
+ if (!CMDPARSER_USES_GGTT(eb->i915)) {
+ /*
+ * ppGTT-backed shadow buffers must be mapped RO to prevent
+ * post-scan tampering
+ */
+ if (!eb->context->vm->has_read_only) {
+ DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
+ return -EINVAL;
+ }
+ } else {
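+ /*
+ * Reserve room after the shadow copy for the trampoline: space
+ * for a single MI_BATCH_BUFFER_START (the 8 bytes flushed at
+ * batch_end by the parser) used to chain back to the original,
+ * unprivileged batch should it fail validation.
+ */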
+ len += I915_CMD_PARSER_TRAMPOLINE_SIZE;
+ }
+
+ pool = intel_engine_get_pool(eb->engine, len);
if (IS_ERR(pool))
return PTR_ERR(pool);
- vma = shadow_batch_pin(eb, pool->obj);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
+ shadow = shadow_batch_pin(pool->obj, eb->context->vm, PIN_USER);
+ if (IS_ERR(shadow)) {
+ err = PTR_ERR(shadow);
goto err;
}
+ i915_gem_object_set_readonly(shadow->obj);
+
+ trampoline = NULL;
+ if (CMDPARSER_USES_GGTT(eb->i915)) {
+ trampoline = shadow;
+
+ shadow = shadow_batch_pin(pool->obj,
+ &eb->engine->gt->ggtt->vm,
+ PIN_GLOBAL);
+ if (IS_ERR(shadow)) {
+ err = PTR_ERR(shadow);
+ shadow = trampoline;
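+ /* leave err_shadow to unpin the first binding */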
+ goto err_shadow;
+ }
+
+ eb->batch_flags |= I915_DISPATCH_SECURE;
+ }
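+ /*
+ * In the GGTT case pool->obj now carries two bindings: the
+ * first (saved as the trampoline above) and this GGTT one used
+ * as the privileged shadow. Both alias the same backing store.
+ */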
err = intel_engine_cmd_parser(eb->engine,
eb->batch,
eb->batch_start_offset,
eb->batch_len,
- vma);
- if (err) {
- /*
- * Unsafe GGTT-backed buffers can still be submitted safely
- * as non-secure.
- * For PPGTT backing however, we have no choice but to forcibly
- * reject unsafe buffers
- */
- if (i915_vma_is_ggtt(vma) && err == -EACCES)
- err = 0;
-
- goto err_unpin;
- }
+ shadow, trampoline);
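+ /*
+ * NB: the parser takes @trampoline as a bool; passing the vma
+ * simply selects trampoline mode whenever it is non-NULL.
+ */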
+ if (err)
+ goto err_trampoline;
- eb->vma[eb->buffer_count] = i915_vma_get(vma);
+ eb->vma[eb->buffer_count] = i915_vma_get(shadow);
eb->flags[eb->buffer_count] =
__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
- vma->exec_flags = &eb->flags[eb->buffer_count];
+ shadow->exec_flags = &eb->flags[eb->buffer_count];
eb->buffer_count++;
+ eb->trampoline = trampoline;
eb->batch_start_offset = 0;
- eb->batch = vma;
-
- if (i915_vma_is_ggtt(vma))
- eb->batch_flags |= I915_DISPATCH_SECURE;
-
- /* eb->batch_len unchanged */
+ eb->batch = shadow;
- vma->private = pool;
+ shadow->private = pool;
return 0;
-err_unpin:
- i915_vma_unpin(vma);
+err_trampoline:
+ if (trampoline)
+ i915_vma_unpin(trampoline);
+err_shadow:
+ i915_vma_unpin(shadow);
err:
intel_engine_pool_put(pool);
return err;
if (err)
return err;
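+ /*
+ * Second, unprivileged dispatch into the shadow bo at offset
+ * batch_len: the parser left either MI_BATCH_BUFFER_END there
+ * (the batch was valid and has already run in full) or a jump
+ * into the original, unvalidated batch.
+ */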
+ if (eb->trampoline) {
+ GEM_BUG_ON(eb->batch_start_offset);
+ err = eb->engine->emit_bb_start(eb->request,
+ eb->trampoline->node.start +
+ eb->batch_len,
+ 0, 0);
+ if (err)
+ return err;
+ }
+
if (i915_gem_context_nopreempt(eb->gem_context))
eb->request->flags |= I915_REQUEST_NOPREEMPT;
eb.buffer_count = args->buffer_count;
eb.batch_start_offset = args->batch_start_offset;
eb.batch_len = args->batch_len;
+ eb.trampoline = NULL;
eb.batch_flags = 0;
if (args->flags & I915_EXEC_SECURE) {
err_vma:
if (eb.exec)
eb_release_vmas(&eb);
+ if (eb.trampoline)
+ i915_vma_unpin(eb.trampoline);
mutex_unlock(&dev->struct_mutex);
err_engine:
eb_unpin_engine(&eb);
return 0;
}
-static unsigned long *
-alloc_whitelist(struct drm_i915_private *i915, u32 batch_length)
+static unsigned long *alloc_whitelist(u32 batch_length)
{
unsigned long *jmp;
* reasonably cheap due to kmalloc caches.
*/
- if (CMDPARSER_USES_GGTT(i915))
- return NULL;
-
/* Prefer to report transient allocation failure rather than hit oom */
jmp = bitmap_zalloc(DIV_ROUND_UP(batch_length, sizeof(u32)),
GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
* @batch_offset: byte offset in the batch at which execution starts
* @batch_length: length of the commands in batch_obj
* @shadow: validated copy of the batch buffer in question
+ * @trampoline: whether to emit a conditional trampoline at the end of the shadow batch
*
* Parses the specified batch buffer looking for privilege violations as
* described in the overview.
struct i915_vma *batch,
u32 batch_offset,
u32 batch_length,
- struct i915_vma *shadow)
+ struct i915_vma *shadow,
+ bool trampoline)
{
u32 *cmd, *batch_end, offset = 0;
struct drm_i915_cmd_descriptor default_desc = noop_desc;
return PTR_ERR(cmd);
}
- /* Defer failure until attempted use */
- jump_whitelist = alloc_whitelist(engine->i915, batch_length);
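+ /*
+ * Trampoline mode does not need the jump whitelist (just as
+ * the GGTT path skipped it before): an unsafe batch is re-run
+ * unprivileged via the trampoline rather than rejected outright.
+ */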
+ jump_whitelist = NULL;
+ if (!trampoline)
+ /* Defer failure until attempted use */
+ jump_whitelist = alloc_whitelist(batch_length);
shadow_addr = gen8_canonical_addr(shadow->node.start);
batch_addr = gen8_canonical_addr(batch->node.start + batch_offset);
}
} while (1);
+ if (trampoline) {
+ /*
+ * With the trampoline, the shadow is executed twice.
+ *
+ * 1 - starting at offset 0, in privileged mode
+ * 2 - starting at offset batch_len, as non-privileged
+ *
+ * Only if the batch is valid and safe to execute, do we
+ * allow the first privileged execution to proceed. If not,
+ * we terminate the first batch and use the second batchbuffer
+ * entry to chain to the original unsafe non-privileged batch,
+ * leaving it to the HW to validate.
+ */
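+ /*
+ * Resulting shadow layout (trampoline slot at offset batch_len):
+ *
+ * valid: [ copy of batch ][ BB_END ]
+ * unsafe: [ BB_END, rest of copy ][ BB_START -> original batch ]
+ */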
+ *batch_end = MI_BATCH_BUFFER_END;
+
+ if (ret) {
+ /* Batch unsafe to execute with privileges, cancel! */
+ cmd = page_mask_bits(shadow->obj->mm.mapping);
+ *cmd = MI_BATCH_BUFFER_END;
+
+ /* If batch is unsafe but valid, jump to the original */
+ if (ret == -EACCES) {
+ unsigned int flags;
+
+ flags = MI_BATCH_NON_SECURE_I965;
+ if (IS_HASWELL(engine->i915))
+ flags = MI_BATCH_NON_SECURE_HSW;
+
+ GEM_BUG_ON(!IS_GEN_RANGE(engine->i915, 6, 7));
+ __gen6_emit_bb_start(batch_end,
+ batch_addr,
+ flags);
+
+ ret = 0; /* allow execution */
+ }
+ }
+
+ if (needs_clflush_after)
+ drm_clflush_virt_range(batch_end, 8);
+ }
+
if (needs_clflush_after) {
void *ptr = page_mask_bits(shadow->obj->mm.mapping);