if (ret)
goto free_all_kdata;
- p->job->secure = cs->in.flags & AMDGPU_CS_FLAGS_SECURE;
-
if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
ret = -ECANCELED;
goto free_all_kdata;
uint64_t fence_ctx;
uint32_t status = 0, alloc_size;
unsigned fence_flags = 0;
+ bool secure;
unsigned i;
int r = 0;
if (job && ring->funcs->emit_cntxcntl) {
status |= job->preamble_status;
status |= job->preemption_status;
- amdgpu_ring_emit_cntxcntl(ring, status, job->secure);
+ amdgpu_ring_emit_cntxcntl(ring, status);
}
+ secure = false;
for (i = 0; i < num_ibs; ++i) {
ib = &ibs[i];
/* drop preamble IBs if we don't have a context switch */
if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
skip_preamble &&
!(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
!amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
continue;
+ /* If this IB is secure (TMZ), emit a frame TMZ start packet before it;
+ * otherwise close any TMZ frame that is still open.
+ */
+ if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) && ring->funcs->emit_tmz) {
+ if (!secure) {
+ secure = true;
+ amdgpu_ring_emit_tmz(ring, true);
+ }
+ } else if (secure) {
+ secure = false;
+ amdgpu_ring_emit_tmz(ring, false);
+ }
+
amdgpu_ring_emit_ib(ring, job, ib, status);
status &= ~AMDGPU_HAVE_CTX_SWITCH;
}
- if (ring->funcs->emit_tmz)
- amdgpu_ring_emit_tmz(ring, false, job ? job->secure : false);
+ if (secure) {
+ secure = false;
+ amdgpu_ring_emit_tmz(ring, false);
+ }
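To make the control flow above easier to follow, here is a minimal, standalone model of the toggle (hypothetical struct ib and printf stand-ins for the ring writes, not driver code; the ring->funcs->emit_tmz check is omitted for brevity). For IBs flagged [secure, secure, normal, secure] it brackets each secure run with exactly one begin/end pair:

    #include <stdbool.h>
    #include <stdio.h>

    #define AMDGPU_IB_FLAGS_SECURE (1 << 5)

    struct ib { unsigned flags; };  /* stand-in for struct amdgpu_ib */

    static void schedule_ibs(const struct ib *ibs, unsigned num_ibs)
    {
            bool secure = false;
            unsigned i;

            for (i = 0; i < num_ibs; ++i) {
                    if (ibs[i].flags & AMDGPU_IB_FLAGS_SECURE) {
                            if (!secure) {
                                    secure = true;
                                    puts("FRAME_CONTROL: TMZ begin");
                            }
                    } else if (secure) {
                            secure = false;
                            puts("FRAME_CONTROL: TMZ end");
                    }
                    printf("emit IB %u\n", i);
            }
            if (secure) /* close a trailing secure run */
                    puts("FRAME_CONTROL: TMZ end");
    }

    int main(void)
    {
            const struct ib ibs[] = {
                    { AMDGPU_IB_FLAGS_SECURE },
                    { AMDGPU_IB_FLAGS_SECURE },
                    { 0 },
                    { AMDGPU_IB_FLAGS_SECURE },
            };

            schedule_ibs(ibs, 4);
            return 0;
    }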
#ifdef CONFIG_X86_64
if (!(adev->flags & AMD_IS_APU))
/* user fence handling */
uint64_t uf_addr;
uint64_t uf_sequence;
-
- /* the job is due to a secure command submission */
- bool secure;
};
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
void (*begin_use)(struct amdgpu_ring *ring);
void (*end_use)(struct amdgpu_ring *ring);
void (*emit_switch_buffer) (struct amdgpu_ring *ring);
- void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags,
- bool trusted);
+ void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg,
uint32_t reg_val_offs);
void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask);
- void (*emit_tmz)(struct amdgpu_ring *ring, bool start, bool trusted);
+ void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
/* Try to soft recover the ring to make the fence signal */
void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
int (*preempt_ib)(struct amdgpu_ring *ring);
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
-#define amdgpu_ring_emit_cntxcntl(r, d, s) (r)->funcs->emit_cntxcntl((r), (d), (s))
+#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
#define amdgpu_ring_emit_rreg(r, d, o) (r)->funcs->emit_rreg((r), (d), (o))
#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
-#define amdgpu_ring_emit_tmz(r, b, s) (r)->funcs->emit_tmz((r), (b), (s))
+#define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
-static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start,
- bool trusted);
+static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start);
static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
}
static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
- uint32_t flags,
- bool trusted)
+ uint32_t flags)
{
uint32_t dw2 = 0;
gfx_v10_0_ring_emit_ce_meta(ring,
(!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
- gfx_v10_0_ring_emit_tmz(ring, true, trusted);
-
dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
if (flags & AMDGPU_HAVE_CTX_SWITCH) {
/* set load_global_config & load_global_uconfig */
sizeof(de_payload) >> 2);
}
-static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start,
- bool trusted)
+static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
{
- amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
- /*
- * cmd = 0: frame begin
- * cmd = 1: frame end
- */
- amdgpu_ring_write(ring,
- ((amdgpu_is_tmz(ring->adev) && trusted) ? FRAME_TMZ : 0)
- | FRAME_CMD(start ? 0 : 1));
+ if (amdgpu_is_tmz(ring->adev)) {
+ amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
+ /* FRAME_CMD: 0 = frame begin, 1 = frame end */
+ amdgpu_ring_write(ring, FRAME_TMZ | FRAME_CMD(start ? 0 : 1));
+ }
}
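The two-argument callbacks then plug into the ring funcs table with no other glue; an abridged sketch of the gfx_v10 table (only the members touched by this change are shown, the real table sets many more):

    static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
            /* ... other callbacks elided ... */
            .emit_cntxcntl = gfx_v10_0_ring_emit_cntxcntl,
            .emit_tmz = gfx_v10_0_ring_emit_tmz,
            /* ... */
    };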
static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
return clock;
}
-static void gfx_v6_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags,
- bool trusted)
+static void gfx_v6_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
if (flags & AMDGPU_HAVE_CTX_SWITCH)
gfx_v6_0_ring_emit_vgt_flush(ring);
amdgpu_ring_write(ring, control);
}
-static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags,
- bool trusted)
+static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
uint32_t dw2 = 0;
amdgpu_ring_write(ring, 0);
}
-static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags,
- bool trusted)
+static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
uint32_t dw2 = 0;
amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
}
-static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start,
- bool trusted)
+static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
{
- amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
- /*
- * cmd = 0: frame begin
- * cmd = 1: frame end
- */
- amdgpu_ring_write(ring,
- ((amdgpu_is_tmz(ring->adev) && trusted) ? FRAME_TMZ : 0)
- | FRAME_CMD(start ? 0 : 1));
+ if (amdgpu_is_tmz(ring->adev)) {
+ amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
+ /* FRAME_CMD: 0 = frame begin, 1 = frame end */
+ amdgpu_ring_write(ring, FRAME_TMZ | FRAME_CMD(start ? 0 : 1));
+ }
}
-static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags,
- bool trusted)
+static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
uint32_t dw2 = 0;
if (amdgpu_sriov_vf(ring->adev))
gfx_v9_0_ring_emit_ce_meta(ring);
- gfx_v9_0_ring_emit_tmz(ring, true, trusted);
-
dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
if (flags & AMDGPU_HAVE_CTX_SWITCH) {
/* set load_global_config & load_global_uconfig */
__u64 chunk_data;
};
-/* Flag the command submission as secure */
-#define AMDGPU_CS_FLAGS_SECURE (1 << 0)
-
struct drm_amdgpu_cs_in {
/** Rendering context id */
__u32 ctx_id;
*/
#define AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID (1 << 4)
+/* Flag the IB as secure (TMZ) */
+#define AMDGPU_IB_FLAGS_SECURE (1 << 5)
+
struct drm_amdgpu_cs_chunk_ib {
__u32 _pad;
/** AMDGPU_IB_FLAG_* */
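From userspace, secure submission is now requested per IB chunk rather than per CS; a hedged sketch (the libdrm CS submission plumbing around it and the ib_va/ib_size_dw values are assumed, not part of this patch):

    #include <string.h>
    #include <drm/amdgpu_drm.h> /* libdrm ships this as amdgpu_drm.h */

    /* Fill one IB chunk and mark it as a secure (TMZ) submission.
     * ib_va and ib_size_dw are hypothetical: GPU VA and dword size
     * of an IB the caller has already uploaded.
     */
    static void fill_secure_ib(struct drm_amdgpu_cs_chunk_ib *info,
                               __u64 ib_va, __u32 ib_size_dw)
    {
            memset(info, 0, sizeof(*info));
            info->flags = AMDGPU_IB_FLAGS_SECURE; /* per-IB, not per-CS */
            info->va_start = ib_va;
            info->ib_bytes = ib_size_dw * 4;
            info->ip_type = AMDGPU_HW_IP_GFX;
            info->ip_instance = 0;
            info->ring = 0;
    }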