ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
read = GEN8_CSB_READ_PTR(ptr);
write = GEN8_CSB_WRITE_PTR(ptr);
- seq_printf(m, "\tExeclist CSB read %d, write %d, interrupt posted? %s\n",
- read, write,
+ seq_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s\n",
+ read, engine->csb_head,
+ write,
+ intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
yesno(test_bit(ENGINE_IRQ_EXECLIST,
&engine->irq_posted)));
if (read >= GEN8_CSB_ENTRIES)
    read = 0;
if (write >= GEN8_CSB_ENTRIES)
    write = 0;
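
For reference (not part of this diff): GEN8 packs both CSB pointers into a
single dword of RING_CONTEXT_STATUS_PTR, which is what the helpers above
unpack. A sketch of those helpers, assuming the six-entry gen8 layout:

    /* Sketch: read pointer in bits 10:8, write pointer in bits 2:0. */
    #define GEN8_CSB_ENTRIES        6
    #define GEN8_CSB_READ_PTR_MASK  (0x7 << 8)
    #define GEN8_CSB_WRITE_PTR_MASK (0x7 << 0)
    #define GEN8_CSB_READ_PTR(ptr)  (((ptr) & GEN8_CSB_READ_PTR_MASK) >> 8)
    #define GEN8_CSB_WRITE_PTR(ptr) (((ptr) & GEN8_CSB_WRITE_PTR_MASK) >> 0)
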
/* Prefer doing test_and_clear_bit() as a two stage operation to avoid
 * imposing the cost of a locked atomic transaction when submitting a
 * new request (outside of the context-switch interrupt).
 */
while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
- u32 __iomem *csb_mmio =
- dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
/* The HWSP contains a (cacheable) mirror of the CSB */
const u32 *buf =
&engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
if (unlikely(engine->csb_use_mmio)) {
buf = (u32 * __force)
(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
+ engine->csb_head = -1; /* force mmio read of CSB ptrs */
}
/* The write will be ordered by the uncached read (itself
 * a memory barrier), so no locked instruction is needed. If the
 * interrupt races with our split test/clear, worst case the bit
 * is set and we do a new loop.
 */
__clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
- head = readl(csb_mmio);
- tail = GEN8_CSB_WRITE_PTR(head);
- head = GEN8_CSB_READ_PTR(head);
+ if (unlikely(engine->csb_head == -1)) { /* following a reset */
+ head = readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
+ tail = GEN8_CSB_WRITE_PTR(head);
+ head = GEN8_CSB_READ_PTR(head);
+ engine->csb_head = head;
+ } else {
+ const int write_idx =
+ intel_hws_csb_write_index(dev_priv) -
+ I915_HWS_CSB_BUF0_INDEX;
+
+ head = engine->csb_head;
+ tail = READ_ONCE(buf[write_idx]);
+ }
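
A worked example of the write_idx arithmetic above, using the defines added
at the end of this patch: buf already points at the first CSB dword in the
HWSP (I915_HWS_CSB_BUF0_INDEX), so the write pointer's index is rebased:

    /* write_idx = I915_HWS_CSB_WRITE_INDEX - I915_HWS_CSB_BUF0_INDEX
     *           = 0x1f - 0x10 = 0xf (pre-CNL)
     * i.e. buf[15] holds the CSB write pointer mirrored by the GPU.
     */
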
while (head != tail) {
struct drm_i915_gem_request *rq;
unsigned int status;
!(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
}
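
The elided body of the while loop is structurally unchanged by this patch;
only its reads now go through buf, which aliases either the HWSP mirror or
the MMIO window. A rough sketch of the per-entry processing (abridged, not
the full loop):

    /* Each CSB entry is a (status, context-id) dword pair and head
     * wraps modulo the number of entries. */
    if (++head == GEN8_CSB_ENTRIES)
        head = 0;
    status = READ_ONCE(buf[2 * head]); /* HWSP mirror or MMIO, per above */
    if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
        continue;
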
- writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8),
- csb_mmio);
+ if (head != engine->csb_head) {
+ engine->csb_head = head;
+ writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8),
+ dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
+ }
}
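
The read-pointer writeback above uses _MASKED_FIELD(), the usual i915
masked-register idiom, so only the read-pointer field is updated and the
hardware's write pointer is left untouched. For reference, a sketch of the
existing macro (not part of this diff):

    /* Bits 31:16 select which of bits 15:0 the write actually modifies. */
    #define _MASKED_FIELD(mask, value)  (((mask) << 16) | (value))
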
if (execlists_elsp_ready(engine))
    execlists_dequeue(engine);
I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+ engine->csb_head = -1;
/* After a GPU reset, we may have requests to replay */
submit = false;
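
Note the sentinel in the engine->csb_head = -1 assignment above: csb_head is
declared unsigned int (see the header hunk below), so -1 stores as UINT_MAX,
and the csb_head == -1 test in the tasklet still matches via the usual
arithmetic conversions. A standalone illustration (not kernel code;
resample_csb() is a hypothetical helper):

    unsigned int csb_head = -1;  /* stores UINT_MAX */
    if (csb_head == -1)          /* -1 converts to UINT_MAX: true */
        resample_csb();          /* hypothetical: re-read ptrs via mmio */
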
struct rb_root execlist_queue;
struct rb_node *execlist_first;
unsigned int fw_domains;
+ unsigned int csb_head; /* cached CSB read pointer; -1 forces a resample from mmio */
bool csb_use_mmio;
/* Contexts are pinned whilst they are active on the GPU. The last
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_HWS_CSB_BUF0_INDEX 0x10
+#define I915_HWS_CSB_WRITE_INDEX 0x1f
+#define CNL_HWS_CSB_WRITE_INDEX 0x2f
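
These defines give the HWSP dword at which the hardware mirrors its CSB
write pointer; the location moved on Cannonlake. intel_hws_csb_write_index(),
used in the tasklet hunk above, would then just pick between them per gen.
A sketch (the gen >= 10 cutoff is an assumption based on the CNL_ prefix):

    static inline u32 intel_hws_csb_write_index(struct drm_i915_private *i915)
    {
        return INTEL_GEN(i915) >= 10 ? CNL_HWS_CSB_WRITE_INDEX :
                                       I915_HWS_CSB_WRITE_INDEX;
    }
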
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);