#define CTB_DESC_SIZE ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
#define CTB_H2G_BUFFER_SIZE (SZ_4K)
#define CTB_G2H_BUFFER_SIZE (4 * CTB_H2G_BUFFER_SIZE)
+#define G2H_ROOM_BUFFER_SIZE (CTB_G2H_BUFFER_SIZE / 4)
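+/*
+ * With the sizes above the H2G CTB is 4K and the G2H CTB is 16K; a quarter of
+ * the G2H CTB (4K, i.e. 1024 dwords) is held back as G2H_ROOM_BUFFER_SIZE so
+ * that unexpected G2H messages (e.g. logging, engine hang) still have room.
+ */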
struct ct_request {
struct list_head link;
static void guc_ct_buffer_reset(struct intel_guc_ct_buffer *ctb)
{
+ u32 space;
+
ctb->broken = false;
ctb->tail = 0;
ctb->head = 0;
- ctb->space = CIRC_SPACE(ctb->tail, ctb->head, ctb->size);
+ space = CIRC_SPACE(ctb->tail, ctb->head, ctb->size) - ctb->resv_space;
+ atomic_set(&ctb->space, space);
guc_ct_buffer_desc_init(ctb->desc);
}
static void guc_ct_buffer_init(struct intel_guc_ct_buffer *ctb,
struct guc_ct_buffer_desc *desc,
- u32 *cmds, u32 size_in_bytes)
+ u32 *cmds, u32 size_in_bytes, u32 resv_space)
{
GEM_BUG_ON(size_in_bytes % 4);
ctb->desc = desc;
ctb->cmds = cmds;
ctb->size = size_in_bytes / 4;
+ ctb->resv_space = resv_space / 4;
guc_ct_buffer_reset(ctb);
}
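+/*
+ * Worked example with the sizes above: the recv CTB is initialised with
+ * size_in_bytes = 16K and resv_space = 4K, i.e. ctb->size = 4096 dwords and
+ * ctb->resv_space = 1024 dwords, so guc_ct_buffer_reset() starts it with
+ * CIRC_SPACE(0, 0, 4096) - 1024 = 4095 - 1024 = 3071 dwords of usable space.
+ */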
struct guc_ct_buffer_desc *desc;
u32 blob_size;
u32 cmds_size;
+ u32 resv_space;
void *blob;
u32 *cmds;
int err;
desc = blob;
cmds = blob + 2 * CTB_DESC_SIZE;
cmds_size = CTB_H2G_BUFFER_SIZE;
- CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u\n", "send",
- ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size);
+ resv_space = 0;
+ CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u/%u\n", "send",
+ ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size,
+ resv_space);
- guc_ct_buffer_init(&ct->ctbs.send, desc, cmds, cmds_size);
+ guc_ct_buffer_init(&ct->ctbs.send, desc, cmds, cmds_size, resv_space);
/* store pointers to desc and cmds for recv ctb */
desc = blob + CTB_DESC_SIZE;
cmds = blob + 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE;
cmds_size = CTB_G2H_BUFFER_SIZE;
- CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u\n", "recv",
- ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size);
+ resv_space = G2H_ROOM_BUFFER_SIZE;
+ CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u/%u\n", "recv",
+ ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size,
+ resv_space);
- guc_ct_buffer_init(&ct->ctbs.recv, desc, cmds, cmds_size);
+ guc_ct_buffer_init(&ct->ctbs.recv, desc, cmds, cmds_size, resv_space);
return 0;
}
/* update local copies */
ctb->tail = tail;
- GEM_BUG_ON(ctb->space < len + GUC_CTB_HDR_LEN);
- ctb->space -= len + GUC_CTB_HDR_LEN;
+ GEM_BUG_ON(atomic_read(&ctb->space) < len + GUC_CTB_HDR_LEN);
+ atomic_sub(len + GUC_CTB_HDR_LEN, &ctb->space);
/* now update descriptor */
WRITE_ONCE(desc->tail, tail);
return ret;
}
+static inline bool g2h_has_room(struct intel_guc_ct *ct, u32 g2h_len_dw)
+{
+ struct intel_guc_ct_buffer *ctb = &ct->ctbs.recv;
+
+ /*
+ * We leave a certain amount of space in the G2H CTB buffer for
+ * unexpected G2H CTBs (e.g. logging, engine hang, etc...)
+ */
+ return !g2h_len_dw || atomic_read(&ctb->space) >= g2h_len_dw;
+}
+
+static inline void g2h_reserve_space(struct intel_guc_ct *ct, u32 g2h_len_dw)
+{
+ lockdep_assert_held(&ct->ctbs.send.lock);
+
+ GEM_BUG_ON(!g2h_has_room(ct, g2h_len_dw));
+
+ if (g2h_len_dw)
+ atomic_sub(g2h_len_dw, &ct->ctbs.recv.space);
+}
+
+static inline void g2h_release_space(struct intel_guc_ct *ct, u32 g2h_len_dw)
+{
+ atomic_add(g2h_len_dw, &ct->ctbs.recv.space);
+}
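+
+/*
+ * Credit pairing as used below: ct_send_nb() reserves exactly the G2H length
+ * the caller declared, and ct_handle_event() returns those credits when the
+ * matching *_DONE message arrives; the blocking ct_send() path does not know
+ * the response size, so it reserves GUC_CTB_HXG_MSG_MAX_LEN up front and
+ * releases it again once the response has been received.
+ */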
+
static inline bool h2g_has_room(struct intel_guc_ct *ct, u32 len_dw)
{
struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
u32 head;
u32 space;
- if (ctb->space >= len_dw)
+ if (atomic_read(&ctb->space) >= len_dw)
return true;
head = READ_ONCE(desc->head);
}
space = CIRC_SPACE(ctb->tail, head, ctb->size);
- ctb->space = space;
+ atomic_set(&ctb->space, space);
return space >= len_dw;
}
-static int has_room_nb(struct intel_guc_ct *ct, u32 len_dw)
+static int has_room_nb(struct intel_guc_ct *ct, u32 h2g_dw, u32 g2h_dw)
{
lockdep_assert_held(&ct->ctbs.send.lock);
- if (unlikely(!h2g_has_room(ct, len_dw))) {
+ if (unlikely(!h2g_has_room(ct, h2g_dw) || !g2h_has_room(ct, g2h_dw))) {
if (ct->stall_time == KTIME_MAX)
ct->stall_time = ktime_get();
return 0;
}
+#define G2H_LEN_DW(f) ({ \
+ typeof(f) f_ = (f); \
+ FIELD_GET(INTEL_GUC_CT_SEND_G2H_DW_MASK, f_) ? \
+ FIELD_GET(INTEL_GUC_CT_SEND_G2H_DW_MASK, f_) + \
+ GUC_CTB_HXG_MSG_MIN_LEN : 0; \
+})
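+
+/*
+ * Sketch of how the macro is meant to be used (the FIELD_PREP() encoding side
+ * is not part of this hunk, so treat it as an assumption): with
+ *     flags = FIELD_PREP(INTEL_GUC_CT_SEND_G2H_DW_MASK, n);
+ * G2H_LEN_DW(flags) evaluates to n + GUC_CTB_HXG_MSG_MIN_LEN dwords of G2H
+ * credit to reserve, or to 0 when the field is zero (no G2H reply expected).
+ */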
static int ct_send_nb(struct intel_guc_ct *ct,
const u32 *action,
u32 len,
{
struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
unsigned long spin_flags;
+ u32 g2h_len_dw = G2H_LEN_DW(flags);
u32 fence;
int ret;
spin_lock_irqsave(&ctb->lock, spin_flags);
- ret = has_room_nb(ct, len + GUC_CTB_HDR_LEN);
+ ret = has_room_nb(ct, len + GUC_CTB_HDR_LEN, g2h_len_dw);
if (unlikely(ret))
goto out;
if (unlikely(ret))
goto out;
+ g2h_reserve_space(ct, g2h_len_dw);
intel_guc_notify(ct_to_guc(ct));
out:
/*
* We use a lazy spin wait loop here as we believe that if the CT
* buffers are sized correctly the flow control condition should be
- * rare.
+ * rare. We reserve the maximum size in the G2H credits as we don't know
+ * how big the response is going to be.
*/
retry:
spin_lock_irqsave(&ctb->lock, flags);
- if (unlikely(!h2g_has_room(ct, len + GUC_CTB_HDR_LEN))) {
+ if (unlikely(!h2g_has_room(ct, len + GUC_CTB_HDR_LEN) ||
+ !g2h_has_room(ct, GUC_CTB_HXG_MSG_MAX_LEN))) {
if (ct->stall_time == KTIME_MAX)
ct->stall_time = ktime_get();
spin_unlock_irqrestore(&ctb->lock, flags);
spin_unlock(&ct->requests.lock);
err = ct_write(ct, action, len, fence, 0);
+ g2h_reserve_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
spin_unlock_irqrestore(&ctb->lock, flags);
intel_guc_notify(ct_to_guc(ct));
err = wait_for_ct_request_update(&request, status);
+ g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
if (unlikely(err))
goto unlink;
static int ct_handle_event(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
{
const u32 *hxg = &request->msg[GUC_CTB_MSG_MIN_LEN];
+ u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
unsigned long flags;
GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT);
+ /*
+ * Adjusting the space must be done in IRQ context, or a deadlock can occur:
+ * the CTB processing in the workqueue below can itself send CTBs, which would
+ * create a circular dependency if the space were returned there.
+ */
+ switch (action) {
+ case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
+ case INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
+ g2h_release_space(ct, request->size);
+ }
+
spin_lock_irqsave(&ct->requests.lock, flags);
list_add_tail(&request->link, &ct->requests.incoming);
spin_unlock_irqrestore(&ct->requests.lock, flags);
struct intel_context *ce = rq->context;
u32 action[3];
int len = 0;
+ u32 g2h_len_dw = 0;
bool enabled = context_enabled(ce);
GEM_BUG_ON(!atomic_read(&ce->guc_id_ref));
action[len++] = GUC_CONTEXT_ENABLE;
set_context_pending_enable(ce);
intel_context_get(ce);
+ g2h_len_dw = G2H_LEN_DW_SCHED_CONTEXT_MODE_SET;
} else {
action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT;
action[len++] = ce->guc_id;
}
- err = intel_guc_send_nb(guc, action, len);
-
+ err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
if (!enabled && !err) {
set_context_enabled(ce);
} else if (!enabled) {
offset,
};
- return intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), true);
+ return intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
}
static int register_context(struct intel_context *ce)
guc_id,
};
- return intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), true);
+ return intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action),
+ G2H_LEN_DW_DEREGISTER_CONTEXT, true);
}
static int deregister_context(struct intel_context *ce, u32 guc_id)
intel_context_get(ce);
- intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), true);
+ intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action),
+ G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
}
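+
+/*
+ * The G2H_LEN_DW_SCHED_CONTEXT_MODE_SET and G2H_LEN_DW_DEREGISTER_CONTEXT
+ * credits reserved above are handed back by ct_handle_event() when the
+ * corresponding SCHED_CONTEXT_MODE_DONE / DEREGISTER_CONTEXT_DONE G2H
+ * messages are processed.
+ */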
static u16 prep_context_pending_disable(struct intel_context *ce)